Skip to content

Commit

Permalink
feat: Adds additional_experiments field to AutoMlTablesInputs
Browse files Browse the repository at this point in the history
feat: Adds two new ModelType constants for Video Action Recognition training jobs

PiperOrigin-RevId: 384483418
  • Loading branch information
Google APIs authored and copybara-github committed Jul 13, 2021
1 parent f81e4aa commit 369fd2a
Show file tree
Hide file tree
Showing 14 changed files with 44 additions and 19 deletions.
20 changes: 15 additions & 5 deletions google/cloud/aiplatform/v1/schema/aiplatform_v1.yaml
Original file line number Diff line number Diff line change
@@ -1,7 +1,7 @@
type: google.api.Service
config_version: 3
name: aiplatform.googleapis.com
title: Cloud AI Platform API
title: Vertex AI API

types:
- name: google.cloud.aiplatform.v1.schema.predict.instance.ImageClassificationPredictionInstance
Expand Down Expand Up @@ -64,10 +64,10 @@ documentation:
Train high-quality custom machine learning models with minimal machine
learning expertise and effort.
overview: |-
AI Platform (Unified) enables data scientists, developers, and AI newcomers
to create custom machine learning models specific to their business needs
by leveraging Google's state-of-the-art transfer learning and innovative
AI research.
Vertex AI enables data scientists, developers, and AI newcomers to create
custom machine learning models specific to their business needs by
leveraging Google's state-of-the-art transfer learning and innovative AI
research.
rules:
- selector: google.cloud.location.Locations.GetLocation
description: Gets information about a location.
Expand Down Expand Up @@ -115,6 +115,7 @@ http:
- post: '/ui/{name=projects/*/locations/*/migratableResources/*/operations/*}:cancel'
- post: '/ui/{name=projects/*/locations/*/models/*/operations/*}:cancel'
- post: '/ui/{name=projects/*/locations/*/models/*/evaluations/*/operations/*}:cancel'
- post: '/ui/{name=projects/*/locations/*/studies/*/operations/*}:cancel'
- post: '/ui/{name=projects/*/locations/*/studies/*/trials/*/operations/*}:cancel'
- post: '/ui/{name=projects/*/locations/*/trainingPipelines/*/operations/*}:cancel'
- post: '/ui/{name=projects/*/locations/*/pipelineJobs/*/operations/*}:cancel'
Expand Down Expand Up @@ -142,6 +143,7 @@ http:
- post: '/v1/{name=projects/*/locations/*/migratableResources/*/operations/*}:cancel'
- post: '/v1/{name=projects/*/locations/*/models/*/operations/*}:cancel'
- post: '/v1/{name=projects/*/locations/*/models/*/evaluations/*/operations/*}:cancel'
- post: '/v1/{name=projects/*/locations/*/studies/*/operations/*}:cancel'
- post: '/v1/{name=projects/*/locations/*/studies/*/trials/*/operations/*}:cancel'
- post: '/v1/{name=projects/*/locations/*/trainingPipelines/*/operations/*}:cancel'
- post: '/v1/{name=projects/*/locations/*/pipelineJobs/*/operations/*}:cancel'
Expand Down Expand Up @@ -171,6 +173,7 @@ http:
- delete: '/ui/{name=projects/*/locations/*/migratableResources/*/operations/*}'
- delete: '/ui/{name=projects/*/locations/*/models/*/operations/*}'
- delete: '/ui/{name=projects/*/locations/*/models/*/evaluations/*/operations/*}'
- delete: '/ui/{name=projects/*/locations/*/studies/*/operations/*}'
- delete: '/ui/{name=projects/*/locations/*/studies/*/trials/*/operations/*}'
- delete: '/ui/{name=projects/*/locations/*/trainingPipelines/*/operations/*}'
- delete: '/ui/{name=projects/*/locations/*/pipelineJobs/*/operations/*}'
Expand Down Expand Up @@ -198,6 +201,7 @@ http:
- delete: '/v1/{name=projects/*/locations/*/migratableResources/*/operations/*}'
- delete: '/v1/{name=projects/*/locations/*/models/*/operations/*}'
- delete: '/v1/{name=projects/*/locations/*/models/*/evaluations/*/operations/*}'
- delete: '/v1/{name=projects/*/locations/*/studies/*/operations/*}'
- delete: '/v1/{name=projects/*/locations/*/studies/*/trials/*/operations/*}'
- delete: '/v1/{name=projects/*/locations/*/trainingPipelines/*/operations/*}'
- delete: '/v1/{name=projects/*/locations/*/pipelineJobs/*/operations/*}'
Expand Down Expand Up @@ -227,6 +231,7 @@ http:
- get: '/ui/{name=projects/*/locations/*/migratableResources/*/operations/*}'
- get: '/ui/{name=projects/*/locations/*/models/*/operations/*}'
- get: '/ui/{name=projects/*/locations/*/models/*/evaluations/*/operations/*}'
- get: '/ui/{name=projects/*/locations/*/studies/*/operations/*}'
- get: '/ui/{name=projects/*/locations/*/studies/*/trials/*/operations/*}'
- get: '/ui/{name=projects/*/locations/*/trainingPipelines/*/operations/*}'
- get: '/ui/{name=projects/*/locations/*/pipelineJobs/*/operations/*}'
Expand Down Expand Up @@ -254,6 +259,7 @@ http:
- get: '/v1/{name=projects/*/locations/*/migratableResources/*/operations/*}'
- get: '/v1/{name=projects/*/locations/*/models/*/operations/*}'
- get: '/v1/{name=projects/*/locations/*/models/*/evaluations/*/operations/*}'
- get: '/v1/{name=projects/*/locations/*/studies/*/operations/*}'
- get: '/v1/{name=projects/*/locations/*/studies/*/trials/*/operations/*}'
- get: '/v1/{name=projects/*/locations/*/trainingPipelines/*/operations/*}'
- get: '/v1/{name=projects/*/locations/*/pipelineJobs/*/operations/*}'
Expand Down Expand Up @@ -283,6 +289,7 @@ http:
- get: '/ui/{name=projects/*/locations/*/migratableResources/*}/operations'
- get: '/ui/{name=projects/*/locations/*/models/*}/operations'
- get: '/ui/{name=projects/*/locations/*/models/*/evaluations/*}/operations'
- get: '/ui/{name=projects/*/locations/*/studies/*}/operations'
- get: '/ui/{name=projects/*/locations/*/studies/*/trials/*}/operations'
- get: '/ui/{name=projects/*/locations/*/trainingPipelines/*}/operations'
- get: '/ui/{name=projects/*/locations/*/pipelineJobs/*}/operations'
Expand Down Expand Up @@ -310,6 +317,7 @@ http:
- get: '/v1/{name=projects/*/locations/*/migratableResources/*}/operations'
- get: '/v1/{name=projects/*/locations/*/models/*}/operations'
- get: '/v1/{name=projects/*/locations/*/models/*/evaluations/*}/operations'
- get: '/v1/{name=projects/*/locations/*/studies/*}/operations'
- get: '/v1/{name=projects/*/locations/*/studies/*/trials/*}/operations'
- get: '/v1/{name=projects/*/locations/*/trainingPipelines/*}/operations'
- get: '/v1/{name=projects/*/locations/*/pipelineJobs/*}/operations'
Expand Down Expand Up @@ -339,6 +347,7 @@ http:
- post: '/ui/{name=projects/*/locations/*/migratableResources/*/operations/*}:wait'
- post: '/ui/{name=projects/*/locations/*/models/*/operations/*}:wait'
- post: '/ui/{name=projects/*/locations/*/models/*/evaluations/*/operations/*}:wait'
- post: '/ui/{name=projects/*/locations/*/studies/*/operations/*}:wait'
- post: '/ui/{name=projects/*/locations/*/studies/*/trials/*/operations/*}:wait'
- post: '/ui/{name=projects/*/locations/*/trainingPipelines/*/operations/*}:wait'
- post: '/ui/{name=projects/*/locations/*/pipelineJobs/*/operations/*}:wait'
Expand Down Expand Up @@ -366,6 +375,7 @@ http:
- post: '/v1/{name=projects/*/locations/*/migratableResources/*/operations/*}:wait'
- post: '/v1/{name=projects/*/locations/*/models/*/operations/*}:wait'
- post: '/v1/{name=projects/*/locations/*/models/*/evaluations/*/operations/*}:wait'
- post: '/v1/{name=projects/*/locations/*/studies/*/operations/*}:wait'
- post: '/v1/{name=projects/*/locations/*/studies/*/trials/*/operations/*}:wait'
- post: '/v1/{name=projects/*/locations/*/trainingPipelines/*/operations/*}:wait'
- post: '/v1/{name=projects/*/locations/*/pipelineJobs/*/operations/*}:wait'
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -28,7 +28,7 @@ option ruby_package = "Google::Cloud::AIPlatform::V1::Schema::Predict::Instance"

// Prediction input format for Image Classification.
message ImageClassificationPredictionInstance {
// The image bytes or GCS URI to make the prediction on.
// The image bytes or Cloud Storage URI to make the prediction on.
string content = 1;

// The MIME type of the content of the image. Only the images in below listed
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -28,7 +28,7 @@ option ruby_package = "Google::Cloud::AIPlatform::V1::Schema::Predict::Instance"

// Prediction input format for Image Object Detection.
message ImageObjectDetectionPredictionInstance {
// The image bytes or GCS URI to make the prediction on.
// The image bytes or Cloud Storage URI to make the prediction on.
string content = 1;

// The MIME type of the content of the image. Only the images in below listed
Expand Down
Original file line number Diff line number Diff line change
@@ -1,4 +1,4 @@
// Copyright 2020 Google LLC
// Copyright 2021 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -38,7 +38,7 @@ message TextExtractionPredictionInstance {

// This field is only used for batch prediction. If a key is provided, the
// batch prediction result will be mapped to this key. If omitted, then the
// batch prediction result will contain the entire input instance. AI Platform
// batch prediction result will contain the entire input instance. Vertex AI
// will not check if keys in the request are duplicates, so it is up to the
// caller to ensure the keys are unique.
string key = 3;
Expand Down
Original file line number Diff line number Diff line change
@@ -1,4 +1,4 @@
// Copyright 2020 Google LLC
// Copyright 2021 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -37,15 +37,15 @@ message VideoClassificationPredictionParams {
// fewer predictions. Default value is 10,000.
int32 max_predictions = 2;

// Set to true to request segment-level classification. AI Platform returns
// Set to true to request segment-level classification. Vertex AI returns
// labels and their confidence scores for the entire time segment of the
// video that the user specified in the input instance.
// Default value is true.
bool segment_classification = 3;

// Set to true to request shot-level classification. AI Platform determines
// Set to true to request shot-level classification. Vertex AI determines
// the boundaries for each camera shot in the entire time segment of the
// video that the user specified in the input instance. Vertex AI then
// video that user specified in the input instance. Vertex AI then
// returns labels and their confidence scores for each detected shot, along
// with the start and end time of the shot.
// WARNING: Model evaluation is not done for this classification type,
Expand All @@ -55,7 +55,7 @@ message VideoClassificationPredictionParams {
bool shot_classification = 4;

// Set to true to request classification for a video at one-second intervals.
// AI Platform returns labels and their confidence scores for each second of
// Vertex AI returns labels and their confidence scores for each second of
// the entire time segment of the video that the user specified in the input instance.
// WARNING: Model evaluation is not done for this classification type, the
// quality of it depends on the training data, but there are no metrics
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -28,8 +28,7 @@ option ruby_package = "Google::Cloud::AIPlatform::V1::Schema::Predict::Predictio

// Prediction output format for Image and Text Classification.
message ClassificationPredictionResult {
// The resource IDs of the AnnotationSpecs that had been identified, ordered
// by the confidence score descendingly.
// The resource IDs of the AnnotationSpecs that had been identified.
repeated int64 ids = 1;

// The display names of the AnnotationSpecs that had been identified, order
Expand Down
Original file line number Diff line number Diff line change
@@ -1,4 +1,4 @@
// Copyright 2020 Google LLC
// Copyright 2021 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -25,6 +25,7 @@ proto_library(
],
deps = [
"//google/api:annotations_proto",
"//google/api:field_behavior_proto",
],
)

Expand Down Expand Up @@ -63,6 +64,7 @@ go_proto_library(
protos = [":definition_proto"],
deps = [
"//google/api:annotations_go_proto",
"//google/api:field_behavior_go_proto",
],
)

Expand All @@ -83,6 +85,7 @@ moved_proto_library(
srcs = [":definition_proto"],
deps = [
"//google/api:annotations_proto",
"//google/api:field_behavior_proto",
],
)

Expand Down
Original file line number Diff line number Diff line change
@@ -1,4 +1,4 @@
// Copyright 2020 Google LLC
// Copyright 2021 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -16,6 +16,7 @@ syntax = "proto3";

package google.cloud.aiplatform.v1.schema.trainingjob.definition;

import "google/api/field_behavior.proto";
import "google/cloud/aiplatform/v1/schema/trainingjob/definition/export_evaluated_data_items_config.proto";
import "google/api/annotations.proto";

Expand Down Expand Up @@ -270,6 +271,9 @@ message AutoMlTablesInputs {
// Configuration for exporting test set predictions to a BigQuery table. If
// this configuration is absent, then the export is not performed.
ExportEvaluatedDataItemsConfig export_evaluated_data_items_config = 10;

// Additional experiment flags for the Tables training pipeline.
repeated string additional_experiments = 11;
}

// Model metadata specific to AutoML Tables.
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -46,6 +46,16 @@ message AutoMlVideoActionRecognitionInputs {
// also be exported (see ModelService.ExportModel) as a TensorFlow or
// TensorFlow Lite model and used on a mobile or edge device afterwards.
MOBILE_VERSATILE_1 = 2;

// A model that, in addition to being available within Google Cloud, can
// also be exported (see ModelService.ExportModel) to a Jetson device
// afterwards.
MOBILE_JETSON_VERSATILE_1 = 3;

// A model that, in addition to being available within Google Cloud, can
// also be exported (see ModelService.ExportModel) as a TensorFlow or
// TensorFlow Lite model and used on a Coral device afterwards.
MOBILE_CORAL_VERSATILE_1 = 4;
}

ModelType model_type = 1;
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -33,7 +33,6 @@ message ExportEvaluatedDataItemsConfig {
//
// If not specified, then results are exported to the following auto-created
// BigQuery table:
//
// <project_id>:export_evaluated_examples_<model_name>_<yyyy_MM_dd'T'HH_mm_ss_SSS'Z'>.evaluated_examples
string destination_bigquery_uri = 1;

Expand Down

0 comments on commit 369fd2a

Please sign in to comment.