From 50bdb01504740ed31de788d8a160f3e2be7f55df Mon Sep 17 00:00:00 2001
From: Jaycee Li <102714969+jaycee-li@users.noreply.github.com>
Date: Tue, 10 May 2022 12:38:53 -0700
Subject: [PATCH 01/14] Feat: add batch_size kwarg for batch prediction jobs
(#1194)
* Add batch_size kwarg for batch prediction jobs
* Fix errors
Update the copyright year. Change the order of the argument. Fix the syntax error.
* fix: change description layout
---
google/cloud/aiplatform/jobs.py | 20 +++++++-
google/cloud/aiplatform/models.py | 11 ++++-
tests/unit/aiplatform/test_jobs.py | 6 +++
tests/unit/aiplatform/test_models.py | 69 +++++++++++++++-------------
4 files changed, 70 insertions(+), 36 deletions(-)
diff --git a/google/cloud/aiplatform/jobs.py b/google/cloud/aiplatform/jobs.py
index fc4f829882..00d6f11780 100644
--- a/google/cloud/aiplatform/jobs.py
+++ b/google/cloud/aiplatform/jobs.py
@@ -1,6 +1,6 @@
# -*- coding: utf-8 -*-
-# Copyright 2020 Google LLC
+# Copyright 2022 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
@@ -40,6 +40,7 @@
job_state as gca_job_state,
hyperparameter_tuning_job as gca_hyperparameter_tuning_job_compat,
machine_resources as gca_machine_resources_compat,
+ manual_batch_tuning_parameters as gca_manual_batch_tuning_parameters_compat,
study as gca_study_compat,
)
from google.cloud.aiplatform.constants import base as constants
@@ -376,6 +377,7 @@ def create(
encryption_spec_key_name: Optional[str] = None,
sync: bool = True,
create_request_timeout: Optional[float] = None,
+ batch_size: Optional[int] = None,
) -> "BatchPredictionJob":
"""Create a batch prediction job.
@@ -534,6 +536,13 @@ def create(
be immediately returned and synced when the Future has completed.
create_request_timeout (float):
Optional. The timeout for the create request in seconds.
+ batch_size (int):
+ Optional. The number of the records (e.g. instances) of the operation given in each batch
+ to a machine replica. Machine type, and size of a single record should be considered
+ when setting this parameter, higher value speeds up the batch operation's execution,
+ but too high value will result in a whole batch not fitting in a machine's memory,
+ and the whole operation will fail.
+ The default value is 64.
Returns:
(jobs.BatchPredictionJob):
Instantiated representation of the created batch prediction job.
@@ -647,7 +656,14 @@ def create(
gapic_batch_prediction_job.dedicated_resources = dedicated_resources
- gapic_batch_prediction_job.manual_batch_tuning_parameters = None
+ manual_batch_tuning_parameters = (
+ gca_manual_batch_tuning_parameters_compat.ManualBatchTuningParameters()
+ )
+ manual_batch_tuning_parameters.batch_size = batch_size
+
+ gapic_batch_prediction_job.manual_batch_tuning_parameters = (
+ manual_batch_tuning_parameters
+ )
# User Labels
gapic_batch_prediction_job.labels = labels
diff --git a/google/cloud/aiplatform/models.py b/google/cloud/aiplatform/models.py
index b15ed791bf..95f3044cbe 100644
--- a/google/cloud/aiplatform/models.py
+++ b/google/cloud/aiplatform/models.py
@@ -1,6 +1,6 @@
# -*- coding: utf-8 -*-
-# Copyright 2020 Google LLC
+# Copyright 2022 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
@@ -2284,6 +2284,7 @@ def batch_predict(
encryption_spec_key_name: Optional[str] = None,
sync: bool = True,
create_request_timeout: Optional[float] = None,
+ batch_size: Optional[int] = None,
) -> jobs.BatchPredictionJob:
"""Creates a batch prediction job using this Model and outputs
prediction results to the provided destination prefix in the specified
@@ -2442,6 +2443,13 @@ def batch_predict(
Overrides encryption_spec_key_name set in aiplatform.init.
create_request_timeout (float):
Optional. The timeout for the create request in seconds.
+ batch_size (int):
+ Optional. The number of the records (e.g. instances) of the operation given in each batch
+ to a machine replica. Machine type, and size of a single record should be considered
+ when setting this parameter, higher value speeds up the batch operation's execution,
+ but too high value will result in a whole batch not fitting in a machine's memory,
+ and the whole operation will fail.
+ The default value is 64.
Returns:
(jobs.BatchPredictionJob):
Instantiated representation of the created batch prediction job.
@@ -2462,6 +2470,7 @@ def batch_predict(
accelerator_count=accelerator_count,
starting_replica_count=starting_replica_count,
max_replica_count=max_replica_count,
+ batch_size=batch_size,
generate_explanation=generate_explanation,
explanation_metadata=explanation_metadata,
explanation_parameters=explanation_parameters,
diff --git a/tests/unit/aiplatform/test_jobs.py b/tests/unit/aiplatform/test_jobs.py
index 6b8d908dd2..73a4f8da0c 100644
--- a/tests/unit/aiplatform/test_jobs.py
+++ b/tests/unit/aiplatform/test_jobs.py
@@ -37,6 +37,7 @@
io as gca_io_compat,
job_state as gca_job_state_compat,
machine_resources as gca_machine_resources_compat,
+ manual_batch_tuning_parameters as gca_manual_batch_tuning_parameters_compat,
)
from google.cloud.aiplatform_v1.services.job_service import client as job_service_client
@@ -132,6 +133,7 @@
_TEST_ACCELERATOR_COUNT = 2
_TEST_STARTING_REPLICA_COUNT = 2
_TEST_MAX_REPLICA_COUNT = 12
+_TEST_BATCH_SIZE = 16
_TEST_LABEL = {"team": "experimentation", "trial_id": "x435"}
@@ -725,6 +727,7 @@ def test_batch_predict_with_all_args(
credentials=creds,
sync=sync,
create_request_timeout=None,
+ batch_size=_TEST_BATCH_SIZE,
)
batch_prediction_job.wait_for_resource_creation()
@@ -756,6 +759,9 @@ def test_batch_predict_with_all_args(
starting_replica_count=_TEST_STARTING_REPLICA_COUNT,
max_replica_count=_TEST_MAX_REPLICA_COUNT,
),
+ manual_batch_tuning_parameters=gca_manual_batch_tuning_parameters_compat.ManualBatchTuningParameters(
+ batch_size=_TEST_BATCH_SIZE
+ ),
generate_explanation=True,
explanation_spec=gca_explanation_compat.ExplanationSpec(
metadata=_TEST_EXPLANATION_METADATA,
diff --git a/tests/unit/aiplatform/test_models.py b/tests/unit/aiplatform/test_models.py
index f6561cffaa..eaf63d9fdd 100644
--- a/tests/unit/aiplatform/test_models.py
+++ b/tests/unit/aiplatform/test_models.py
@@ -49,6 +49,7 @@
env_var as gca_env_var,
explanation as gca_explanation,
machine_resources as gca_machine_resources,
+ manual_batch_tuning_parameters as gca_manual_batch_tuning_parameters_compat,
model_service as gca_model_service,
model_evaluation as gca_model_evaluation,
endpoint_service as gca_endpoint_service,
@@ -86,6 +87,8 @@
_TEST_STARTING_REPLICA_COUNT = 2
_TEST_MAX_REPLICA_COUNT = 12
+_TEST_BATCH_SIZE = 16
+
_TEST_PIPELINE_RESOURCE_NAME = (
"projects/my-project/locations/us-central1/trainingPipeline/12345"
)
@@ -1402,47 +1405,47 @@ def test_batch_predict_with_all_args(self, create_batch_prediction_job_mock, syn
encryption_spec_key_name=_TEST_ENCRYPTION_KEY_NAME,
sync=sync,
create_request_timeout=None,
+ batch_size=_TEST_BATCH_SIZE,
)
if not sync:
batch_prediction_job.wait()
# Construct expected request
- expected_gapic_batch_prediction_job = (
- gca_batch_prediction_job.BatchPredictionJob(
- display_name=_TEST_BATCH_PREDICTION_DISPLAY_NAME,
- model=model_service_client.ModelServiceClient.model_path(
- _TEST_PROJECT, _TEST_LOCATION, _TEST_ID
- ),
- input_config=gca_batch_prediction_job.BatchPredictionJob.InputConfig(
- instances_format="jsonl",
- gcs_source=gca_io.GcsSource(
- uris=[_TEST_BATCH_PREDICTION_GCS_SOURCE]
- ),
- ),
- output_config=gca_batch_prediction_job.BatchPredictionJob.OutputConfig(
- gcs_destination=gca_io.GcsDestination(
- output_uri_prefix=_TEST_BATCH_PREDICTION_GCS_DEST_PREFIX
- ),
- predictions_format="csv",
- ),
- dedicated_resources=gca_machine_resources.BatchDedicatedResources(
- machine_spec=gca_machine_resources.MachineSpec(
- machine_type=_TEST_MACHINE_TYPE,
- accelerator_type=_TEST_ACCELERATOR_TYPE,
- accelerator_count=_TEST_ACCELERATOR_COUNT,
- ),
- starting_replica_count=_TEST_STARTING_REPLICA_COUNT,
- max_replica_count=_TEST_MAX_REPLICA_COUNT,
+ expected_gapic_batch_prediction_job = gca_batch_prediction_job.BatchPredictionJob(
+ display_name=_TEST_BATCH_PREDICTION_DISPLAY_NAME,
+ model=model_service_client.ModelServiceClient.model_path(
+ _TEST_PROJECT, _TEST_LOCATION, _TEST_ID
+ ),
+ input_config=gca_batch_prediction_job.BatchPredictionJob.InputConfig(
+ instances_format="jsonl",
+ gcs_source=gca_io.GcsSource(uris=[_TEST_BATCH_PREDICTION_GCS_SOURCE]),
+ ),
+ output_config=gca_batch_prediction_job.BatchPredictionJob.OutputConfig(
+ gcs_destination=gca_io.GcsDestination(
+ output_uri_prefix=_TEST_BATCH_PREDICTION_GCS_DEST_PREFIX
),
- generate_explanation=True,
- explanation_spec=gca_explanation.ExplanationSpec(
- metadata=_TEST_EXPLANATION_METADATA,
- parameters=_TEST_EXPLANATION_PARAMETERS,
+ predictions_format="csv",
+ ),
+ dedicated_resources=gca_machine_resources.BatchDedicatedResources(
+ machine_spec=gca_machine_resources.MachineSpec(
+ machine_type=_TEST_MACHINE_TYPE,
+ accelerator_type=_TEST_ACCELERATOR_TYPE,
+ accelerator_count=_TEST_ACCELERATOR_COUNT,
),
- labels=_TEST_LABEL,
- encryption_spec=_TEST_ENCRYPTION_SPEC,
- )
+ starting_replica_count=_TEST_STARTING_REPLICA_COUNT,
+ max_replica_count=_TEST_MAX_REPLICA_COUNT,
+ ),
+ manual_batch_tuning_parameters=gca_manual_batch_tuning_parameters_compat.ManualBatchTuningParameters(
+ batch_size=_TEST_BATCH_SIZE
+ ),
+ generate_explanation=True,
+ explanation_spec=gca_explanation.ExplanationSpec(
+ metadata=_TEST_EXPLANATION_METADATA,
+ parameters=_TEST_EXPLANATION_PARAMETERS,
+ ),
+ labels=_TEST_LABEL,
+ encryption_spec=_TEST_ENCRYPTION_SPEC,
)
create_batch_prediction_job_mock.assert_called_once_with(
From 243c6f6ea90b4228c572ec59f4568f8e1de6f80a Mon Sep 17 00:00:00 2001
From: Sara Robinson
Date: Tue, 10 May 2022 17:41:03 -0400
Subject: [PATCH 02/14] chore: cleanup on dataset system tests (#1214)
---
tests/system/aiplatform/test_dataset.py | 26 ++++++++++++++-----------
1 file changed, 15 insertions(+), 11 deletions(-)
diff --git a/tests/system/aiplatform/test_dataset.py b/tests/system/aiplatform/test_dataset.py
index f4033bfb7e..f152ed0e32 100644
--- a/tests/system/aiplatform/test_dataset.py
+++ b/tests/system/aiplatform/test_dataset.py
@@ -131,7 +131,7 @@ def setup_method(self):
@pytest.fixture()
def storage_client(self):
- yield storage.Client(project=e2e_base._PROJECT)
+ yield storage.Client(project=_TEST_PROJECT)
@pytest.fixture()
def staging_bucket(self, storage_client):
@@ -174,7 +174,7 @@ def test_get_new_dataset_and_import(self, dataset_gapic_client):
try:
text_dataset = aiplatform.TextDataset.create(
- display_name=f"temp_sdk_integration_test_create_text_dataset_{uuid.uuid4()}",
+ display_name=self._make_display_name(key="get_new_dataset_and_import"),
)
my_dataset = aiplatform.TextDataset(dataset_name=text_dataset.name)
@@ -189,7 +189,6 @@ def test_get_new_dataset_and_import(self, dataset_gapic_client):
my_dataset.import_data(
gcs_source=_TEST_TEXT_ENTITY_EXTRACTION_GCS_SOURCE,
import_schema_uri=_TEST_TEXT_ENTITY_IMPORT_SCHEMA,
- import_request_timeout=600.0,
)
data_items_post_import = dataset_gapic_client.list_data_items(
@@ -198,8 +197,7 @@ def test_get_new_dataset_and_import(self, dataset_gapic_client):
assert len(list(data_items_post_import)) == 469
finally:
- if text_dataset is not None:
- text_dataset.delete()
+ text_dataset.delete()
@vpcsc_config.skip_if_inside_vpcsc
def test_create_and_import_image_dataset(self, dataset_gapic_client):
@@ -208,7 +206,9 @@ def test_create_and_import_image_dataset(self, dataset_gapic_client):
try:
img_dataset = aiplatform.ImageDataset.create(
- display_name=f"temp_sdk_integration_create_and_import_dataset_{uuid.uuid4()}",
+ display_name=self._make_display_name(
+ key="create_and_import_image_dataset"
+ ),
gcs_source=_TEST_IMAGE_OBJECT_DETECTION_GCS_SOURCE,
import_schema_uri=_TEST_IMAGE_OBJ_DET_IMPORT_SCHEMA,
create_request_timeout=None,
@@ -230,7 +230,7 @@ def test_create_tabular_dataset(self):
try:
tabular_dataset = aiplatform.TabularDataset.create(
- display_name=f"temp_sdk_integration_create_and_import_dataset_{uuid.uuid4()}",
+ display_name=self._make_display_name(key="create_tabular_dataset"),
gcs_source=[_TEST_TABULAR_CLASSIFICATION_GCS_SOURCE],
create_request_timeout=None,
)
@@ -250,13 +250,15 @@ def test_create_tabular_dataset(self):
tabular_dataset.delete()
def test_create_tabular_dataset_from_dataframe(self, bigquery_dataset):
- bq_staging_table = f"bq://{e2e_base._PROJECT}.{bigquery_dataset.dataset_id}.test_table{uuid.uuid4()}"
+ bq_staging_table = f"bq://{_TEST_PROJECT}.{bigquery_dataset.dataset_id}.test_table{uuid.uuid4()}"
try:
tabular_dataset = aiplatform.TabularDataset.create_from_dataframe(
df_source=_TEST_DATAFRAME,
staging_path=bq_staging_table,
- display_name=f"temp_sdk_integration_create_and_import_dataset_from_dataframe{uuid.uuid4()}",
+ display_name=self._make_display_name(
+ key="create_and_import_dataset_from_dataframe"
+ ),
)
"""Use the Dataset.create_from_dataframe() method to create a new tabular dataset.
@@ -281,12 +283,14 @@ def test_create_tabular_dataset_from_dataframe_with_provided_schema(
created and references the BQ source."""
try:
- bq_staging_table = f"bq://{e2e_base._PROJECT}.{bigquery_dataset.dataset_id}.test_table{uuid.uuid4()}"
+ bq_staging_table = f"bq://{_TEST_PROJECT}.{bigquery_dataset.dataset_id}.test_table{uuid.uuid4()}"
tabular_dataset = aiplatform.TabularDataset.create_from_dataframe(
df_source=_TEST_DATAFRAME,
staging_path=bq_staging_table,
- display_name=f"temp_sdk_integration_create_and_import_dataset_from_dataframe{uuid.uuid4()}",
+ display_name=self._make_display_name(
+ key="create_and_import_dataset_from_dataframe"
+ ),
bq_schema=_TEST_DATAFRAME_BQ_SCHEMA,
)
From e9510ea6344a296e0c93ddf32280cf4c010ee4f1 Mon Sep 17 00:00:00 2001
From: Dan Lee <71398022+dandhlee@users.noreply.github.com>
Date: Tue, 10 May 2022 21:25:13 -0400
Subject: [PATCH 03/14] docs: update aiplatform SDK arrangement for Sphinx
(#1163)
* docs: update aiplatform SDK sphinx arrangement
* docs: remove unneeded aiplatform.rst file
* docs: remove moves files
Co-authored-by: Karl Weinmeister <11586922+kweinmeister@users.noreply.github.com>
---
docs/{ => aiplatform}/definition_v1/types.rst | 0
docs/{ => aiplatform}/definition_v1beta1/types.rst | 0
docs/{ => aiplatform}/instance_v1/types.rst | 0
docs/{ => aiplatform}/instance_v1beta1/types.rst | 0
docs/{ => aiplatform}/params_v1/types.rst | 0
docs/{ => aiplatform}/params_v1beta1/types.rst | 0
docs/{ => aiplatform}/prediction_v1/types.rst | 0
docs/{ => aiplatform}/prediction_v1beta1/types.rst | 0
docs/{aiplatform.rst => aiplatform/services.rst} | 2 +-
docs/aiplatform/types.rst | 13 +++++++++++++
docs/index.rst | 6 ++++--
11 files changed, 18 insertions(+), 3 deletions(-)
rename docs/{ => aiplatform}/definition_v1/types.rst (100%)
rename docs/{ => aiplatform}/definition_v1beta1/types.rst (100%)
rename docs/{ => aiplatform}/instance_v1/types.rst (100%)
rename docs/{ => aiplatform}/instance_v1beta1/types.rst (100%)
rename docs/{ => aiplatform}/params_v1/types.rst (100%)
rename docs/{ => aiplatform}/params_v1beta1/types.rst (100%)
rename docs/{ => aiplatform}/prediction_v1/types.rst (100%)
rename docs/{ => aiplatform}/prediction_v1beta1/types.rst (100%)
rename docs/{aiplatform.rst => aiplatform/services.rst} (84%)
create mode 100644 docs/aiplatform/types.rst
diff --git a/docs/definition_v1/types.rst b/docs/aiplatform/definition_v1/types.rst
similarity index 100%
rename from docs/definition_v1/types.rst
rename to docs/aiplatform/definition_v1/types.rst
diff --git a/docs/definition_v1beta1/types.rst b/docs/aiplatform/definition_v1beta1/types.rst
similarity index 100%
rename from docs/definition_v1beta1/types.rst
rename to docs/aiplatform/definition_v1beta1/types.rst
diff --git a/docs/instance_v1/types.rst b/docs/aiplatform/instance_v1/types.rst
similarity index 100%
rename from docs/instance_v1/types.rst
rename to docs/aiplatform/instance_v1/types.rst
diff --git a/docs/instance_v1beta1/types.rst b/docs/aiplatform/instance_v1beta1/types.rst
similarity index 100%
rename from docs/instance_v1beta1/types.rst
rename to docs/aiplatform/instance_v1beta1/types.rst
diff --git a/docs/params_v1/types.rst b/docs/aiplatform/params_v1/types.rst
similarity index 100%
rename from docs/params_v1/types.rst
rename to docs/aiplatform/params_v1/types.rst
diff --git a/docs/params_v1beta1/types.rst b/docs/aiplatform/params_v1beta1/types.rst
similarity index 100%
rename from docs/params_v1beta1/types.rst
rename to docs/aiplatform/params_v1beta1/types.rst
diff --git a/docs/prediction_v1/types.rst b/docs/aiplatform/prediction_v1/types.rst
similarity index 100%
rename from docs/prediction_v1/types.rst
rename to docs/aiplatform/prediction_v1/types.rst
diff --git a/docs/prediction_v1beta1/types.rst b/docs/aiplatform/prediction_v1beta1/types.rst
similarity index 100%
rename from docs/prediction_v1beta1/types.rst
rename to docs/aiplatform/prediction_v1beta1/types.rst
diff --git a/docs/aiplatform.rst b/docs/aiplatform/services.rst
similarity index 84%
rename from docs/aiplatform.rst
rename to docs/aiplatform/services.rst
index bf5cd4625b..0d21fe6bd1 100644
--- a/docs/aiplatform.rst
+++ b/docs/aiplatform/services.rst
@@ -3,4 +3,4 @@ Google Cloud Aiplatform SDK
.. automodule:: google.cloud.aiplatform
:members:
- :show-inheritance:
\ No newline at end of file
+ :show-inheritance:
diff --git a/docs/aiplatform/types.rst b/docs/aiplatform/types.rst
new file mode 100644
index 0000000000..119f762bca
--- /dev/null
+++ b/docs/aiplatform/types.rst
@@ -0,0 +1,13 @@
+Types for Google Cloud Aiplatform SDK API
+===========================================
+.. toctree::
+ :maxdepth: 2
+
+ instance_v1
+ instance_v1beta1
+ params_v1
+ params_v1beta1
+ prediction_v1
+ prediction_v1beta1
+ definition_v1
+ definition_v1beta1
diff --git a/docs/index.rst b/docs/index.rst
index 031271a261..6094720bd8 100644
--- a/docs/index.rst
+++ b/docs/index.rst
@@ -7,7 +7,9 @@ API Reference
.. toctree::
:maxdepth: 2
- aiplatform
+ aiplatform/services
+ aiplatform/types
+
aiplatform_v1/services
aiplatform_v1/types
@@ -22,4 +24,4 @@ For a list of all ``google-cloud-aiplatform`` releases:
.. toctree::
:maxdepth: 2
- changelog
\ No newline at end of file
+ changelog
From 5fdf151ee0d0a630c07a75dc8f19906e7ad1aa8a Mon Sep 17 00:00:00 2001
From: yan283 <104038473+yan283@users.noreply.github.com>
Date: Wed, 11 May 2022 14:26:37 -0700
Subject: [PATCH 04/14] fix: check in service proto file (#1174)
Co-authored-by: Ivan Cheung
---
.../_protos/match_service.proto | 136 ++++++++++++++++++
1 file changed, 136 insertions(+)
create mode 100644 google/cloud/aiplatform/matching_engine/_protos/match_service.proto
diff --git a/google/cloud/aiplatform/matching_engine/_protos/match_service.proto b/google/cloud/aiplatform/matching_engine/_protos/match_service.proto
new file mode 100644
index 0000000000..158b0f146a
--- /dev/null
+++ b/google/cloud/aiplatform/matching_engine/_protos/match_service.proto
@@ -0,0 +1,136 @@
+syntax = "proto3";
+
+package google.cloud.aiplatform.container.v1beta1;
+
+import "google/rpc/status.proto";
+
+// MatchService is a Google managed service for efficient vector similarity
+// search at scale.
+service MatchService {
+ // Returns the nearest neighbors for the query. If it is a sharded
+ // deployment, calls the other shards and aggregates the responses.
+ rpc Match(MatchRequest) returns (MatchResponse) {}
+
+ // Returns the nearest neighbors for batch queries. If it is a sharded
+ // deployment, calls the other shards and aggregates the responses.
+ rpc BatchMatch(BatchMatchRequest) returns (BatchMatchResponse) {}
+}
+
+// Parameters for a match query.
+message MatchRequest {
+ // The ID of the DeployedIndex that will serve the request.
+ // This MatchRequest is sent to a specific IndexEndpoint of the Control API,
+ // as per the IndexEndpoint.network. That IndexEndpoint also has
+ // IndexEndpoint.deployed_indexes, and each such index has an
+ // DeployedIndex.id field.
+ // The value of the field below must equal one of the DeployedIndex.id
+ // fields of the IndexEndpoint that is being called for this request.
+ string deployed_index_id = 1;
+
+ // The embedding values.
+ repeated float float_val = 2;
+
+ // The number of nearest neighbors to be retrieved from database for
+ // each query. If not set, will use the default from
+ // the service configuration.
+ int32 num_neighbors = 3;
+
+ // The list of restricts.
+ repeated Namespace restricts = 4;
+
+ // Crowding is a constraint on a neighbor list produced by nearest neighbor
+ // search requiring that no more than some value k' of the k neighbors
+ // returned have the same value of crowding_attribute.
+ // It's used for improving result diversity.
+ // This field is the maximum number of matches with the same crowding tag.
+ int32 per_crowding_attribute_num_neighbors = 5;
+
+ // The number of neighbors to find via approximate search before
+ // exact reordering is performed. If not set, the default value from scam
+ // config is used; if set, this value must be > 0.
+ int32 approx_num_neighbors = 6;
+
+ // The fraction of the number of leaves to search, set at query time, allows
+ // the user to tune search performance. Increasing this value results in both
+ // higher search accuracy and latency. The value should be between 0.0 and 1.0. If
+ // not set or set to 0.0, query uses the default value specified in
+ // NearestNeighborSearchConfig.TreeAHConfig.leaf_nodes_to_search_percent.
+ int32 leaf_nodes_to_search_percent_override = 7;
+}
+
+// Response of a match query.
+message MatchResponse {
+ message Neighbor {
+ // The ids of the matches.
+ string id = 1;
+
+ // The distances of the matches.
+ double distance = 2;
+ }
+ // All its neighbors.
+ repeated Neighbor neighbor = 1;
+}
+
+// Parameters for a batch match query.
+message BatchMatchRequest {
+ // Batched requests against one index.
+ message BatchMatchRequestPerIndex {
+ // The ID of the DeployedIndex that will serve the request.
+ string deployed_index_id = 1;
+
+ // The requests against the index identified by the above deployed_index_id.
+ repeated MatchRequest requests = 2;
+
+ // Selects the optimal batch size to use for low-level batching. Queries
+ // within each low level batch are executed sequentially while low level
+ // batches are executed in parallel.
+ // This field is optional, defaults to 0 if not set. A non-positive number
+ // disables low level batching, i.e. all queries are executed sequentially.
+ int32 low_level_batch_size = 3;
+ }
+
+ // The batch requests grouped by indexes.
+ repeated BatchMatchRequestPerIndex requests = 1;
+}
+
+// Response of a batch match query.
+message BatchMatchResponse {
+ // Batched responses for one index.
+ message BatchMatchResponsePerIndex {
+ // The ID of the DeployedIndex that produced the responses.
+ string deployed_index_id = 1;
+
+ // The match responses produced by the index identified by the above
+ // deployed_index_id. This field is set only when the query against that
+ // index succeeds.
+ repeated MatchResponse responses = 2;
+
+ // The status of response for the batch query identified by the above
+ // deployed_index_id.
+ google.rpc.Status status = 3;
+ }
+
+ // The batched responses grouped by indexes.
+ repeated BatchMatchResponsePerIndex responses = 1;
+}
+
+// Namespace specifies the rules for determining the datapoints that are
+// eligible for each matching query, overall query is an AND across namespaces.
+message Namespace {
+ // The string name of the namespace that this proto is specifying,
+ // such as "color", "shape", "geo", or "tags".
+ string name = 1;
+
+ // The allowed tokens in the namespace.
+ repeated string allow_tokens = 2;
+
+ // The denied tokens in the namespace.
+ // The denied tokens have exactly the same format as the token fields, but
+ // represent a negation. When a token is denied, then matches will be
+ // excluded whenever the other datapoint has that token.
+ //
+ // For example, if a query specifies {color: red, blue, !purple}, then that
+ // query will match datapoints that are red or blue, but if those points are
+ // also purple, then they will be excluded even if they are red/blue.
+ repeated string deny_tokens = 3;
+}
From a2eb8cc4f1618c53ceb96c59413dada4060e8880 Mon Sep 17 00:00:00 2001
From: Sam Goodman
Date: Wed, 18 May 2022 09:19:01 -0700
Subject: [PATCH 05/14] chore: Remove _v1 specific references in favor of
compat types and services (#1235)
Co-authored-by: Sam Goodman
---
google/cloud/aiplatform/compat/__init__.py | 22 ++++-
.../cloud/aiplatform/compat/types/__init__.py | 20 ++--
google/cloud/aiplatform/models.py | 3 +-
google/cloud/aiplatform/pipeline_jobs.py | 24 +++--
.../test_automl_forecasting_training_jobs.py | 11 +--
.../test_automl_image_training_jobs.py | 10 +-
.../test_automl_tabular_training_jobs.py | 11 +--
.../test_automl_text_training_jobs.py | 10 +-
.../test_automl_video_training_jobs.py | 10 +-
tests/unit/aiplatform/test_custom_job.py | 2 +-
tests/unit/aiplatform/test_datasets.py | 6 +-
tests/unit/aiplatform/test_end_to_end.py | 2 +-
tests/unit/aiplatform/test_endpoints.py | 12 +--
tests/unit/aiplatform/test_explain_lit.py | 8 +-
tests/unit/aiplatform/test_featurestores.py | 10 +-
.../test_hyperparameter_tuning_job.py | 2 +-
tests/unit/aiplatform/test_initializer.py | 4 +-
tests/unit/aiplatform/test_jobs.py | 4 +-
.../aiplatform/test_matching_engine_index.py | 6 +-
.../test_matching_engine_index_endpoint.py | 37 ++++---
tests/unit/aiplatform/test_metadata_store.py | 4 +-
.../unit/aiplatform/test_model_evaluation.py | 8 +-
tests/unit/aiplatform/test_models.py | 10 +-
tests/unit/aiplatform/test_pipeline_jobs.py | 98 +++++++++----------
tests/unit/aiplatform/test_tensorboard.py | 6 +-
tests/unit/aiplatform/test_training_jobs.py | 13 +--
tests/unit/aiplatform/test_uploader.py | 15 +--
tests/unit/aiplatform/test_uploader_main.py | 4 +-
28 files changed, 180 insertions(+), 192 deletions(-)
diff --git a/google/cloud/aiplatform/compat/__init__.py b/google/cloud/aiplatform/compat/__init__.py
index dd141b6653..6aea51d133 100644
--- a/google/cloud/aiplatform/compat/__init__.py
+++ b/google/cloud/aiplatform/compat/__init__.py
@@ -40,6 +40,10 @@
)
services.metadata_service_client = services.metadata_service_client_v1beta1
services.tensorboard_service_client = services.tensorboard_service_client_v1beta1
+ services.index_service_client = services.index_service_client_v1beta1
+ services.index_endpoint_service_client = (
+ services.index_endpoint_service_client_v1beta1
+ )
types.accelerator_type = types.accelerator_type_v1beta1
types.annotation = types.annotation_v1beta1
@@ -71,6 +75,8 @@
types.featurestore_online_service = types.featurestore_online_service_v1beta1
types.featurestore_service = types.featurestore_service_v1beta1
types.hyperparameter_tuning_job = types.hyperparameter_tuning_job_v1beta1
+ types.index = types.index_v1beta1
+ types.index_endpoint = types.index_endpoint_v1beta1
types.io = types.io_v1beta1
types.job_service = types.job_service_v1beta1
types.job_state = types.job_state_v1beta1
@@ -79,8 +85,8 @@
types.matching_engine_deployed_index_ref = (
types.matching_engine_deployed_index_ref_v1beta1
)
- types.matching_engine_index = types.matching_engine_index_v1beta1
- types.matching_engine_index_endpoint = types.matching_engine_index_endpoint_v1beta1
+ types.matching_engine_index = types.index_v1beta1
+ types.matching_engine_index_endpoint = types.index_endpoint_v1beta1
types.metadata_service = types.metadata_service_v1beta1
types.metadata_store = types.metadata_store_v1beta1
types.model = types.model_v1beta1
@@ -88,6 +94,7 @@
types.model_evaluation_slice = types.model_evaluation_slice_v1beta1
types.model_service = types.model_service_v1beta1
types.operation = types.operation_v1beta1
+ types.pipeline_job = types.pipeline_job_v1beta1
types.pipeline_service = types.pipeline_service_v1beta1
types.pipeline_state = types.pipeline_state_v1beta1
types.prediction_service = types.prediction_service_v1beta1
@@ -102,6 +109,7 @@
types.tensorboard_service = types.tensorboard_service_v1beta1
types.tensorboard_time_series = types.tensorboard_time_series_v1beta1
types.training_pipeline = types.training_pipeline_v1beta1
+ types.types = types.types_v1beta1
if DEFAULT_VERSION == V1:
@@ -117,6 +125,8 @@
services.prediction_service_client = services.prediction_service_client_v1
services.specialist_pool_service_client = services.specialist_pool_service_client_v1
services.tensorboard_service_client = services.tensorboard_service_client_v1
+ services.index_service_client = services.index_service_client_v1
+ services.index_endpoint_service_client = services.index_endpoint_service_client_v1
types.accelerator_type = types.accelerator_type_v1
types.annotation = types.annotation_v1
@@ -147,6 +157,8 @@
types.featurestore_online_service = types.featurestore_online_service_v1
types.featurestore_service = types.featurestore_service_v1
types.hyperparameter_tuning_job = types.hyperparameter_tuning_job_v1
+ types.index = types.index_v1
+ types.index_endpoint = types.index_endpoint_v1
types.io = types.io_v1
types.job_service = types.job_service_v1
types.job_state = types.job_state_v1
@@ -155,8 +167,8 @@
types.matching_engine_deployed_index_ref = (
types.matching_engine_deployed_index_ref_v1
)
- types.matching_engine_index = types.matching_engine_index_v1
- types.matching_engine_index_endpoint = types.matching_engine_index_endpoint_v1
+ types.matching_engine_index = types.index_v1
+ types.matching_engine_index_endpoint = types.index_endpoint_v1
types.metadata_service = types.metadata_service_v1
types.metadata_store = types.metadata_store_v1
types.model = types.model_v1
@@ -164,6 +176,7 @@
types.model_evaluation_slice = types.model_evaluation_slice_v1
types.model_service = types.model_service_v1
types.operation = types.operation_v1
+ types.pipeline_job = types.pipeline_job_v1
types.pipeline_service = types.pipeline_service_v1
types.pipeline_state = types.pipeline_state_v1
types.prediction_service = types.prediction_service_v1
@@ -178,6 +191,7 @@
types.tensorboard_service = types.tensorboard_service_v1
types.tensorboard_time_series = types.tensorboard_time_series_v1
types.training_pipeline = types.training_pipeline_v1
+ types.types = types.types_v1
__all__ = (
DEFAULT_VERSION,
diff --git a/google/cloud/aiplatform/compat/types/__init__.py b/google/cloud/aiplatform/compat/types/__init__.py
index fc8e7c0b30..14ff93f011 100644
--- a/google/cloud/aiplatform/compat/types/__init__.py
+++ b/google/cloud/aiplatform/compat/types/__init__.py
@@ -46,8 +46,8 @@
featurestore_monitoring as featurestore_monitoring_v1beta1,
featurestore_online_service as featurestore_online_service_v1beta1,
featurestore_service as featurestore_service_v1beta1,
- index as matching_engine_index_v1beta1,
- index_endpoint as matching_engine_index_endpoint_v1beta1,
+ index as index_v1beta1,
+ index_endpoint as index_endpoint_v1beta1,
hyperparameter_tuning_job as hyperparameter_tuning_job_v1beta1,
io as io_v1beta1,
job_service as job_service_v1beta1,
@@ -75,6 +75,7 @@
tensorboard_service as tensorboard_service_v1beta1,
tensorboard_time_series as tensorboard_time_series_v1beta1,
training_pipeline as training_pipeline_v1beta1,
+ types as types_v1beta1,
)
from google.cloud.aiplatform_v1.types import (
accelerator_type as accelerator_type_v1,
@@ -107,8 +108,8 @@
featurestore_online_service as featurestore_online_service_v1,
featurestore_service as featurestore_service_v1,
hyperparameter_tuning_job as hyperparameter_tuning_job_v1,
- index as matching_engine_index_v1,
- index_endpoint as matching_engine_index_endpoint_v1,
+ index as index_v1,
+ index_endpoint as index_endpoint_v1,
io as io_v1,
job_service as job_service_v1,
job_state as job_state_v1,
@@ -135,6 +136,7 @@
tensorboard_service as tensorboard_service_v1,
tensorboard_time_series as tensorboard_time_series_v1,
training_pipeline as training_pipeline_v1,
+ types as types_v1,
)
__all__ = (
@@ -174,8 +176,8 @@
machine_resources_v1,
manual_batch_tuning_parameters_v1,
matching_engine_deployed_index_ref_v1,
- matching_engine_index_v1,
- matching_engine_index_endpoint_v1,
+ index_v1,
+ index_endpoint_v1,
metadata_service_v1,
metadata_store_v1,
model_v1,
@@ -196,6 +198,7 @@
tensorboard_service_v1,
tensorboard_time_series_v1,
training_pipeline_v1,
+ types_v1,
# v1beta1
accelerator_type_v1beta1,
annotation_v1beta1,
@@ -233,8 +236,8 @@
machine_resources_v1beta1,
manual_batch_tuning_parameters_v1beta1,
matching_engine_deployed_index_ref_v1beta1,
- matching_engine_index_v1beta1,
- matching_engine_index_endpoint_v1beta1,
+ index_v1beta1,
+ index_endpoint_v1beta1,
metadata_service_v1beta1,
metadata_store_v1beta1,
model_v1beta1,
@@ -255,4 +258,5 @@
tensorboard_service_v1beta1,
tensorboard_time_series_v1beta1,
training_pipeline_v1beta1,
+ types_v1beta1,
)
diff --git a/google/cloud/aiplatform/models.py b/google/cloud/aiplatform/models.py
index 95f3044cbe..1016b717bf 100644
--- a/google/cloud/aiplatform/models.py
+++ b/google/cloud/aiplatform/models.py
@@ -40,7 +40,6 @@
from google.cloud.aiplatform.compat.types import (
encryption_spec as gca_encryption_spec,
endpoint as gca_endpoint_compat,
- endpoint_v1 as gca_endpoint_v1,
explanation as gca_explanation_compat,
io as gca_io_compat,
machine_resources as gca_machine_resources_compat,
@@ -1382,7 +1381,7 @@ def list(
credentials=credentials,
)
- def list_models(self) -> Sequence[gca_endpoint_v1.DeployedModel]:
+ def list_models(self) -> Sequence[gca_endpoint_compat.DeployedModel]:
"""Returns a list of the models deployed to this Endpoint.
Returns:
diff --git a/google/cloud/aiplatform/pipeline_jobs.py b/google/cloud/aiplatform/pipeline_jobs.py
index 4c8a3ad806..90d7e0f86d 100644
--- a/google/cloud/aiplatform/pipeline_jobs.py
+++ b/google/cloud/aiplatform/pipeline_jobs.py
@@ -30,24 +30,22 @@
from google.protobuf import json_format
from google.cloud.aiplatform.compat.types import (
- pipeline_job_v1 as gca_pipeline_job_v1,
- pipeline_state_v1 as gca_pipeline_state_v1,
+ pipeline_job as gca_pipeline_job,
+ pipeline_state as gca_pipeline_state,
)
_LOGGER = base.Logger(__name__)
_PIPELINE_COMPLETE_STATES = set(
[
- gca_pipeline_state_v1.PipelineState.PIPELINE_STATE_SUCCEEDED,
- gca_pipeline_state_v1.PipelineState.PIPELINE_STATE_FAILED,
- gca_pipeline_state_v1.PipelineState.PIPELINE_STATE_CANCELLED,
- gca_pipeline_state_v1.PipelineState.PIPELINE_STATE_PAUSED,
+ gca_pipeline_state.PipelineState.PIPELINE_STATE_SUCCEEDED,
+ gca_pipeline_state.PipelineState.PIPELINE_STATE_FAILED,
+ gca_pipeline_state.PipelineState.PIPELINE_STATE_CANCELLED,
+ gca_pipeline_state.PipelineState.PIPELINE_STATE_PAUSED,
]
)
-_PIPELINE_ERROR_STATES = set(
- [gca_pipeline_state_v1.PipelineState.PIPELINE_STATE_FAILED]
-)
+_PIPELINE_ERROR_STATES = set([gca_pipeline_state.PipelineState.PIPELINE_STATE_FAILED])
# Pattern for valid names used as a Vertex resource name.
_VALID_NAME_PATTERN = re.compile("^[a-z][-a-z0-9]{0,127}$")
@@ -205,7 +203,7 @@ def __init__(
builder.update_runtime_parameters(parameter_values)
runtime_config_dict = builder.build()
- runtime_config = gca_pipeline_job_v1.PipelineJob.RuntimeConfig()._pb
+ runtime_config = gca_pipeline_job.PipelineJob.RuntimeConfig()._pb
json_format.ParseDict(runtime_config_dict, runtime_config)
pipeline_name = pipeline_job["pipelineSpec"]["pipelineInfo"]["name"]
@@ -225,7 +223,7 @@ def __init__(
if enable_caching is not None:
_set_enable_caching_value(pipeline_job["pipelineSpec"], enable_caching)
- self._gca_resource = gca_pipeline_job_v1.PipelineJob(
+ self._gca_resource = gca_pipeline_job.PipelineJob(
display_name=display_name,
pipeline_spec=pipeline_job["pipelineSpec"],
labels=labels,
@@ -326,7 +324,7 @@ def pipeline_spec(self):
return self._gca_resource.pipeline_spec
@property
- def state(self) -> Optional[gca_pipeline_state_v1.PipelineState]:
+ def state(self) -> Optional[gca_pipeline_state.PipelineState]:
"""Current pipeline state."""
self._sync_gca_resource()
return self._gca_resource.state
@@ -337,7 +335,7 @@ def has_failed(self) -> bool:
False otherwise.
"""
- return self.state == gca_pipeline_state_v1.PipelineState.PIPELINE_STATE_FAILED
+ return self.state == gca_pipeline_state.PipelineState.PIPELINE_STATE_FAILED
def _dashboard_uri(self) -> str:
"""Helper method to compose the dashboard uri where pipeline can be
diff --git a/tests/unit/aiplatform/test_automl_forecasting_training_jobs.py b/tests/unit/aiplatform/test_automl_forecasting_training_jobs.py
index 6a96d656e8..f7d0856460 100644
--- a/tests/unit/aiplatform/test_automl_forecasting_training_jobs.py
+++ b/tests/unit/aiplatform/test_automl_forecasting_training_jobs.py
@@ -26,13 +26,12 @@
from google.cloud.aiplatform import schema
from google.cloud.aiplatform.training_jobs import AutoMLForecastingTrainingJob
-from google.cloud.aiplatform_v1.services.model_service import (
- client as model_service_client,
+from google.cloud.aiplatform.compat.services import (
+ model_service_client,
+ pipeline_service_client,
)
-from google.cloud.aiplatform_v1.services.pipeline_service import (
- client as pipeline_service_client,
-)
-from google.cloud.aiplatform_v1.types import (
+
+from google.cloud.aiplatform.compat.types import (
dataset as gca_dataset,
model as gca_model,
pipeline_state as gca_pipeline_state,
diff --git a/tests/unit/aiplatform/test_automl_image_training_jobs.py b/tests/unit/aiplatform/test_automl_image_training_jobs.py
index 95e3c3f641..ae87741491 100644
--- a/tests/unit/aiplatform/test_automl_image_training_jobs.py
+++ b/tests/unit/aiplatform/test_automl_image_training_jobs.py
@@ -30,13 +30,11 @@
from google.cloud.aiplatform import schema
from google.cloud.aiplatform import training_jobs
-from google.cloud.aiplatform_v1.services.model_service import (
- client as model_service_client,
+from google.cloud.aiplatform.compat.services import (
+ model_service_client,
+ pipeline_service_client,
)
-from google.cloud.aiplatform_v1.services.pipeline_service import (
- client as pipeline_service_client,
-)
-from google.cloud.aiplatform_v1.types import (
+from google.cloud.aiplatform.compat.types import (
dataset as gca_dataset,
encryption_spec as gca_encryption_spec,
model as gca_model,
diff --git a/tests/unit/aiplatform/test_automl_tabular_training_jobs.py b/tests/unit/aiplatform/test_automl_tabular_training_jobs.py
index 3119793c14..cff46f3397 100644
--- a/tests/unit/aiplatform/test_automl_tabular_training_jobs.py
+++ b/tests/unit/aiplatform/test_automl_tabular_training_jobs.py
@@ -26,13 +26,12 @@
from google.cloud.aiplatform import schema
from google.cloud.aiplatform import training_jobs
-from google.cloud.aiplatform_v1.services.model_service import (
- client as model_service_client,
+from google.cloud.aiplatform.compat.services import (
+ model_service_client,
+ pipeline_service_client,
)
-from google.cloud.aiplatform_v1.services.pipeline_service import (
- client as pipeline_service_client,
-)
-from google.cloud.aiplatform_v1.types import (
+
+from google.cloud.aiplatform.compat.types import (
dataset as gca_dataset,
encryption_spec as gca_encryption_spec,
model as gca_model,
diff --git a/tests/unit/aiplatform/test_automl_text_training_jobs.py b/tests/unit/aiplatform/test_automl_text_training_jobs.py
index 7712c758fa..5204de45da 100644
--- a/tests/unit/aiplatform/test_automl_text_training_jobs.py
+++ b/tests/unit/aiplatform/test_automl_text_training_jobs.py
@@ -27,13 +27,11 @@
from google.cloud.aiplatform import schema
from google.cloud.aiplatform import training_jobs
-from google.cloud.aiplatform_v1.services.model_service import (
- client as model_service_client,
+from google.cloud.aiplatform.compat.services import (
+ model_service_client,
+ pipeline_service_client,
)
-from google.cloud.aiplatform_v1.services.pipeline_service import (
- client as pipeline_service_client,
-)
-from google.cloud.aiplatform_v1.types import (
+from google.cloud.aiplatform.compat.types import (
dataset as gca_dataset,
encryption_spec as gca_encryption_spec,
model as gca_model,
diff --git a/tests/unit/aiplatform/test_automl_video_training_jobs.py b/tests/unit/aiplatform/test_automl_video_training_jobs.py
index b8abf02058..c65acec821 100644
--- a/tests/unit/aiplatform/test_automl_video_training_jobs.py
+++ b/tests/unit/aiplatform/test_automl_video_training_jobs.py
@@ -30,13 +30,11 @@
from google.cloud.aiplatform import schema
from google.cloud.aiplatform import training_jobs
-from google.cloud.aiplatform_v1.services.model_service import (
- client as model_service_client,
+from google.cloud.aiplatform.compat.services import (
+ model_service_client,
+ pipeline_service_client,
)
-from google.cloud.aiplatform_v1.services.pipeline_service import (
- client as pipeline_service_client,
-)
-from google.cloud.aiplatform_v1.types import (
+from google.cloud.aiplatform.compat.types import (
dataset as gca_dataset,
encryption_spec as gca_encryption_spec,
model as gca_model,
diff --git a/tests/unit/aiplatform/test_custom_job.py b/tests/unit/aiplatform/test_custom_job.py
index c0062a65e9..fea6f70c38 100644
--- a/tests/unit/aiplatform/test_custom_job.py
+++ b/tests/unit/aiplatform/test_custom_job.py
@@ -36,7 +36,7 @@
from google.cloud.aiplatform.compat.types import (
encryption_spec as gca_encryption_spec_compat,
)
-from google.cloud.aiplatform_v1.services.job_service import client as job_service_client
+from google.cloud.aiplatform.compat.services import job_service_client
_TEST_PROJECT = "test-project"
_TEST_LOCATION = "us-central1"
diff --git a/tests/unit/aiplatform/test_datasets.py b/tests/unit/aiplatform/test_datasets.py
index 13ef13aebd..29dd841a52 100644
--- a/tests/unit/aiplatform/test_datasets.py
+++ b/tests/unit/aiplatform/test_datasets.py
@@ -39,11 +39,9 @@
from google.cloud import bigquery
from google.cloud import storage
-from google.cloud.aiplatform_v1.services.dataset_service import (
- client as dataset_service_client,
-)
+from google.cloud.aiplatform.compat.services import dataset_service_client
-from google.cloud.aiplatform_v1.types import (
+from google.cloud.aiplatform.compat.types import (
dataset as gca_dataset,
dataset_service as gca_dataset_service,
encryption_spec as gca_encryption_spec,
diff --git a/tests/unit/aiplatform/test_end_to_end.py b/tests/unit/aiplatform/test_end_to_end.py
index c31b17ab1c..97a95ef6dd 100644
--- a/tests/unit/aiplatform/test_end_to_end.py
+++ b/tests/unit/aiplatform/test_end_to_end.py
@@ -25,7 +25,7 @@
from google.cloud.aiplatform import models
from google.cloud.aiplatform import schema
-from google.cloud.aiplatform_v1.types import (
+from google.cloud.aiplatform.compat.types import (
dataset as gca_dataset,
encryption_spec as gca_encryption_spec,
io as gca_io,
diff --git a/tests/unit/aiplatform/test_endpoints.py b/tests/unit/aiplatform/test_endpoints.py
index 23c4781406..e0aba5ed39 100644
--- a/tests/unit/aiplatform/test_endpoints.py
+++ b/tests/unit/aiplatform/test_endpoints.py
@@ -32,14 +32,10 @@
from google.cloud.aiplatform import models
from google.cloud.aiplatform import utils
-from google.cloud.aiplatform_v1.services.model_service import (
- client as model_service_client,
-)
-from google.cloud.aiplatform_v1.services.endpoint_service import (
- client as endpoint_service_client,
-)
-from google.cloud.aiplatform_v1.services.prediction_service import (
- client as prediction_service_client,
+from google.cloud.aiplatform.compat.services import (
+ model_service_client,
+ endpoint_service_client,
+ prediction_service_client,
)
from google.cloud.aiplatform.compat.types import (
endpoint as gca_endpoint,
diff --git a/tests/unit/aiplatform/test_explain_lit.py b/tests/unit/aiplatform/test_explain_lit.py
index c4cc538868..e5a03c2a2e 100644
--- a/tests/unit/aiplatform/test_explain_lit.py
+++ b/tests/unit/aiplatform/test_explain_lit.py
@@ -36,11 +36,9 @@
open_lit,
set_up_and_open_lit,
)
-from google.cloud.aiplatform_v1.services.endpoint_service import (
- client as endpoint_service_client,
-)
-from google.cloud.aiplatform_v1.services.prediction_service import (
- client as prediction_service_client,
+from google.cloud.aiplatform.compat.services import (
+ endpoint_service_client,
+ prediction_service_client,
)
from importlib import reload
from lit_nlp.api import types as lit_types
diff --git a/tests/unit/aiplatform/test_featurestores.py b/tests/unit/aiplatform/test_featurestores.py
index 3691189325..4c556abd17 100644
--- a/tests/unit/aiplatform/test_featurestores.py
+++ b/tests/unit/aiplatform/test_featurestores.py
@@ -35,13 +35,11 @@
from google.cloud.aiplatform.utils import resource_manager_utils
from google.cloud.aiplatform.utils import featurestore_utils
-from google.cloud.aiplatform_v1.services.featurestore_service import (
- client as featurestore_service_client,
+from google.cloud.aiplatform.compat.services import (
+ featurestore_service_client,
+ featurestore_online_serving_service_client,
)
-from google.cloud.aiplatform_v1.services.featurestore_online_serving_service import (
- client as featurestore_online_serving_service_client,
-)
-from google.cloud.aiplatform_v1.types import (
+from google.cloud.aiplatform.compat.types import (
encryption_spec as gca_encryption_spec,
entity_type as gca_entity_type,
feature as gca_feature,
diff --git a/tests/unit/aiplatform/test_hyperparameter_tuning_job.py b/tests/unit/aiplatform/test_hyperparameter_tuning_job.py
index 727f106fb5..601079d751 100644
--- a/tests/unit/aiplatform/test_hyperparameter_tuning_job.py
+++ b/tests/unit/aiplatform/test_hyperparameter_tuning_job.py
@@ -33,7 +33,7 @@
job_state as gca_job_state_compat,
study as gca_study_compat,
)
-from google.cloud.aiplatform_v1.services.job_service import client as job_service_client
+from google.cloud.aiplatform.compat.services import job_service_client
import test_custom_job
diff --git a/tests/unit/aiplatform/test_initializer.py b/tests/unit/aiplatform/test_initializer.py
index d7e3e3ad8c..a74c4d02cb 100644
--- a/tests/unit/aiplatform/test_initializer.py
+++ b/tests/unit/aiplatform/test_initializer.py
@@ -30,8 +30,8 @@
from google.cloud.aiplatform import utils
from google.cloud.aiplatform.utils import resource_manager_utils
-from google.cloud.aiplatform_v1.services.model_service import (
- client as model_service_client,
+from google.cloud.aiplatform.compat.services import (
+ model_service_client,
)
_TEST_PROJECT = "test-project"
diff --git a/tests/unit/aiplatform/test_jobs.py b/tests/unit/aiplatform/test_jobs.py
index 73a4f8da0c..c6cc6ffd03 100644
--- a/tests/unit/aiplatform/test_jobs.py
+++ b/tests/unit/aiplatform/test_jobs.py
@@ -40,7 +40,9 @@
manual_batch_tuning_parameters as gca_manual_batch_tuning_parameters_compat,
)
-from google.cloud.aiplatform_v1.services.job_service import client as job_service_client
+from google.cloud.aiplatform.compat.services import (
+ job_service_client,
+)
_TEST_API_CLIENT = job_service_client.JobServiceClient
diff --git a/tests/unit/aiplatform/test_matching_engine_index.py b/tests/unit/aiplatform/test_matching_engine_index.py
index bf4c3d1232..3e415ce8af 100644
--- a/tests/unit/aiplatform/test_matching_engine_index.py
+++ b/tests/unit/aiplatform/test_matching_engine_index.py
@@ -28,11 +28,11 @@
from google.cloud import aiplatform
from google.cloud.aiplatform import base
from google.cloud.aiplatform import initializer
-from google.cloud.aiplatform_v1.services.index_service import (
- client as index_service_client,
+from google.cloud.aiplatform.compat.services import (
+ index_service_client,
)
-from google.cloud.aiplatform_v1.types import index as gca_index
+from google.cloud.aiplatform.compat.types import index as gca_index
# project
_TEST_PROJECT = "test-project"
diff --git a/tests/unit/aiplatform/test_matching_engine_index_endpoint.py b/tests/unit/aiplatform/test_matching_engine_index_endpoint.py
index cd3dd78ed2..516d1f3766 100644
--- a/tests/unit/aiplatform/test_matching_engine_index_endpoint.py
+++ b/tests/unit/aiplatform/test_matching_engine_index_endpoint.py
@@ -26,18 +26,15 @@
from google.cloud.aiplatform import initializer
from google.cloud.aiplatform.compat.types import (
matching_engine_deployed_index_ref as gca_matching_engine_deployed_index_ref,
- matching_engine_index_endpoint as gca_matching_engine_index_endpoint,
-)
-from google.cloud.aiplatform_v1.services.index_endpoint_service import (
- client as index_endpoint_service_client,
-)
-from google.cloud.aiplatform_v1.services.index_service import (
- client as index_service_client,
-)
-from google.cloud.aiplatform_v1.types import (
- index as gca_index,
index_endpoint as gca_index_endpoint,
+ index as gca_index,
+)
+
+from google.cloud.aiplatform.compat.services import (
+ index_endpoint_service_client,
+ index_service_client,
)
+
from google.protobuf import field_mask_pb2
import pytest
@@ -254,7 +251,7 @@ def get_index_endpoint_mock():
description=_TEST_INDEX_ENDPOINT_DESCRIPTION,
)
index_endpoint.deployed_indexes = [
- gca_matching_engine_index_endpoint.DeployedIndex(
+ gca_index_endpoint.DeployedIndex(
id=_TEST_DEPLOYED_INDEX_ID,
index=_TEST_INDEX_NAME,
display_name=_TEST_DEPLOYED_INDEX_DISPLAY_NAME,
@@ -265,14 +262,14 @@ def get_index_endpoint_mock():
"min_replica_count": _TEST_MIN_REPLICA_COUNT,
"max_replica_count": _TEST_MAX_REPLICA_COUNT,
},
- deployed_index_auth_config=gca_matching_engine_index_endpoint.DeployedIndexAuthConfig(
- auth_provider=gca_matching_engine_index_endpoint.DeployedIndexAuthConfig.AuthProvider(
+ deployed_index_auth_config=gca_index_endpoint.DeployedIndexAuthConfig(
+ auth_provider=gca_index_endpoint.DeployedIndexAuthConfig.AuthProvider(
audiences=_TEST_AUTH_CONFIG_AUDIENCES,
allowed_issuers=_TEST_AUTH_CONFIG_ALLOWED_ISSUERS,
)
),
),
- gca_matching_engine_index_endpoint.DeployedIndex(
+ gca_index_endpoint.DeployedIndex(
id=f"{_TEST_DEPLOYED_INDEX_ID}_2",
index=f"{_TEST_INDEX_NAME}_2",
display_name=_TEST_DEPLOYED_INDEX_DISPLAY_NAME,
@@ -283,8 +280,8 @@ def get_index_endpoint_mock():
"min_replica_count": _TEST_MIN_REPLICA_COUNT,
"max_replica_count": _TEST_MAX_REPLICA_COUNT,
},
- deployed_index_auth_config=gca_matching_engine_index_endpoint.DeployedIndexAuthConfig(
- auth_provider=gca_matching_engine_index_endpoint.DeployedIndexAuthConfig.AuthProvider(
+ deployed_index_auth_config=gca_index_endpoint.DeployedIndexAuthConfig(
+ auth_provider=gca_index_endpoint.DeployedIndexAuthConfig.AuthProvider(
audiences=_TEST_AUTH_CONFIG_AUDIENCES,
allowed_issuers=_TEST_AUTH_CONFIG_ALLOWED_ISSUERS,
)
@@ -519,7 +516,7 @@ def test_deploy_index(self, deploy_index_mock, undeploy_index_mock):
deploy_index_mock.assert_called_once_with(
index_endpoint=my_index_endpoint.resource_name,
- deployed_index=gca_matching_engine_index_endpoint.DeployedIndex(
+ deployed_index=gca_index_endpoint.DeployedIndex(
id=_TEST_DEPLOYED_INDEX_ID,
index=my_index.resource_name,
display_name=_TEST_DEPLOYED_INDEX_DISPLAY_NAME,
@@ -530,8 +527,8 @@ def test_deploy_index(self, deploy_index_mock, undeploy_index_mock):
"min_replica_count": _TEST_MIN_REPLICA_COUNT,
"max_replica_count": _TEST_MAX_REPLICA_COUNT,
},
- deployed_index_auth_config=gca_matching_engine_index_endpoint.DeployedIndexAuthConfig(
- auth_provider=gca_matching_engine_index_endpoint.DeployedIndexAuthConfig.AuthProvider(
+ deployed_index_auth_config=gca_index_endpoint.DeployedIndexAuthConfig(
+ auth_provider=gca_index_endpoint.DeployedIndexAuthConfig.AuthProvider(
audiences=_TEST_AUTH_CONFIG_AUDIENCES,
allowed_issuers=_TEST_AUTH_CONFIG_ALLOWED_ISSUERS,
)
@@ -567,7 +564,7 @@ def test_mutate_deployed_index(self, mutate_deployed_index_mock):
mutate_deployed_index_mock.assert_called_once_with(
index_endpoint=_TEST_INDEX_ENDPOINT_NAME,
- deployed_index=gca_matching_engine_index_endpoint.DeployedIndex(
+ deployed_index=gca_index_endpoint.DeployedIndex(
id=_TEST_DEPLOYED_INDEX_ID,
automatic_resources={
"min_replica_count": _TEST_MIN_REPLICA_COUNT_UPDATED,
diff --git a/tests/unit/aiplatform/test_metadata_store.py b/tests/unit/aiplatform/test_metadata_store.py
index 658972f689..4d0ab6819c 100644
--- a/tests/unit/aiplatform/test_metadata_store.py
+++ b/tests/unit/aiplatform/test_metadata_store.py
@@ -31,8 +31,8 @@
from google.cloud.aiplatform.metadata import metadata_store
from google.cloud.aiplatform_v1 import MetadataServiceClient
from google.cloud.aiplatform_v1 import MetadataStore as GapicMetadataStore
-from google.cloud.aiplatform_v1.types import encryption_spec as gca_encryption_spec
-from google.cloud.aiplatform_v1.types import metadata_service
+from google.cloud.aiplatform.compat.types import encryption_spec as gca_encryption_spec
+from google.cloud.aiplatform.compat.types import metadata_service
# project
_TEST_PROJECT = "test-project"
diff --git a/tests/unit/aiplatform/test_model_evaluation.py b/tests/unit/aiplatform/test_model_evaluation.py
index c5c5cd9ac3..91cbd77509 100644
--- a/tests/unit/aiplatform/test_model_evaluation.py
+++ b/tests/unit/aiplatform/test_model_evaluation.py
@@ -23,14 +23,16 @@
from google.cloud.aiplatform import base
from google.cloud.aiplatform import models
-from google.cloud.aiplatform_v1.services.model_service import (
- client as model_service_client,
+from google.cloud.aiplatform.compat.services import (
+ model_service_client,
)
from google.cloud.aiplatform.compat.types import model as gca_model
-from google.cloud.aiplatform_v1.types import model_evaluation as gca_model_evaluation
+from google.cloud.aiplatform.compat.types import (
+ model_evaluation as gca_model_evaluation,
+)
_TEST_PROJECT = "test-project"
_TEST_LOCATION = "us-central1"
diff --git a/tests/unit/aiplatform/test_models.py b/tests/unit/aiplatform/test_models.py
index eaf63d9fdd..206fb59f6f 100644
--- a/tests/unit/aiplatform/test_models.py
+++ b/tests/unit/aiplatform/test_models.py
@@ -32,12 +32,10 @@
from google.cloud.aiplatform import models
from google.cloud.aiplatform import utils
-from google.cloud.aiplatform_v1.services.endpoint_service import (
- client as endpoint_service_client,
-)
-from google.cloud.aiplatform_v1.services.job_service import client as job_service_client
-from google.cloud.aiplatform_v1.services.model_service import (
- client as model_service_client,
+from google.cloud.aiplatform.compat.services import (
+ endpoint_service_client,
+ model_service_client,
+ job_service_client,
)
from google.cloud.aiplatform.compat.services import pipeline_service_client
from google.cloud.aiplatform.compat.types import (
diff --git a/tests/unit/aiplatform/test_pipeline_jobs.py b/tests/unit/aiplatform/test_pipeline_jobs.py
index df5e294b03..090cfa62bc 100644
--- a/tests/unit/aiplatform/test_pipeline_jobs.py
+++ b/tests/unit/aiplatform/test_pipeline_jobs.py
@@ -32,12 +32,12 @@
from google.cloud import storage
from google.protobuf import json_format
-from google.cloud.aiplatform_v1.services.pipeline_service import (
- client as pipeline_service_client_v1,
+from google.cloud.aiplatform.compat.services import (
+ pipeline_service_client,
)
-from google.cloud.aiplatform_v1.types import (
- pipeline_job as gca_pipeline_job_v1,
- pipeline_state as gca_pipeline_state_v1,
+from google.cloud.aiplatform.compat.types import (
+ pipeline_job as gca_pipeline_job,
+ pipeline_state as gca_pipeline_state,
)
_TEST_PROJECT = "test-project"
@@ -188,11 +188,11 @@
@pytest.fixture
def mock_pipeline_service_create():
with mock.patch.object(
- pipeline_service_client_v1.PipelineServiceClient, "create_pipeline_job"
+ pipeline_service_client.PipelineServiceClient, "create_pipeline_job"
) as mock_create_pipeline_job:
- mock_create_pipeline_job.return_value = gca_pipeline_job_v1.PipelineJob(
+ mock_create_pipeline_job.return_value = gca_pipeline_job.PipelineJob(
name=_TEST_PIPELINE_JOB_NAME,
- state=gca_pipeline_state_v1.PipelineState.PIPELINE_STATE_SUCCEEDED,
+ state=gca_pipeline_state.PipelineState.PIPELINE_STATE_SUCCEEDED,
create_time=_TEST_PIPELINE_CREATE_TIME,
service_account=_TEST_SERVICE_ACCOUNT,
network=_TEST_NETWORK,
@@ -201,7 +201,7 @@ def mock_pipeline_service_create():
def make_pipeline_job(state):
- return gca_pipeline_job_v1.PipelineJob(
+ return gca_pipeline_job.PipelineJob(
name=_TEST_PIPELINE_JOB_NAME,
state=state,
create_time=_TEST_PIPELINE_CREATE_TIME,
@@ -213,35 +213,33 @@ def make_pipeline_job(state):
@pytest.fixture
def mock_pipeline_service_get():
with mock.patch.object(
- pipeline_service_client_v1.PipelineServiceClient, "get_pipeline_job"
+ pipeline_service_client.PipelineServiceClient, "get_pipeline_job"
) as mock_get_pipeline_job:
mock_get_pipeline_job.side_effect = [
+ make_pipeline_job(gca_pipeline_state.PipelineState.PIPELINE_STATE_RUNNING),
make_pipeline_job(
- gca_pipeline_state_v1.PipelineState.PIPELINE_STATE_RUNNING
+ gca_pipeline_state.PipelineState.PIPELINE_STATE_SUCCEEDED
),
make_pipeline_job(
- gca_pipeline_state_v1.PipelineState.PIPELINE_STATE_SUCCEEDED
+ gca_pipeline_state.PipelineState.PIPELINE_STATE_SUCCEEDED
),
make_pipeline_job(
- gca_pipeline_state_v1.PipelineState.PIPELINE_STATE_SUCCEEDED
+ gca_pipeline_state.PipelineState.PIPELINE_STATE_SUCCEEDED
),
make_pipeline_job(
- gca_pipeline_state_v1.PipelineState.PIPELINE_STATE_SUCCEEDED
+ gca_pipeline_state.PipelineState.PIPELINE_STATE_SUCCEEDED
),
make_pipeline_job(
- gca_pipeline_state_v1.PipelineState.PIPELINE_STATE_SUCCEEDED
+ gca_pipeline_state.PipelineState.PIPELINE_STATE_SUCCEEDED
),
make_pipeline_job(
- gca_pipeline_state_v1.PipelineState.PIPELINE_STATE_SUCCEEDED
+ gca_pipeline_state.PipelineState.PIPELINE_STATE_SUCCEEDED
),
make_pipeline_job(
- gca_pipeline_state_v1.PipelineState.PIPELINE_STATE_SUCCEEDED
+ gca_pipeline_state.PipelineState.PIPELINE_STATE_SUCCEEDED
),
make_pipeline_job(
- gca_pipeline_state_v1.PipelineState.PIPELINE_STATE_SUCCEEDED
- ),
- make_pipeline_job(
- gca_pipeline_state_v1.PipelineState.PIPELINE_STATE_SUCCEEDED
+ gca_pipeline_state.PipelineState.PIPELINE_STATE_SUCCEEDED
),
]
@@ -251,18 +249,12 @@ def mock_pipeline_service_get():
@pytest.fixture
def mock_pipeline_service_get_with_fail():
with mock.patch.object(
- pipeline_service_client_v1.PipelineServiceClient, "get_pipeline_job"
+ pipeline_service_client.PipelineServiceClient, "get_pipeline_job"
) as mock_get_pipeline_job:
mock_get_pipeline_job.side_effect = [
- make_pipeline_job(
- gca_pipeline_state_v1.PipelineState.PIPELINE_STATE_RUNNING
- ),
- make_pipeline_job(
- gca_pipeline_state_v1.PipelineState.PIPELINE_STATE_RUNNING
- ),
- make_pipeline_job(
- gca_pipeline_state_v1.PipelineState.PIPELINE_STATE_FAILED
- ),
+ make_pipeline_job(gca_pipeline_state.PipelineState.PIPELINE_STATE_RUNNING),
+ make_pipeline_job(gca_pipeline_state.PipelineState.PIPELINE_STATE_RUNNING),
+ make_pipeline_job(gca_pipeline_state.PipelineState.PIPELINE_STATE_FAILED),
]
yield mock_get_pipeline_job
@@ -271,7 +263,7 @@ def mock_pipeline_service_get_with_fail():
@pytest.fixture
def mock_pipeline_service_cancel():
with mock.patch.object(
- pipeline_service_client_v1.PipelineServiceClient, "cancel_pipeline_job"
+ pipeline_service_client.PipelineServiceClient, "cancel_pipeline_job"
) as mock_cancel_pipeline_job:
yield mock_cancel_pipeline_job
@@ -279,7 +271,7 @@ def mock_pipeline_service_cancel():
@pytest.fixture
def mock_pipeline_service_list():
with mock.patch.object(
- pipeline_service_client_v1.PipelineServiceClient, "list_pipeline_jobs"
+ pipeline_service_client.PipelineServiceClient, "list_pipeline_jobs"
) as mock_list_pipeline_jobs:
yield mock_list_pipeline_jobs
@@ -351,14 +343,14 @@ def test_run_call_pipeline_service_create(
"gcsOutputDirectory": _TEST_GCS_BUCKET_NAME,
"parameterValues": _TEST_PIPELINE_PARAMETER_VALUES,
}
- runtime_config = gca_pipeline_job_v1.PipelineJob.RuntimeConfig()._pb
+ runtime_config = gca_pipeline_job.PipelineJob.RuntimeConfig()._pb
json_format.ParseDict(expected_runtime_config_dict, runtime_config)
job_spec = yaml.safe_load(job_spec)
pipeline_spec = job_spec.get("pipelineSpec") or job_spec
# Construct expected request
- expected_gapic_pipeline_job = gca_pipeline_job_v1.PipelineJob(
+ expected_gapic_pipeline_job = gca_pipeline_job.PipelineJob(
display_name=_TEST_PIPELINE_JOB_DISPLAY_NAME,
pipeline_spec={
"components": {},
@@ -383,7 +375,7 @@ def test_run_call_pipeline_service_create(
)
assert job._gca_resource == make_pipeline_job(
- gca_pipeline_state_v1.PipelineState.PIPELINE_STATE_SUCCEEDED
+ gca_pipeline_state.PipelineState.PIPELINE_STATE_SUCCEEDED
)
@pytest.mark.parametrize(
@@ -432,14 +424,14 @@ def test_run_call_pipeline_service_create_with_timeout(
"gcsOutputDirectory": _TEST_GCS_BUCKET_NAME,
"parameterValues": _TEST_PIPELINE_PARAMETER_VALUES,
}
- runtime_config = gca_pipeline_job_v1.PipelineJob.RuntimeConfig()._pb
+ runtime_config = gca_pipeline_job.PipelineJob.RuntimeConfig()._pb
json_format.ParseDict(expected_runtime_config_dict, runtime_config)
job_spec = yaml.safe_load(job_spec)
pipeline_spec = job_spec.get("pipelineSpec") or job_spec
# Construct expected request
- expected_gapic_pipeline_job = gca_pipeline_job_v1.PipelineJob(
+ expected_gapic_pipeline_job = gca_pipeline_job.PipelineJob(
display_name=_TEST_PIPELINE_JOB_DISPLAY_NAME,
pipeline_spec={
"components": {},
@@ -464,7 +456,7 @@ def test_run_call_pipeline_service_create_with_timeout(
# )
# assert job._gca_resource == make_pipeline_job(
- # gca_pipeline_state_v1.PipelineState.PIPELINE_STATE_SUCCEEDED
+ # gca_pipeline_state.PipelineState.PIPELINE_STATE_SUCCEEDED
# )
@pytest.mark.parametrize(
@@ -512,14 +504,14 @@ def test_run_call_pipeline_service_create_with_timeout_not_explicitly_set(
"gcsOutputDirectory": _TEST_GCS_BUCKET_NAME,
"parameterValues": _TEST_PIPELINE_PARAMETER_VALUES,
}
- runtime_config = gca_pipeline_job_v1.PipelineJob.RuntimeConfig()._pb
+ runtime_config = gca_pipeline_job.PipelineJob.RuntimeConfig()._pb
json_format.ParseDict(expected_runtime_config_dict, runtime_config)
job_spec = yaml.safe_load(job_spec)
pipeline_spec = job_spec.get("pipelineSpec") or job_spec
# Construct expected request
- expected_gapic_pipeline_job = gca_pipeline_job_v1.PipelineJob(
+ expected_gapic_pipeline_job = gca_pipeline_job.PipelineJob(
display_name=_TEST_PIPELINE_JOB_DISPLAY_NAME,
pipeline_spec={
"components": {},
@@ -585,14 +577,14 @@ def test_run_call_pipeline_service_create_legacy(
"gcsOutputDirectory": _TEST_GCS_BUCKET_NAME,
"parameters": {"string_param": {"stringValue": "hello"}},
}
- runtime_config = gca_pipeline_job_v1.PipelineJob.RuntimeConfig()._pb
+ runtime_config = gca_pipeline_job.PipelineJob.RuntimeConfig()._pb
json_format.ParseDict(expected_runtime_config_dict, runtime_config)
job_spec = yaml.safe_load(job_spec)
pipeline_spec = job_spec.get("pipelineSpec") or job_spec
# Construct expected request
- expected_gapic_pipeline_job = gca_pipeline_job_v1.PipelineJob(
+ expected_gapic_pipeline_job = gca_pipeline_job.PipelineJob(
display_name=_TEST_PIPELINE_JOB_DISPLAY_NAME,
pipeline_spec={
"components": {},
@@ -617,7 +609,7 @@ def test_run_call_pipeline_service_create_legacy(
)
assert job._gca_resource == make_pipeline_job(
- gca_pipeline_state_v1.PipelineState.PIPELINE_STATE_SUCCEEDED
+ gca_pipeline_state.PipelineState.PIPELINE_STATE_SUCCEEDED
)
@pytest.mark.parametrize(
@@ -666,14 +658,14 @@ def test_run_call_pipeline_service_create_tfx(
"gcsOutputDirectory": _TEST_GCS_BUCKET_NAME,
"parameters": {"string_param": {"stringValue": "hello"}},
}
- runtime_config = gca_pipeline_job_v1.PipelineJob.RuntimeConfig()._pb
+ runtime_config = gca_pipeline_job.PipelineJob.RuntimeConfig()._pb
json_format.ParseDict(expected_runtime_config_dict, runtime_config)
job_spec = yaml.safe_load(job_spec)
pipeline_spec = job_spec.get("pipelineSpec") or job_spec
# Construct expected request
- expected_gapic_pipeline_job = gca_pipeline_job_v1.PipelineJob(
+ expected_gapic_pipeline_job = gca_pipeline_job.PipelineJob(
display_name=_TEST_PIPELINE_JOB_DISPLAY_NAME,
pipeline_spec={
"components": {},
@@ -699,7 +691,7 @@ def test_run_call_pipeline_service_create_tfx(
)
assert job._gca_resource == make_pipeline_job(
- gca_pipeline_state_v1.PipelineState.PIPELINE_STATE_SUCCEEDED
+ gca_pipeline_state.PipelineState.PIPELINE_STATE_SUCCEEDED
)
@pytest.mark.parametrize(
@@ -738,14 +730,14 @@ def test_submit_call_pipeline_service_pipeline_job_create(
"gcsOutputDirectory": _TEST_GCS_BUCKET_NAME,
"parameterValues": _TEST_PIPELINE_PARAMETER_VALUES,
}
- runtime_config = gca_pipeline_job_v1.PipelineJob.RuntimeConfig()._pb
+ runtime_config = gca_pipeline_job.PipelineJob.RuntimeConfig()._pb
json_format.ParseDict(expected_runtime_config_dict, runtime_config)
job_spec = yaml.safe_load(job_spec)
pipeline_spec = job_spec.get("pipelineSpec") or job_spec
# Construct expected request
- expected_gapic_pipeline_job = gca_pipeline_job_v1.PipelineJob(
+ expected_gapic_pipeline_job = gca_pipeline_job.PipelineJob(
display_name=_TEST_PIPELINE_JOB_DISPLAY_NAME,
pipeline_spec={
"components": {},
@@ -774,7 +766,7 @@ def test_submit_call_pipeline_service_pipeline_job_create(
)
assert job._gca_resource == make_pipeline_job(
- gca_pipeline_state_v1.PipelineState.PIPELINE_STATE_SUCCEEDED
+ gca_pipeline_state.PipelineState.PIPELINE_STATE_SUCCEEDED
)
@pytest.mark.parametrize(
@@ -851,14 +843,14 @@ def test_submit_call_pipeline_service_pipeline_job_create_legacy(
"parameters": {"string_param": {"stringValue": "hello"}},
"gcsOutputDirectory": _TEST_GCS_BUCKET_NAME,
}
- runtime_config = gca_pipeline_job_v1.PipelineJob.RuntimeConfig()._pb
+ runtime_config = gca_pipeline_job.PipelineJob.RuntimeConfig()._pb
json_format.ParseDict(expected_runtime_config_dict, runtime_config)
job_spec = yaml.safe_load(job_spec)
pipeline_spec = job_spec.get("pipelineSpec") or job_spec
# Construct expected request
- expected_gapic_pipeline_job = gca_pipeline_job_v1.PipelineJob(
+ expected_gapic_pipeline_job = gca_pipeline_job.PipelineJob(
display_name=_TEST_PIPELINE_JOB_DISPLAY_NAME,
pipeline_spec={
"components": {},
@@ -887,7 +879,7 @@ def test_submit_call_pipeline_service_pipeline_job_create_legacy(
)
assert job._gca_resource == make_pipeline_job(
- gca_pipeline_state_v1.PipelineState.PIPELINE_STATE_SUCCEEDED
+ gca_pipeline_state.PipelineState.PIPELINE_STATE_SUCCEEDED
)
@pytest.mark.usefixtures("mock_pipeline_service_get")
diff --git a/tests/unit/aiplatform/test_tensorboard.py b/tests/unit/aiplatform/test_tensorboard.py
index 40b20c7b70..bea5d3f2f4 100644
--- a/tests/unit/aiplatform/test_tensorboard.py
+++ b/tests/unit/aiplatform/test_tensorboard.py
@@ -32,11 +32,11 @@
from google.cloud.aiplatform import initializer
from google.cloud.aiplatform import tensorboard
-from google.cloud.aiplatform_v1.services.tensorboard_service import (
- client as tensorboard_service_client,
+from google.cloud.aiplatform.compat.services import (
+ tensorboard_service_client,
)
-from google.cloud.aiplatform_v1.types import (
+from google.cloud.aiplatform.compat.types import (
encryption_spec as gca_encryption_spec,
tensorboard as gca_tensorboard,
tensorboard_experiment as gca_tensorboard_experiment,
diff --git a/tests/unit/aiplatform/test_training_jobs.py b/tests/unit/aiplatform/test_training_jobs.py
index 21d9baac3e..7048b0ca16 100644
--- a/tests/unit/aiplatform/test_training_jobs.py
+++ b/tests/unit/aiplatform/test_training_jobs.py
@@ -45,16 +45,13 @@
from google.cloud.aiplatform.utils import source_utils
from google.cloud.aiplatform.utils import worker_spec_utils
-
-from google.cloud.aiplatform_v1.services.job_service import client as job_service_client
-from google.cloud.aiplatform_v1.services.model_service import (
- client as model_service_client,
-)
-from google.cloud.aiplatform_v1.services.pipeline_service import (
- client as pipeline_service_client,
+from google.cloud.aiplatform.compat.services import (
+ model_service_client,
+ pipeline_service_client,
+ job_service_client,
)
-from google.cloud.aiplatform_v1.types import (
+from google.cloud.aiplatform.compat.types import (
custom_job as gca_custom_job,
dataset as gca_dataset,
encryption_spec as gca_encryption_spec,
diff --git a/tests/unit/aiplatform/test_uploader.py b/tests/unit/aiplatform/test_uploader.py
index 29c435d40f..44cb1bb11a 100644
--- a/tests/unit/aiplatform/test_uploader.py
+++ b/tests/unit/aiplatform/test_uploader.py
@@ -46,19 +46,20 @@
from google.cloud.aiplatform.tensorboard.plugins.tf_profiler import profile_uploader
import google.cloud.aiplatform.tensorboard.uploader as uploader_lib
from google.cloud import storage
-from google.cloud.aiplatform_v1.services.tensorboard_service import (
- client as tensorboard_service_client,
+from google.cloud.aiplatform.compat.services import (
+ tensorboard_service_client,
)
from google.cloud.aiplatform_v1.services.tensorboard_service.transports import (
grpc as transports_grpc,
)
-from google.cloud.aiplatform_v1.types import tensorboard_data
-from google.cloud.aiplatform_v1.types import tensorboard_service
-from google.cloud.aiplatform_v1.types import (
+
+from google.cloud.aiplatform.compat.types import tensorboard_data
+from google.cloud.aiplatform.compat.types import tensorboard_service
+from google.cloud.aiplatform.compat.types import (
tensorboard_experiment as tensorboard_experiment_type,
)
-from google.cloud.aiplatform_v1.types import tensorboard_run as tensorboard_run_type
-from google.cloud.aiplatform_v1.types import (
+from google.cloud.aiplatform.compat.types import tensorboard_run as tensorboard_run_type
+from google.cloud.aiplatform.compat.types import (
tensorboard_time_series as tensorboard_time_series_type,
)
from google.protobuf import timestamp_pb2
diff --git a/tests/unit/aiplatform/test_uploader_main.py b/tests/unit/aiplatform/test_uploader_main.py
index 79c86b22fc..bd3ae404d8 100644
--- a/tests/unit/aiplatform/test_uploader_main.py
+++ b/tests/unit/aiplatform/test_uploader_main.py
@@ -25,7 +25,9 @@
from google.cloud.aiplatform.tensorboard import uploader_main
from google.cloud.aiplatform.compat.types import job_state as gca_job_state_compat
from google.cloud.aiplatform.compat.types import custom_job as gca_custom_job_compat
-from google.cloud.aiplatform_v1.services.job_service import client as job_service_client
+from google.cloud.aiplatform.compat.services import (
+ job_service_client,
+)
_TEST_PROJECT = "test-project"
_TEST_LOCATION = "us-central1"
From 0c4ead4e40680c2ecb4a98a7dab91eaaf4c8696c Mon Sep 17 00:00:00 2001
From: Sara Robinson
Date: Fri, 20 May 2022 15:56:30 -0400
Subject: [PATCH 06/14] chore: add teardown resource fixture to system tests
(#1247)
---
tests/system/aiplatform/test_dataset.py | 1 +
tests/system/aiplatform/test_featurestore.py | 1 +
2 files changed, 2 insertions(+)
diff --git a/tests/system/aiplatform/test_dataset.py b/tests/system/aiplatform/test_dataset.py
index f152ed0e32..54e2528e1f 100644
--- a/tests/system/aiplatform/test_dataset.py
+++ b/tests/system/aiplatform/test_dataset.py
@@ -189,6 +189,7 @@ def test_get_new_dataset_and_import(self, dataset_gapic_client):
my_dataset.import_data(
gcs_source=_TEST_TEXT_ENTITY_EXTRACTION_GCS_SOURCE,
import_schema_uri=_TEST_TEXT_ENTITY_IMPORT_SCHEMA,
+ import_request_timeout=500,
)
data_items_post_import = dataset_gapic_client.list_data_items(
diff --git a/tests/system/aiplatform/test_featurestore.py b/tests/system/aiplatform/test_featurestore.py
index a7abf3ce84..9573ea6556 100644
--- a/tests/system/aiplatform/test_featurestore.py
+++ b/tests/system/aiplatform/test_featurestore.py
@@ -51,6 +51,7 @@
"delete_staging_bucket",
"prepare_bigquery_dataset",
"delete_bigquery_dataset",
+ "tear_down_resources",
)
class TestFeaturestore(e2e_base.TestEndToEnd):
From 15bc80b7e02065dacbbba8915e52b624431458be Mon Sep 17 00:00:00 2001
From: Sara Robinson
Date: Mon, 23 May 2022 09:36:57 -0400
Subject: [PATCH 07/14] chore: add unit test mock for AnonymousCredentials
(#1245)
* initial work on adding google auth mock
* add anonymouscredentials mock to more tests, run linter
* add auth mock to more test classes
* remove auth mock from two test classes
---
tests/unit/aiplatform/conftest.py | 32 +++++++++++++++++++
.../test_automl_forecasting_training_jobs.py | 1 +
.../test_automl_image_training_jobs.py | 1 +
.../test_automl_tabular_training_jobs.py | 1 +
.../test_automl_text_training_jobs.py | 1 +
.../test_automl_video_training_jobs.py | 1 +
tests/unit/aiplatform/test_custom_job.py | 1 +
tests/unit/aiplatform/test_datasets.py | 16 ++--------
tests/unit/aiplatform/test_end_to_end.py | 1 +
tests/unit/aiplatform/test_endpoints.py | 1 +
tests/unit/aiplatform/test_featurestores.py | 1 +
.../test_hyperparameter_tuning_job.py | 1 +
tests/unit/aiplatform/test_initializer.py | 1 +
tests/unit/aiplatform/test_jobs.py | 2 ++
.../aiplatform/test_matching_engine_index.py | 1 +
.../test_matching_engine_index_endpoint.py | 1 +
tests/unit/aiplatform/test_metadata.py | 1 +
.../aiplatform/test_metadata_resources.py | 1 +
tests/unit/aiplatform/test_metadata_store.py | 15 +--------
.../unit/aiplatform/test_model_evaluation.py | 1 +
tests/unit/aiplatform/test_models.py | 1 +
tests/unit/aiplatform/test_pipeline_jobs.py | 1 +
tests/unit/aiplatform/test_tensorboard.py | 14 +-------
tests/unit/aiplatform/test_training_jobs.py | 5 +++
tests/unit/aiplatform/test_uploader_main.py | 1 +
tests/unit/aiplatform/test_utils.py | 1 +
26 files changed, 64 insertions(+), 40 deletions(-)
create mode 100644 tests/unit/aiplatform/conftest.py
diff --git a/tests/unit/aiplatform/conftest.py b/tests/unit/aiplatform/conftest.py
new file mode 100644
index 0000000000..1a2e9c54f1
--- /dev/null
+++ b/tests/unit/aiplatform/conftest.py
@@ -0,0 +1,32 @@
+# -*- coding: utf-8 -*-
+
+# Copyright 2022 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+import pytest
+
+from google import auth
+from google.auth import credentials as auth_credentials
+from unittest.mock import patch
+
+
+@pytest.fixture(scope="module")
+def google_auth_mock():
+ with patch.object(auth, "default") as google_auth_mock:
+ google_auth_mock.return_value = (
+ auth_credentials.AnonymousCredentials(),
+ "test-project",
+ )
+ yield google_auth_mock
diff --git a/tests/unit/aiplatform/test_automl_forecasting_training_jobs.py b/tests/unit/aiplatform/test_automl_forecasting_training_jobs.py
index f7d0856460..4861470244 100644
--- a/tests/unit/aiplatform/test_automl_forecasting_training_jobs.py
+++ b/tests/unit/aiplatform/test_automl_forecasting_training_jobs.py
@@ -247,6 +247,7 @@ def mock_dataset_nontimeseries():
return ds
+@pytest.mark.usefixtures("google_auth_mock")
class TestAutoMLForecastingTrainingJob:
def setup_method(self):
importlib.reload(initializer)
diff --git a/tests/unit/aiplatform/test_automl_image_training_jobs.py b/tests/unit/aiplatform/test_automl_image_training_jobs.py
index ae87741491..30c51f2e62 100644
--- a/tests/unit/aiplatform/test_automl_image_training_jobs.py
+++ b/tests/unit/aiplatform/test_automl_image_training_jobs.py
@@ -213,6 +213,7 @@ def mock_model():
yield model
+@pytest.mark.usefixtures("google_auth_mock")
class TestAutoMLImageTrainingJob:
def setup_method(self):
importlib.reload(initializer)
diff --git a/tests/unit/aiplatform/test_automl_tabular_training_jobs.py b/tests/unit/aiplatform/test_automl_tabular_training_jobs.py
index cff46f3397..c72f664e1d 100644
--- a/tests/unit/aiplatform/test_automl_tabular_training_jobs.py
+++ b/tests/unit/aiplatform/test_automl_tabular_training_jobs.py
@@ -314,6 +314,7 @@ def mock_dataset_nontabular():
return ds
+@pytest.mark.usefixtures("google_auth_mock")
class TestAutoMLTabularTrainingJob:
def setup_method(self):
importlib.reload(initializer)
diff --git a/tests/unit/aiplatform/test_automl_text_training_jobs.py b/tests/unit/aiplatform/test_automl_text_training_jobs.py
index 5204de45da..e9c0f8cc07 100644
--- a/tests/unit/aiplatform/test_automl_text_training_jobs.py
+++ b/tests/unit/aiplatform/test_automl_text_training_jobs.py
@@ -199,6 +199,7 @@ def mock_model():
yield model
+@pytest.mark.usefixtures("google_auth_mock")
class TestAutoMLTextTrainingJob:
def setup_method(self):
importlib.reload(initializer)
diff --git a/tests/unit/aiplatform/test_automl_video_training_jobs.py b/tests/unit/aiplatform/test_automl_video_training_jobs.py
index c65acec821..3468704204 100644
--- a/tests/unit/aiplatform/test_automl_video_training_jobs.py
+++ b/tests/unit/aiplatform/test_automl_video_training_jobs.py
@@ -197,6 +197,7 @@ def mock_model():
yield model
+@pytest.mark.usefixtures("google_auth_mock")
class TestAutoMLVideoTrainingJob:
def setup_method(self):
importlib.reload(initializer)
diff --git a/tests/unit/aiplatform/test_custom_job.py b/tests/unit/aiplatform/test_custom_job.py
index fea6f70c38..c09df26ad2 100644
--- a/tests/unit/aiplatform/test_custom_job.py
+++ b/tests/unit/aiplatform/test_custom_job.py
@@ -265,6 +265,7 @@ def create_custom_job_mock_fail():
yield create_custom_job_mock
+@pytest.mark.usefixtures("google_auth_mock")
class TestCustomJob:
def setup_method(self):
reload(aiplatform.initializer)
diff --git a/tests/unit/aiplatform/test_datasets.py b/tests/unit/aiplatform/test_datasets.py
index 29dd841a52..0624264e4c 100644
--- a/tests/unit/aiplatform/test_datasets.py
+++ b/tests/unit/aiplatform/test_datasets.py
@@ -15,7 +15,6 @@
# limitations under the License.
#
-import os
import pandas as pd
@@ -26,7 +25,6 @@
from unittest.mock import patch
from google.api_core import operation
-from google.auth.exceptions import GoogleAuthError
from google.auth import credentials as auth_credentials
from google.cloud import aiplatform
@@ -530,6 +528,7 @@ def bigquery_table_schema_mock():
# TODO(b/171333554): Move reusable test fixtures to conftest.py file
+@pytest.mark.usefixtures("google_auth_mock")
class TestDataset:
def setup_method(self):
reload(initializer)
@@ -606,17 +605,6 @@ def test_init_dataset_with_id_only(self, get_dataset_mock):
name=_TEST_NAME, retry=base._DEFAULT_RETRY
)
- @pytest.mark.usefixtures("get_dataset_without_name_mock")
- @patch.dict(
- os.environ, {"GOOGLE_CLOUD_PROJECT": "", "GOOGLE_APPLICATION_CREDENTIALS": ""}
- )
- def test_init_dataset_with_id_only_without_project_or_location(self):
- with pytest.raises(GoogleAuthError):
- datasets._Dataset(
- dataset_name=_TEST_ID,
- credentials=auth_credentials.AnonymousCredentials(),
- )
-
def test_init_dataset_with_location_override(self, get_dataset_mock):
aiplatform.init(project=_TEST_PROJECT, location=_TEST_LOCATION)
datasets._Dataset(dataset_name=_TEST_ID, location=_TEST_ALT_LOCATION)
@@ -1009,6 +997,7 @@ def test_delete_dataset(self, delete_dataset_mock, sync):
delete_dataset_mock.assert_called_once_with(name=my_dataset.resource_name)
+@pytest.mark.usefixtures("google_auth_mock")
class TestImageDataset:
def setup_method(self):
reload(initializer)
@@ -1224,6 +1213,7 @@ def test_create_dataset_with_labels(self, create_dataset_mock, sync):
)
+@pytest.mark.usefixtures("google_auth_mock")
class TestTabularDataset:
def setup_method(self):
reload(initializer)
diff --git a/tests/unit/aiplatform/test_end_to_end.py b/tests/unit/aiplatform/test_end_to_end.py
index 97a95ef6dd..5f42b7628d 100644
--- a/tests/unit/aiplatform/test_end_to_end.py
+++ b/tests/unit/aiplatform/test_end_to_end.py
@@ -66,6 +66,7 @@
)
+@pytest.mark.usefixtures("google_auth_mock")
class TestEndToEnd:
def setup_method(self):
reload(initializer)
diff --git a/tests/unit/aiplatform/test_endpoints.py b/tests/unit/aiplatform/test_endpoints.py
index e0aba5ed39..70e4f0d2b0 100644
--- a/tests/unit/aiplatform/test_endpoints.py
+++ b/tests/unit/aiplatform/test_endpoints.py
@@ -397,6 +397,7 @@ def predict_client_explain_mock():
yield predict_mock
+@pytest.mark.usefixtures("google_auth_mock")
class TestEndpoint:
def setup_method(self):
reload(initializer)
diff --git a/tests/unit/aiplatform/test_featurestores.py b/tests/unit/aiplatform/test_featurestores.py
index 4c556abd17..66f7e9706c 100644
--- a/tests/unit/aiplatform/test_featurestores.py
+++ b/tests/unit/aiplatform/test_featurestores.py
@@ -702,6 +702,7 @@ def batch_create_features_mock():
yield batch_create_features_mock
+@pytest.mark.usefixtures("google_auth_mock")
class TestFeaturestoreUtils:
@pytest.mark.parametrize(
"resource_id",
diff --git a/tests/unit/aiplatform/test_hyperparameter_tuning_job.py b/tests/unit/aiplatform/test_hyperparameter_tuning_job.py
index 601079d751..30a2ea40be 100644
--- a/tests/unit/aiplatform/test_hyperparameter_tuning_job.py
+++ b/tests/unit/aiplatform/test_hyperparameter_tuning_job.py
@@ -347,6 +347,7 @@ def create_hyperparameter_tuning_job_mock_with_tensorboard():
yield create_hyperparameter_tuning_job_mock
+@pytest.mark.usefixtures("google_auth_mock")
class TestHyperparameterTuningJob:
def setup_method(self):
reload(aiplatform.initializer)
diff --git a/tests/unit/aiplatform/test_initializer.py b/tests/unit/aiplatform/test_initializer.py
index a74c4d02cb..6a31a316e1 100644
--- a/tests/unit/aiplatform/test_initializer.py
+++ b/tests/unit/aiplatform/test_initializer.py
@@ -44,6 +44,7 @@
_TEST_STAGING_BUCKET = "test-bucket"
+@pytest.mark.usefixtures("google_auth_mock")
class TestInit:
def setup_method(self):
importlib.reload(initializer)
diff --git a/tests/unit/aiplatform/test_jobs.py b/tests/unit/aiplatform/test_jobs.py
index c6cc6ffd03..364d0ef17b 100644
--- a/tests/unit/aiplatform/test_jobs.py
+++ b/tests/unit/aiplatform/test_jobs.py
@@ -180,6 +180,7 @@ def fake_job_cancel_mock():
yield fake_job_cancel_mock
+@pytest.mark.usefixtures("google_auth_mock")
class TestJob:
class FakeJob(jobs._Job):
_job_type = "custom-job"
@@ -396,6 +397,7 @@ def bq_list_rows_mock():
yield list_rows_mock
+@pytest.mark.usefixtures("google_auth_mock")
class TestBatchPredictionJob:
def setup_method(self):
reload(initializer)
diff --git a/tests/unit/aiplatform/test_matching_engine_index.py b/tests/unit/aiplatform/test_matching_engine_index.py
index 3e415ce8af..78e3a608a9 100644
--- a/tests/unit/aiplatform/test_matching_engine_index.py
+++ b/tests/unit/aiplatform/test_matching_engine_index.py
@@ -167,6 +167,7 @@ def create_index_mock():
yield create_index_mock
+@pytest.mark.usefixtures("google_auth_mock")
class TestMatchingEngineIndex:
def setup_method(self):
reload(initializer)
diff --git a/tests/unit/aiplatform/test_matching_engine_index_endpoint.py b/tests/unit/aiplatform/test_matching_engine_index_endpoint.py
index 516d1f3766..58ff16ed56 100644
--- a/tests/unit/aiplatform/test_matching_engine_index_endpoint.py
+++ b/tests/unit/aiplatform/test_matching_engine_index_endpoint.py
@@ -380,6 +380,7 @@ def create_index_endpoint_mock():
yield create_index_endpoint_mock
+@pytest.mark.usefixtures("google_auth_mock")
class TestMatchingEngineIndexEndpoint:
def setup_method(self):
reload(initializer)
diff --git a/tests/unit/aiplatform/test_metadata.py b/tests/unit/aiplatform/test_metadata.py
index 84ad3949ed..acbfa9098b 100644
--- a/tests/unit/aiplatform/test_metadata.py
+++ b/tests/unit/aiplatform/test_metadata.py
@@ -377,6 +377,7 @@ def _assert_frame_equal_with_sorted_columns(dataframe_1, dataframe_2):
)
+@pytest.mark.usefixtures("google_auth_mock")
class TestMetadata:
def setup_method(self):
reload(initializer)
diff --git a/tests/unit/aiplatform/test_metadata_resources.py b/tests/unit/aiplatform/test_metadata_resources.py
index c4ca4b74d9..2a7180adfd 100644
--- a/tests/unit/aiplatform/test_metadata_resources.py
+++ b/tests/unit/aiplatform/test_metadata_resources.py
@@ -356,6 +356,7 @@ def update_artifact_mock():
yield update_artifact_mock
+@pytest.mark.usefixtures("google_auth_mock")
class TestContext:
def setup_method(self):
reload(initializer)
diff --git a/tests/unit/aiplatform/test_metadata_store.py b/tests/unit/aiplatform/test_metadata_store.py
index 4d0ab6819c..b6dfe8e032 100644
--- a/tests/unit/aiplatform/test_metadata_store.py
+++ b/tests/unit/aiplatform/test_metadata_store.py
@@ -15,15 +15,12 @@
# limitations under the License.
#
-import os
from importlib import reload
from unittest import mock
from unittest.mock import patch
import pytest
from google.api_core import operation
-from google.auth import credentials as auth_credentials
-from google.auth.exceptions import GoogleAuthError
from google.cloud import aiplatform
from google.cloud.aiplatform import base
@@ -145,6 +142,7 @@ def setup_method(self):
def teardown_method(self):
initializer.global_pool.shutdown(wait=True)
+ @pytest.mark.usefixtures("google_auth_mock")
def test_init_metadata_store(self, get_metadata_store_mock):
aiplatform.init(project=_TEST_PROJECT)
metadata_store._MetadataStore(metadata_store_name=_TEST_NAME)
@@ -166,17 +164,6 @@ def test_init_metadata_store_with_default_id(self, get_metadata_store_mock):
name=_TEST_DEFAULT_NAME, retry=base._DEFAULT_RETRY
)
- @pytest.mark.usefixtures("get_metadata_store_without_name_mock")
- @patch.dict(
- os.environ, {"GOOGLE_CLOUD_PROJECT": "", "GOOGLE_APPLICATION_CREDENTIALS": ""}
- )
- def test_init_metadata_store_with_id_without_project_or_location(self):
- with pytest.raises(GoogleAuthError):
- metadata_store._MetadataStore(
- metadata_store_name=_TEST_ID,
- credentials=auth_credentials.AnonymousCredentials(),
- )
-
def test_init_metadata_store_with_location_override(self, get_metadata_store_mock):
aiplatform.init(project=_TEST_PROJECT, location=_TEST_LOCATION)
metadata_store._MetadataStore(
diff --git a/tests/unit/aiplatform/test_model_evaluation.py b/tests/unit/aiplatform/test_model_evaluation.py
index 91cbd77509..d45e9a1506 100644
--- a/tests/unit/aiplatform/test_model_evaluation.py
+++ b/tests/unit/aiplatform/test_model_evaluation.py
@@ -127,6 +127,7 @@ def mock_model_eval_get():
yield mock_get_model_eval
+@pytest.mark.usefixtures("google_auth_mock")
class TestModelEvaluation:
def test_init_model_evaluation_with_only_resource_name(self, mock_model_eval_get):
aiplatform.init(project=_TEST_PROJECT)
diff --git a/tests/unit/aiplatform/test_models.py b/tests/unit/aiplatform/test_models.py
index 206fb59f6f..23f933128a 100644
--- a/tests/unit/aiplatform/test_models.py
+++ b/tests/unit/aiplatform/test_models.py
@@ -541,6 +541,7 @@ def list_model_evaluations_mock():
yield list_model_evaluations_mock
+@pytest.mark.usefixtures("google_auth_mock")
class TestModel:
def setup_method(self):
importlib.reload(initializer)
diff --git a/tests/unit/aiplatform/test_pipeline_jobs.py b/tests/unit/aiplatform/test_pipeline_jobs.py
index 090cfa62bc..159400f8ce 100644
--- a/tests/unit/aiplatform/test_pipeline_jobs.py
+++ b/tests/unit/aiplatform/test_pipeline_jobs.py
@@ -283,6 +283,7 @@ def mock_load_yaml_and_json(job_spec):
yield mock_load_yaml_and_json
+@pytest.mark.usefixtures("google_auth_mock")
class TestPipelineJob:
class FakePipelineJob(pipeline_jobs.PipelineJob):
diff --git a/tests/unit/aiplatform/test_tensorboard.py b/tests/unit/aiplatform/test_tensorboard.py
index bea5d3f2f4..37e3376875 100644
--- a/tests/unit/aiplatform/test_tensorboard.py
+++ b/tests/unit/aiplatform/test_tensorboard.py
@@ -15,7 +15,6 @@
# limitations under the License.
#
-import os
import pytest
@@ -24,8 +23,6 @@
from importlib import reload
from google.api_core import operation
-from google.auth.exceptions import GoogleAuthError
-from google.auth import credentials as auth_credentials
from google.cloud import aiplatform
from google.cloud.aiplatform import base
@@ -266,6 +263,7 @@ def list_tensorboard_run_mock():
yield list_tensorboard_run_mock
+@pytest.mark.usefixtures("google_auth_mock")
class TestTensorboard:
def setup_method(self):
reload(initializer)
@@ -330,16 +328,6 @@ def test_init_tensorboard_with_project_and_alt_location(self):
location=_TEST_ALT_LOCATION,
)
- @patch.dict(
- os.environ, {"GOOGLE_CLOUD_PROJECT": "", "GOOGLE_APPLICATION_CREDENTIALS": ""}
- )
- def test_init_tensorboard_with_id_only_without_project_or_location(self):
- with pytest.raises(GoogleAuthError):
- tensorboard.Tensorboard(
- tensorboard_name=_TEST_ID,
- credentials=auth_credentials.AnonymousCredentials(),
- )
-
def test_init_tensorboard_with_location_override(self, get_tensorboard_mock):
aiplatform.init(project=_TEST_PROJECT, location=_TEST_LOCATION)
tensorboard.Tensorboard(tensorboard_name=_TEST_ID, location=_TEST_ALT_LOCATION)
diff --git a/tests/unit/aiplatform/test_training_jobs.py b/tests/unit/aiplatform/test_training_jobs.py
index 7048b0ca16..8fbf0da3e3 100644
--- a/tests/unit/aiplatform/test_training_jobs.py
+++ b/tests/unit/aiplatform/test_training_jobs.py
@@ -328,6 +328,7 @@ def mock_get_backing_custom_job_with_enable_web_access():
yield get_custom_job_mock
+@pytest.mark.usefixtures("google_auth_mock")
class TestTrainingScriptPythonPackagerHelpers:
def setup_method(self):
importlib.reload(initializer)
@@ -443,6 +444,7 @@ def test_get_python_executable_returns_python_executable(self):
assert "python" in source_utils._get_python_executable().lower()
+@pytest.mark.usefixtures("google_auth_mock")
class TestTrainingScriptPythonPackager:
def setup_method(self):
importlib.reload(initializer)
@@ -831,6 +833,7 @@ def mock_nontabular_dataset():
return ds
+@pytest.mark.usefixtures("google_auth_mock")
class TestCustomTrainingJob:
def setup_method(self):
importlib.reload(initializer)
@@ -2746,6 +2749,7 @@ def test_cancel_training_job_without_running(self, mock_pipeline_service_cancel)
assert e.match(regexp=r"TrainingJob has not been launched")
+@pytest.mark.usefixtures("google_auth_mock")
class TestCustomContainerTrainingJob:
def setup_method(self):
importlib.reload(initializer)
@@ -4705,6 +4709,7 @@ def test_machine_spec_handles_missing_pools(self):
assert spec.pool_specs == true_pool_spec
+@pytest.mark.usefixtures("google_auth_mock")
class TestCustomPythonPackageTrainingJob:
def setup_method(self):
importlib.reload(initializer)
diff --git a/tests/unit/aiplatform/test_uploader_main.py b/tests/unit/aiplatform/test_uploader_main.py
index bd3ae404d8..417c865c27 100644
--- a/tests/unit/aiplatform/test_uploader_main.py
+++ b/tests/unit/aiplatform/test_uploader_main.py
@@ -69,6 +69,7 @@ def get_custom_job_mock():
yield get_custom_job_mock
+@pytest.mark.usefixtures("google_auth_mock")
class TestUploaderMain:
def setup_method(self):
reload(initializer)
diff --git a/tests/unit/aiplatform/test_utils.py b/tests/unit/aiplatform/test_utils.py
index ecbb325e69..c700271590 100644
--- a/tests/unit/aiplatform/test_utils.py
+++ b/tests/unit/aiplatform/test_utils.py
@@ -272,6 +272,7 @@ def test_extract_bucket_and_prefix_from_gcs_path(gcs_path: str, expected: tuple)
assert expected == utils.extract_bucket_and_prefix_from_gcs_path(gcs_path)
+@pytest.mark.usefixtures("google_auth_mock")
def test_wrapped_client():
test_client_info = gapic_v1.client_info.ClientInfo()
test_client_options = client_options.ClientOptions()
From 095717c8b77dc5d66e677413a437ea6ed92e0b1a Mon Sep 17 00:00:00 2001
From: Abhishek Munagekar <10258799+munagekar@users.noreply.github.com>
Date: Tue, 24 May 2022 01:42:06 +0900
Subject: [PATCH 08/14] feat: support autoscaling metrics when deploying models
(#1197)
* feat: support autoscaling metrics when deploying models
* feat: support model deploy to endpoint with autoscaling metrics
* fix autoscaling_target_accelerator_duty_cycle check
* fix docstring: specify that autoscaling_params are optional
* bug fix: add autoscaling_target_cpu_utilization to custom_resource_spec
* add tests
* add _TEST_METRIC_NAME_CPU_UTILIZATION and _TEST_METRIC_NAME_GPU_UTILIZATION
* remove not required arguments in tests
* fix tests: wait for LRO to complete even if not sync
* fix lint: run black
Co-authored-by: Sara Robinson
---
google/cloud/aiplatform/models.py | 104 ++++++++++++++++--
tests/unit/aiplatform/test_endpoints.py | 139 ++++++++++++++++++++++++
2 files changed, 234 insertions(+), 9 deletions(-)
diff --git a/google/cloud/aiplatform/models.py b/google/cloud/aiplatform/models.py
index 1016b717bf..803ab4b159 100644
--- a/google/cloud/aiplatform/models.py
+++ b/google/cloud/aiplatform/models.py
@@ -643,6 +643,8 @@ def deploy(
metadata: Optional[Sequence[Tuple[str, str]]] = (),
sync=True,
deploy_request_timeout: Optional[float] = None,
+ autoscaling_target_cpu_utilization: Optional[int] = None,
+ autoscaling_target_accelerator_duty_cycle: Optional[int] = None,
) -> None:
"""Deploys a Model to the Endpoint.
@@ -716,6 +718,13 @@ def deploy(
be immediately returned and synced when the Future has completed.
deploy_request_timeout (float):
Optional. The timeout for the deploy request in seconds.
+ autoscaling_target_cpu_utilization (int):
+                Optional. Target CPU Utilization to use for Autoscaling Replicas.
+                A default value of 60 will be used if not specified.
+            autoscaling_target_accelerator_duty_cycle (int):
+                Optional. Target Accelerator Duty Cycle.
+ Must also set accelerator_type and accelerator_count if specified.
+ A default value of 60 will be used if not specified.
"""
self._sync_gca_resource_if_skipped()
@@ -746,6 +755,8 @@ def deploy(
metadata=metadata,
sync=sync,
deploy_request_timeout=deploy_request_timeout,
+ autoscaling_target_cpu_utilization=autoscaling_target_cpu_utilization,
+ autoscaling_target_accelerator_duty_cycle=autoscaling_target_accelerator_duty_cycle,
)
@base.optional_sync()
@@ -766,6 +777,8 @@ def _deploy(
metadata: Optional[Sequence[Tuple[str, str]]] = (),
sync=True,
deploy_request_timeout: Optional[float] = None,
+ autoscaling_target_cpu_utilization: Optional[int] = None,
+ autoscaling_target_accelerator_duty_cycle: Optional[int] = None,
) -> None:
"""Deploys a Model to the Endpoint.
@@ -839,6 +852,13 @@ def _deploy(
be immediately returned and synced when the Future has completed.
deploy_request_timeout (float):
Optional. The timeout for the deploy request in seconds.
+ autoscaling_target_cpu_utilization (int):
+                Optional. Target CPU Utilization to use for Autoscaling Replicas.
+                A default value of 60 will be used if not specified.
+            autoscaling_target_accelerator_duty_cycle (int):
+                Optional. Target Accelerator Duty Cycle.
+ Must also set accelerator_type and accelerator_count if specified.
+ A default value of 60 will be used if not specified.
Raises:
ValueError: If there is not current traffic split and traffic percentage
is not 0 or 100.
@@ -865,6 +885,8 @@ def _deploy(
explanation_parameters=explanation_parameters,
metadata=metadata,
deploy_request_timeout=deploy_request_timeout,
+ autoscaling_target_cpu_utilization=autoscaling_target_cpu_utilization,
+ autoscaling_target_accelerator_duty_cycle=autoscaling_target_accelerator_duty_cycle,
)
_LOGGER.log_action_completed_against_resource("model", "deployed", self)
@@ -891,6 +913,8 @@ def _deploy_call(
explanation_parameters: Optional[explain.ExplanationParameters] = None,
metadata: Optional[Sequence[Tuple[str, str]]] = (),
deploy_request_timeout: Optional[float] = None,
+ autoscaling_target_cpu_utilization: Optional[int] = None,
+ autoscaling_target_accelerator_duty_cycle: Optional[int] = None,
):
"""Helper method to deploy model to endpoint.
@@ -964,6 +988,13 @@ def _deploy_call(
be immediately returned and synced when the Future has completed.
deploy_request_timeout (float):
Optional. The timeout for the deploy request in seconds.
+ autoscaling_target_cpu_utilization (int):
+ Optional. Target CPU Utilization to use for Autoscaling Replicas.
+ A default value of 60 will be used if not specified.
+ autoscaling_target_accelerator_duty_cycle (int):
+ Optional. Target Accelerator Duty Cycle.
+ Must also set accelerator_type and accelerator_count if specified.
+ A default value of 60 will be used if not specified.
Raises:
ValueError: If there is not current traffic split and traffic percentage
is not 0 or 100.
@@ -979,6 +1010,14 @@ def _deploy_call(
"Both `accelerator_type` and `accelerator_count` should be specified or None."
)
+ if autoscaling_target_accelerator_duty_cycle is not None and (
+ not accelerator_type or not accelerator_count
+ ):
+ raise ValueError(
+ "Both `accelerator_type` and `accelerator_count` should be set "
+                "when specifying `autoscaling_target_accelerator_duty_cycle`"
+ )
+
deployed_model = gca_endpoint_compat.DeployedModel(
model=model.resource_name,
display_name=deployed_model_display_name,
@@ -994,7 +1033,11 @@ def _deploy_call(
in model.supported_deployment_resources_types
)
provided_custom_machine_spec = (
- machine_type or accelerator_type or accelerator_count
+ machine_type
+ or accelerator_type
+ or accelerator_count
+ or autoscaling_target_accelerator_duty_cycle
+ or autoscaling_target_cpu_utilization
)
# If the model supports both automatic and dedicated deployment resources,
@@ -1006,7 +1049,9 @@ def _deploy_call(
if provided_custom_machine_spec and not use_dedicated_resources:
_LOGGER.info(
"Model does not support dedicated deployment resources. "
- "The machine_type, accelerator_type and accelerator_count parameters are ignored."
+ "The machine_type, accelerator_type and accelerator_count, "
+ "autoscaling_target_accelerator_duty_cycle, "
+ "autoscaling_target_cpu_utilization parameters are ignored."
)
if use_dedicated_resources and not machine_type:
@@ -1014,22 +1059,41 @@ def _deploy_call(
_LOGGER.info(f"Using default machine_type: {machine_type}")
if use_dedicated_resources:
+
+ dedicated_resources = gca_machine_resources_compat.DedicatedResources(
+ min_replica_count=min_replica_count,
+ max_replica_count=max_replica_count,
+ )
+
machine_spec = gca_machine_resources_compat.MachineSpec(
machine_type=machine_type
)
+ if autoscaling_target_cpu_utilization:
+ autoscaling_metric_spec = gca_machine_resources_compat.AutoscalingMetricSpec(
+ metric_name="aiplatform.googleapis.com/prediction/online/cpu/utilization",
+ target=autoscaling_target_cpu_utilization,
+ )
+ dedicated_resources.autoscaling_metric_specs.extend(
+ [autoscaling_metric_spec]
+ )
+
if accelerator_type and accelerator_count:
utils.validate_accelerator_type(accelerator_type)
machine_spec.accelerator_type = accelerator_type
machine_spec.accelerator_count = accelerator_count
- deployed_model.dedicated_resources = (
- gca_machine_resources_compat.DedicatedResources(
- machine_spec=machine_spec,
- min_replica_count=min_replica_count,
- max_replica_count=max_replica_count,
- )
- )
+ if autoscaling_target_accelerator_duty_cycle:
+ autoscaling_metric_spec = gca_machine_resources_compat.AutoscalingMetricSpec(
+ metric_name="aiplatform.googleapis.com/prediction/online/accelerator/duty_cycle",
+ target=autoscaling_target_accelerator_duty_cycle,
+ )
+ dedicated_resources.autoscaling_metric_specs.extend(
+ [autoscaling_metric_spec]
+ )
+
+ dedicated_resources.machine_spec = machine_spec
+ deployed_model.dedicated_resources = dedicated_resources
elif supports_automatic_resources:
deployed_model.automatic_resources = (
@@ -1994,6 +2058,8 @@ def deploy(
encryption_spec_key_name: Optional[str] = None,
sync=True,
deploy_request_timeout: Optional[float] = None,
+ autoscaling_target_cpu_utilization: Optional[int] = None,
+ autoscaling_target_accelerator_duty_cycle: Optional[int] = None,
) -> Endpoint:
"""Deploys model to endpoint. Endpoint will be created if unspecified.
@@ -2078,6 +2144,13 @@ def deploy(
be immediately returned and synced when the Future has completed.
deploy_request_timeout (float):
Optional. The timeout for the deploy request in seconds.
+ autoscaling_target_cpu_utilization (int):
+ Optional. Target CPU Utilization to use for Autoscaling Replicas.
+ A default value of 60 will be used if not specified.
+ autoscaling_target_accelerator_duty_cycle (int):
+ Optional. Target Accelerator Duty Cycle.
+ Must also set accelerator_type and accelerator_count if specified.
+ A default value of 60 will be used if not specified.
Returns:
endpoint ("Endpoint"):
Endpoint with the deployed model.
@@ -2112,6 +2185,8 @@ def deploy(
or initializer.global_config.encryption_spec_key_name,
sync=sync,
deploy_request_timeout=deploy_request_timeout,
+ autoscaling_target_cpu_utilization=autoscaling_target_cpu_utilization,
+ autoscaling_target_accelerator_duty_cycle=autoscaling_target_accelerator_duty_cycle,
)
@base.optional_sync(return_input_arg="endpoint", bind_future_to_self=False)
@@ -2133,6 +2208,8 @@ def _deploy(
encryption_spec_key_name: Optional[str] = None,
sync: bool = True,
deploy_request_timeout: Optional[float] = None,
+ autoscaling_target_cpu_utilization: Optional[int] = None,
+ autoscaling_target_accelerator_duty_cycle: Optional[int] = None,
) -> Endpoint:
"""Deploys model to endpoint. Endpoint will be created if unspecified.
@@ -2217,6 +2294,13 @@ def _deploy(
be immediately returned and synced when the Future has completed.
deploy_request_timeout (float):
Optional. The timeout for the deploy request in seconds.
+ autoscaling_target_cpu_utilization (int):
+ Optional. Target CPU Utilization to use for Autoscaling Replicas.
+ A default value of 60 will be used if not specified.
+ autoscaling_target_accelerator_duty_cycle (int):
+ Optional. Target Accelerator Duty Cycle.
+ Must also set accelerator_type and accelerator_count if specified.
+ A default value of 60 will be used if not specified.
Returns:
endpoint ("Endpoint"):
Endpoint with the deployed model.
@@ -2252,6 +2336,8 @@ def _deploy(
explanation_parameters=explanation_parameters,
metadata=metadata,
deploy_request_timeout=deploy_request_timeout,
+ autoscaling_target_cpu_utilization=autoscaling_target_cpu_utilization,
+ autoscaling_target_accelerator_duty_cycle=autoscaling_target_accelerator_duty_cycle,
)
_LOGGER.log_action_completed_against_resource("model", "deployed", endpoint)
diff --git a/tests/unit/aiplatform/test_endpoints.py b/tests/unit/aiplatform/test_endpoints.py
index 70e4f0d2b0..509eabb57c 100644
--- a/tests/unit/aiplatform/test_endpoints.py
+++ b/tests/unit/aiplatform/test_endpoints.py
@@ -103,6 +103,13 @@
_TEST_ACCELERATOR_TYPE = "NVIDIA_TESLA_P100"
_TEST_ACCELERATOR_COUNT = 2
+_TEST_METRIC_NAME_CPU_UTILIZATION = (
+ "aiplatform.googleapis.com/prediction/online/cpu/utilization"
+)
+_TEST_METRIC_NAME_GPU_UTILIZATION = (
+ "aiplatform.googleapis.com/prediction/online/accelerator/duty_cycle"
+)
+
_TEST_EXPLANATIONS = [gca_prediction_service.explanation.Explanation(attributions=[])]
_TEST_ATTRIBUTIONS = [
@@ -1054,6 +1061,138 @@ def test_deploy_with_dedicated_resources(self, deploy_model_mock, sync):
timeout=None,
)
+ @pytest.mark.usefixtures("get_endpoint_mock", "get_model_mock")
+ @pytest.mark.parametrize("sync", [True, False])
+ def test_deploy_with_autoscaling_target_cpu_utilization(
+ self, deploy_model_mock, sync
+ ):
+ test_endpoint = models.Endpoint(_TEST_ENDPOINT_NAME)
+ test_model = models.Model(_TEST_ID)
+ test_model._gca_resource.supported_deployment_resources_types.append(
+ aiplatform.gapic.Model.DeploymentResourcesType.DEDICATED_RESOURCES
+ )
+ test_endpoint.deploy(
+ model=test_model,
+ machine_type=_TEST_MACHINE_TYPE,
+ service_account=_TEST_SERVICE_ACCOUNT,
+ sync=sync,
+ deploy_request_timeout=None,
+ autoscaling_target_cpu_utilization=70,
+ )
+
+ if not sync:
+ test_endpoint.wait()
+
+ expected_machine_spec = gca_machine_resources.MachineSpec(
+ machine_type=_TEST_MACHINE_TYPE,
+ )
+
+ expected_autoscaling_metric_spec = gca_machine_resources.AutoscalingMetricSpec(
+ metric_name=_TEST_METRIC_NAME_CPU_UTILIZATION,
+ target=70,
+ )
+
+ expected_dedicated_resources = gca_machine_resources.DedicatedResources(
+ machine_spec=expected_machine_spec,
+ min_replica_count=1,
+ max_replica_count=1,
+ )
+ expected_dedicated_resources.autoscaling_metric_specs.extend(
+ [expected_autoscaling_metric_spec]
+ )
+
+ expected_deployed_model = gca_endpoint.DeployedModel(
+ dedicated_resources=expected_dedicated_resources,
+ model=test_model.resource_name,
+ display_name=None,
+ service_account=_TEST_SERVICE_ACCOUNT,
+ )
+ deploy_model_mock.assert_called_once_with(
+ endpoint=test_endpoint.resource_name,
+ deployed_model=expected_deployed_model,
+ traffic_split={"0": 100},
+ metadata=(),
+ timeout=None,
+ )
+
+ @pytest.mark.usefixtures("get_endpoint_mock", "get_model_mock")
+ @pytest.mark.parametrize("sync", [True, False])
+ def test_deploy_with_autoscaling_target_accelerator_duty_cycle(
+ self, deploy_model_mock, sync
+ ):
+ test_endpoint = models.Endpoint(_TEST_ENDPOINT_NAME)
+ test_model = models.Model(_TEST_ID)
+ test_model._gca_resource.supported_deployment_resources_types.append(
+ aiplatform.gapic.Model.DeploymentResourcesType.DEDICATED_RESOURCES
+ )
+ test_endpoint.deploy(
+ model=test_model,
+ machine_type=_TEST_MACHINE_TYPE,
+ accelerator_type=_TEST_ACCELERATOR_TYPE,
+ accelerator_count=_TEST_ACCELERATOR_COUNT,
+ service_account=_TEST_SERVICE_ACCOUNT,
+ sync=sync,
+ deploy_request_timeout=None,
+ autoscaling_target_accelerator_duty_cycle=70,
+ )
+
+ if not sync:
+ test_endpoint.wait()
+
+ expected_machine_spec = gca_machine_resources.MachineSpec(
+ machine_type=_TEST_MACHINE_TYPE,
+ accelerator_type=_TEST_ACCELERATOR_TYPE,
+ accelerator_count=_TEST_ACCELERATOR_COUNT,
+ )
+
+ expected_autoscaling_metric_spec = gca_machine_resources.AutoscalingMetricSpec(
+ metric_name=_TEST_METRIC_NAME_GPU_UTILIZATION,
+ target=70,
+ )
+
+ expected_dedicated_resources = gca_machine_resources.DedicatedResources(
+ machine_spec=expected_machine_spec,
+ min_replica_count=1,
+ max_replica_count=1,
+ )
+ expected_dedicated_resources.autoscaling_metric_specs.extend(
+ [expected_autoscaling_metric_spec]
+ )
+
+ expected_deployed_model = gca_endpoint.DeployedModel(
+ dedicated_resources=expected_dedicated_resources,
+ model=test_model.resource_name,
+ display_name=None,
+ service_account=_TEST_SERVICE_ACCOUNT,
+ )
+ deploy_model_mock.assert_called_once_with(
+ endpoint=test_endpoint.resource_name,
+ deployed_model=expected_deployed_model,
+ traffic_split={"0": 100},
+ metadata=(),
+ timeout=None,
+ )
+
+ @pytest.mark.usefixtures("get_endpoint_mock", "get_model_mock")
+ @pytest.mark.parametrize("sync", [True, False])
+ def test_deploy_with_autoscaling_target_accelerator_duty_cycle_and_no_accelerator_type_or_count_raises(
+ self, sync
+ ):
+ with pytest.raises(ValueError):
+ test_endpoint = models.Endpoint(_TEST_ENDPOINT_NAME)
+ test_model = models.Model(_TEST_ID)
+ test_model._gca_resource.supported_deployment_resources_types.append(
+ aiplatform.gapic.Model.DeploymentResourcesType.DEDICATED_RESOURCES
+ )
+ test_endpoint.deploy(
+ model=test_model,
+ sync=sync,
+ autoscaling_target_accelerator_duty_cycle=70,
+ )
+
+ if not sync:
+ test_endpoint.wait()
+
@pytest.mark.usefixtures("get_endpoint_mock", "get_model_mock")
@pytest.mark.parametrize("sync", [True, False])
def test_deploy_with_explanations(self, deploy_model_with_explanations_mock, sync):
From b4a0beed6cc98514d987a810c11851302706bd83 Mon Sep 17 00:00:00 2001
From: Sara Robinson
Date: Tue, 24 May 2022 16:03:31 -0400
Subject: [PATCH 09/14] chore: add tear_down_resources fixture to individual
system test classes (#1252)
---
tests/system/aiplatform/test_e2e_tabular.py | 4 +++-
tests/system/aiplatform/test_metadata.py | 3 +++
tests/system/aiplatform/test_model_upload.py | 2 +-
tests/system/aiplatform/test_tensorboard.py | 3 +++
4 files changed, 10 insertions(+), 2 deletions(-)
diff --git a/tests/system/aiplatform/test_e2e_tabular.py b/tests/system/aiplatform/test_e2e_tabular.py
index 31b9bb9769..6746e816a9 100644
--- a/tests/system/aiplatform/test_e2e_tabular.py
+++ b/tests/system/aiplatform/test_e2e_tabular.py
@@ -46,7 +46,9 @@
}
-@pytest.mark.usefixtures("prepare_staging_bucket", "delete_staging_bucket")
+@pytest.mark.usefixtures(
+ "prepare_staging_bucket", "delete_staging_bucket", "tear_down_resources"
+)
class TestEndToEndTabular(e2e_base.TestEndToEnd):
"""End to end system test of the Vertex SDK with tabular data adapted from
reference notebook http://shortn/_eyoNx3SN0X"""
diff --git a/tests/system/aiplatform/test_metadata.py b/tests/system/aiplatform/test_metadata.py
index 3d2116fa0a..d5c076e75b 100644
--- a/tests/system/aiplatform/test_metadata.py
+++ b/tests/system/aiplatform/test_metadata.py
@@ -15,6 +15,8 @@
# limitations under the License.
#
+import pytest
+
from google.cloud import aiplatform
from tests.system.aiplatform import e2e_base
@@ -24,6 +26,7 @@
METRICS = {"sdk-metric-test-1": 0.8, "sdk-metric-test-2": 100}
+@pytest.mark.usefixtures("tear_down_resources")
class TestMetadata(e2e_base.TestEndToEnd):
_temp_prefix = "temp-vertex-sdk-e2e-test"
diff --git a/tests/system/aiplatform/test_model_upload.py b/tests/system/aiplatform/test_model_upload.py
index 3acc20c103..5591377a1e 100644
--- a/tests/system/aiplatform/test_model_upload.py
+++ b/tests/system/aiplatform/test_model_upload.py
@@ -28,7 +28,7 @@
_XGBOOST_MODEL_URI = "gs://cloud-samples-data-us-central1/vertex-ai/google-cloud-aiplatform-ci-artifacts/models/iris_xgboost/model.bst"
-@pytest.mark.usefixtures("delete_staging_bucket")
+@pytest.mark.usefixtures("delete_staging_bucket", "tear_down_resources")
class TestModel(e2e_base.TestEndToEnd):
_temp_prefix = "temp_vertex_sdk_e2e_model_upload_test"
diff --git a/tests/system/aiplatform/test_tensorboard.py b/tests/system/aiplatform/test_tensorboard.py
index 501205122f..a1f4634bd9 100644
--- a/tests/system/aiplatform/test_tensorboard.py
+++ b/tests/system/aiplatform/test_tensorboard.py
@@ -15,10 +15,13 @@
# limitations under the License.
#
+import pytest
+
from google.cloud import aiplatform
from tests.system.aiplatform import e2e_base
+@pytest.mark.usefixtures("tear_down_resources")
class TestTensorboard(e2e_base.TestEndToEnd):
_temp_prefix = "temp-vertex-sdk-e2e-test"
From 0ecfe1e7ab8687c13cb4267985e8b6ebc7bd2534 Mon Sep 17 00:00:00 2001
From: Morgan Du
Date: Thu, 26 May 2022 09:07:01 -0700
Subject: [PATCH 10/14] feat: add update endpoint (#1162)
* feat: add update endpoint
* add validate_traffic and validate_traffic_split
* remove validation, add system tests
* Text fixes
* Nox blacken change
Co-authored-by: Sam Goodman
---
google/cloud/aiplatform/models.py | 114 +++++++++++++++-
tests/system/aiplatform/test_model_upload.py | 9 ++
tests/unit/aiplatform/test_endpoints.py | 133 +++++++++++++------
3 files changed, 211 insertions(+), 45 deletions(-)
diff --git a/google/cloud/aiplatform/models.py b/google/cloud/aiplatform/models.py
index 803ab4b159..7d104b3112 100644
--- a/google/cloud/aiplatform/models.py
+++ b/google/cloud/aiplatform/models.py
@@ -51,6 +51,7 @@
from google.protobuf import field_mask_pb2, json_format
_DEFAULT_MACHINE_TYPE = "n1-standard-2"
+_DEPLOYING_MODEL_TRAFFIC_SPLIT_KEY = "0"
_LOGGER = base.Logger(__name__)
@@ -485,7 +486,7 @@ def _allocate_traffic(
new_traffic_split[deployed_model] += 1
unallocated_traffic -= 1
- new_traffic_split["0"] = traffic_percentage
+ new_traffic_split[_DEPLOYING_MODEL_TRAFFIC_SPLIT_KEY] = traffic_percentage
return new_traffic_split
@@ -611,7 +612,6 @@ def _validate_deploy_args(
raise ValueError("Traffic percentage cannot be negative.")
elif traffic_split:
- # TODO(b/172678233) verify every referenced deployed model exists
if sum(traffic_split.values()) != 100:
raise ValueError(
"Sum of all traffic within traffic split needs to be 100."
@@ -1290,6 +1290,110 @@ def _instantiate_prediction_client(
prediction_client=True,
)
+ def update(
+ self,
+ display_name: Optional[str] = None,
+ description: Optional[str] = None,
+ labels: Optional[Dict[str, str]] = None,
+ traffic_split: Optional[Dict[str, int]] = None,
+ request_metadata: Optional[Sequence[Tuple[str, str]]] = (),
+ update_request_timeout: Optional[float] = None,
+ ) -> "Endpoint":
+ """Updates an endpoint.
+
+ Example usage:
+
+ my_endpoint = my_endpoint.update(
+ display_name='my-updated-endpoint',
+ description='my updated description',
+ labels={'key': 'value'},
+ traffic_split={
+ '123456': 20,
+ '234567': 80,
+ },
+ )
+
+ Args:
+ display_name (str):
+ Optional. The display name of the Endpoint.
+ The name can be up to 128 characters long and can be consist of any UTF-8
+ characters.
+ description (str):
+ Optional. The description of the Endpoint.
+ labels (Dict[str, str]):
+ Optional. The labels with user-defined metadata to organize your Endpoints.
+ Label keys and values can be no longer than 64 characters
+ (Unicode codepoints), can only contain lowercase letters, numeric
+ characters, underscores and dashes. International characters are allowed.
+ See https://goo.gl/xmQnxf for more information and examples of labels.
+ traffic_split (Dict[str, int]):
+ Optional. A map from a DeployedModel's ID to the percentage of this Endpoint's
+ traffic that should be forwarded to that DeployedModel.
+ If a DeployedModel's ID is not listed in this map, then it receives no traffic.
+ The traffic percentage values must add up to 100, or map must be empty if
+ the Endpoint is to not accept any traffic at a moment.
+ request_metadata (Sequence[Tuple[str, str]]):
+ Optional. Strings which should be sent along with the request as metadata.
+ update_request_timeout (float):
+ Optional. The timeout for the update request in seconds.
+
+ Returns:
+ Endpoint - Updated endpoint resource.
+
+ Raises:
+ ValueError: If `labels` is not the correct format.
+ """
+
+ self.wait()
+
+ current_endpoint_proto = self.gca_resource
+ copied_endpoint_proto = current_endpoint_proto.__class__(current_endpoint_proto)
+
+ update_mask: List[str] = []
+
+ if display_name:
+ utils.validate_display_name(display_name)
+ copied_endpoint_proto.display_name = display_name
+ update_mask.append("display_name")
+
+ if description:
+ copied_endpoint_proto.description = description
+ update_mask.append("description")
+
+ if labels:
+ utils.validate_labels(labels)
+ copied_endpoint_proto.labels = labels
+ update_mask.append("labels")
+
+ if traffic_split:
+ update_mask.append("traffic_split")
+ copied_endpoint_proto.traffic_split = traffic_split
+
+ update_mask = field_mask_pb2.FieldMask(paths=update_mask)
+
+ _LOGGER.log_action_start_against_resource(
+ "Updating",
+ "endpoint",
+ self,
+ )
+
+ update_endpoint_lro = self.api_client.update_endpoint(
+ endpoint=copied_endpoint_proto,
+ update_mask=update_mask,
+ metadata=request_metadata,
+ timeout=update_request_timeout,
+ )
+
+ _LOGGER.log_action_started_against_resource_with_lro(
+ "Update", "endpoint", self.__class__, update_endpoint_lro
+ )
+
+ update_endpoint_lro.result()
+
+ _LOGGER.log_action_completed_against_resource("endpoint", "updated", self)
+
+ return self
+
def predict(
self,
instances: List,
@@ -1445,15 +1549,15 @@ def list(
credentials=credentials,
)
- def list_models(self) -> Sequence[gca_endpoint_compat.DeployedModel]:
+ def list_models(self) -> List[gca_endpoint_compat.DeployedModel]:
"""Returns a list of the models deployed to this Endpoint.
Returns:
- deployed_models (Sequence[aiplatform.gapic.DeployedModel]):
+ deployed_models (List[aiplatform.gapic.DeployedModel]):
A list of the models deployed in this Endpoint.
"""
self._sync_gca_resource()
- return self._gca_resource.deployed_models
+ return list(self._gca_resource.deployed_models)
def undeploy_all(self, sync: bool = True) -> "Endpoint":
"""Undeploys every model deployed to this Endpoint.
diff --git a/tests/system/aiplatform/test_model_upload.py b/tests/system/aiplatform/test_model_upload.py
index 5591377a1e..3187453561 100644
--- a/tests/system/aiplatform/test_model_upload.py
+++ b/tests/system/aiplatform/test_model_upload.py
@@ -76,3 +76,12 @@ def test_upload_and_deploy_xgboost_model(self, shared_state):
assert model.display_name == "new_name"
assert model.description == "new_description"
assert model.labels == {"my_label": "updated"}
+
+ assert len(endpoint.list_models()) == 1
+ endpoint.deploy(model, traffic_percentage=100)
+ assert len(endpoint.list_models()) == 2
+ traffic_split = {
+ deployed_model.id: 50 for deployed_model in endpoint.list_models()
+ }
+ endpoint.update(traffic_split=traffic_split)
+ assert endpoint.traffic_split == traffic_split
diff --git a/tests/unit/aiplatform/test_endpoints.py b/tests/unit/aiplatform/test_endpoints.py
index 509eabb57c..3e79328b34 100644
--- a/tests/unit/aiplatform/test_endpoints.py
+++ b/tests/unit/aiplatform/test_endpoints.py
@@ -25,6 +25,8 @@
from google.api_core import operation as ga_operation
from google.auth import credentials as auth_credentials
+from google.protobuf import field_mask_pb2
+
from google.cloud import aiplatform
from google.cloud.aiplatform import base
from google.cloud.aiplatform import initializer
@@ -58,6 +60,8 @@
_TEST_ID_2 = "4366591682456584192"
_TEST_ID_3 = "5820582938582924817"
_TEST_DESCRIPTION = "test-description"
+_TEST_REQUEST_METADATA = ()
+_TEST_TIMEOUT = None
_TEST_ENDPOINT_NAME = (
f"projects/{_TEST_PROJECT}/locations/{_TEST_LOCATION}/endpoints/{_TEST_ID}"
@@ -270,6 +274,16 @@ def create_endpoint_mock():
yield create_endpoint_mock
+@pytest.fixture
+def update_endpoint_mock():
+ with mock.patch.object(
+ endpoint_service_client.EndpointServiceClient, "update_endpoint"
+ ) as update_endpoint_mock:
+ update_endpoint_lro_mock = mock.Mock(ga_operation.Operation)
+ update_endpoint_mock.return_value = update_endpoint_lro_mock
+ yield update_endpoint_mock
+
+
@pytest.fixture
def deploy_model_mock():
with mock.patch.object(
@@ -726,6 +740,54 @@ def test_create_with_labels(self, create_endpoint_mock, sync):
timeout=None,
)
+ @pytest.mark.usefixtures("get_endpoint_mock")
+ def test_update_endpoint(self, update_endpoint_mock):
+ endpoint = models.Endpoint(_TEST_ENDPOINT_NAME)
+ endpoint.update(
+ display_name=_TEST_DISPLAY_NAME,
+ description=_TEST_DESCRIPTION,
+ labels=_TEST_LABELS,
+ )
+
+ expected_endpoint = gca_endpoint.Endpoint(
+ name=_TEST_ENDPOINT_NAME,
+ display_name=_TEST_DISPLAY_NAME,
+ description=_TEST_DESCRIPTION,
+ labels=_TEST_LABELS,
+ encryption_spec=_TEST_ENCRYPTION_SPEC,
+ )
+
+ expected_update_mask = field_mask_pb2.FieldMask(
+ paths=["display_name", "description", "labels"]
+ )
+
+ update_endpoint_mock.assert_called_once_with(
+ endpoint=expected_endpoint,
+ update_mask=expected_update_mask,
+ metadata=_TEST_REQUEST_METADATA,
+ timeout=_TEST_TIMEOUT,
+ )
+
+ @pytest.mark.usefixtures("get_endpoint_with_models_mock")
+ def test_update_traffic_split(self, update_endpoint_mock):
+ endpoint = models.Endpoint(_TEST_ENDPOINT_NAME)
+ endpoint.update(traffic_split={_TEST_ID: 10, _TEST_ID_2: 80, _TEST_ID_3: 10})
+
+ expected_endpoint = gca_endpoint.Endpoint(
+ name=_TEST_ENDPOINT_NAME,
+ display_name=_TEST_DISPLAY_NAME,
+ deployed_models=_TEST_DEPLOYED_MODELS,
+ traffic_split={_TEST_ID: 10, _TEST_ID_2: 80, _TEST_ID_3: 10},
+ )
+ expected_update_mask = field_mask_pb2.FieldMask(paths=["traffic_split"])
+
+ update_endpoint_mock.assert_called_once_with(
+ endpoint=expected_endpoint,
+ update_mask=expected_update_mask,
+ metadata=_TEST_REQUEST_METADATA,
+ timeout=_TEST_TIMEOUT,
+ )
+
@pytest.mark.usefixtures("get_endpoint_mock", "get_model_mock")
@pytest.mark.parametrize("sync", [True, False])
def test_deploy(self, deploy_model_mock, sync):
@@ -920,7 +982,7 @@ def test_deploy_raise_error_max_replica(self, sync):
)
test_endpoint.deploy(model=test_model, max_replica_count=-2, sync=sync)
- @pytest.mark.usefixtures("get_endpoint_mock", "get_model_mock")
+ @pytest.mark.usefixtures("get_endpoint_with_models_mock", "get_model_mock")
@pytest.mark.parametrize("sync", [True, False])
def test_deploy_raise_error_traffic_split(self, sync):
with pytest.raises(ValueError):
@@ -973,48 +1035,39 @@ def test_deploy_with_traffic_percent(self, deploy_model_mock, sync):
timeout=None,
)
- @pytest.mark.usefixtures("get_model_mock")
+ @pytest.mark.usefixtures("get_endpoint_with_models_mock", "get_model_mock")
@pytest.mark.parametrize("sync", [True, False])
def test_deploy_with_traffic_split(self, deploy_model_mock, sync):
- with mock.patch.object(
- endpoint_service_client.EndpointServiceClient, "get_endpoint"
- ) as get_endpoint_mock:
- get_endpoint_mock.return_value = gca_endpoint.Endpoint(
- display_name=_TEST_DISPLAY_NAME,
- name=_TEST_ENDPOINT_NAME,
- traffic_split={"model1": 100},
- )
-
- test_endpoint = models.Endpoint(_TEST_ENDPOINT_NAME)
- test_model = models.Model(_TEST_ID)
- test_model._gca_resource.supported_deployment_resources_types.append(
- aiplatform.gapic.Model.DeploymentResourcesType.AUTOMATIC_RESOURCES
- )
- test_endpoint.deploy(
- model=test_model,
- traffic_split={"model1": 30, "0": 70},
- sync=sync,
- deploy_request_timeout=None,
- )
+ test_endpoint = models.Endpoint(_TEST_ENDPOINT_NAME)
+ test_model = models.Model(_TEST_ID)
+ test_model._gca_resource.supported_deployment_resources_types.append(
+ aiplatform.gapic.Model.DeploymentResourcesType.AUTOMATIC_RESOURCES
+ )
+ test_endpoint.deploy(
+ model=test_model,
+ traffic_split={_TEST_ID: 10, _TEST_ID_2: 40, _TEST_ID_3: 10, "0": 40},
+ sync=sync,
+ deploy_request_timeout=None,
+ )
- if not sync:
- test_endpoint.wait()
- automatic_resources = gca_machine_resources.AutomaticResources(
- min_replica_count=1,
- max_replica_count=1,
- )
- deployed_model = gca_endpoint.DeployedModel(
- automatic_resources=automatic_resources,
- model=test_model.resource_name,
- display_name=None,
- )
- deploy_model_mock.assert_called_once_with(
- endpoint=test_endpoint.resource_name,
- deployed_model=deployed_model,
- traffic_split={"model1": 30, "0": 70},
- metadata=(),
- timeout=None,
- )
+ if not sync:
+ test_endpoint.wait()
+ automatic_resources = gca_machine_resources.AutomaticResources(
+ min_replica_count=1,
+ max_replica_count=1,
+ )
+ deployed_model = gca_endpoint.DeployedModel(
+ automatic_resources=automatic_resources,
+ model=test_model.resource_name,
+ display_name=None,
+ )
+ deploy_model_mock.assert_called_once_with(
+ endpoint=test_endpoint.resource_name,
+ deployed_model=deployed_model,
+ traffic_split={_TEST_ID: 10, _TEST_ID_2: 40, _TEST_ID_3: 10, "0": 40},
+ metadata=(),
+ timeout=None,
+ )
@pytest.mark.usefixtures("get_endpoint_mock", "get_model_mock")
@pytest.mark.parametrize("sync", [True, False])
From 74dbabd89bbc3887985ceb782ed8f20802edb776 Mon Sep 17 00:00:00 2001
From: Sara Robinson
Date: Thu, 26 May 2022 17:02:06 -0400
Subject: [PATCH 11/14] chore: refactor training job tests to configure temp
test directory (#1254)
* replace filepath in test with tmpdir var
* update script path var in training job tests
* run linter
---
tests/unit/aiplatform/test_training_jobs.py | 22 ++++++++++++---------
1 file changed, 13 insertions(+), 9 deletions(-)
diff --git a/tests/unit/aiplatform/test_training_jobs.py b/tests/unit/aiplatform/test_training_jobs.py
index 8fbf0da3e3..a956584663 100644
--- a/tests/unit/aiplatform/test_training_jobs.py
+++ b/tests/unit/aiplatform/test_training_jobs.py
@@ -17,6 +17,7 @@
from distutils import core
import copy
+import os
import functools
import importlib
import logging
@@ -73,7 +74,8 @@
_TEST_GCS_PATH = f"{_TEST_BUCKET_NAME}/{_TEST_GCS_PATH_WITHOUT_BUCKET}"
_TEST_GCS_PATH_WITH_TRAILING_SLASH = f"{_TEST_GCS_PATH}/"
_TEST_LOCAL_SCRIPT_FILE_NAME = "____test____script.py"
-_TEST_LOCAL_SCRIPT_FILE_PATH = f"path/to/{_TEST_LOCAL_SCRIPT_FILE_NAME}"
+_TEST_TEMPDIR = tempfile.mkdtemp()
+_TEST_LOCAL_SCRIPT_FILE_PATH = os.path.join(_TEST_TEMPDIR, _TEST_LOCAL_SCRIPT_FILE_NAME)
_TEST_PYTHON_SOURCE = """
print('hello world')
"""
@@ -449,11 +451,11 @@ class TestTrainingScriptPythonPackager:
def setup_method(self):
importlib.reload(initializer)
importlib.reload(aiplatform)
- with open(_TEST_LOCAL_SCRIPT_FILE_NAME, "w") as fp:
+ with open(_TEST_LOCAL_SCRIPT_FILE_PATH, "w") as fp:
fp.write(_TEST_PYTHON_SOURCE)
def teardown_method(self):
- pathlib.Path(_TEST_LOCAL_SCRIPT_FILE_NAME).unlink()
+ pathlib.Path(_TEST_LOCAL_SCRIPT_FILE_PATH).unlink()
python_package_file = f"{source_utils._TrainingScriptPythonPackager._ROOT_MODULE}-{source_utils._TrainingScriptPythonPackager._SETUP_PY_VERSION}.tar.gz"
if pathlib.Path(python_package_file).is_file():
pathlib.Path(python_package_file).unlink()
@@ -467,14 +469,14 @@ def teardown_method(self):
)
def test_packager_creates_and_copies_python_package(self):
- tsp = source_utils._TrainingScriptPythonPackager(_TEST_LOCAL_SCRIPT_FILE_NAME)
+ tsp = source_utils._TrainingScriptPythonPackager(_TEST_LOCAL_SCRIPT_FILE_PATH)
tsp.package_and_copy(copy_method=local_copy_method)
assert pathlib.Path(
f"{tsp._ROOT_MODULE}-{tsp._SETUP_PY_VERSION}.tar.gz"
).is_file()
def test_created_package_module_is_installable_and_can_be_run(self):
- tsp = source_utils._TrainingScriptPythonPackager(_TEST_LOCAL_SCRIPT_FILE_NAME)
+ tsp = source_utils._TrainingScriptPythonPackager(_TEST_LOCAL_SCRIPT_FILE_PATH)
source_dist_path = tsp.package_and_copy(copy_method=local_copy_method)
subprocess.check_output(["pip3", "install", source_dist_path])
module_output = subprocess.check_output(
@@ -484,7 +486,7 @@ def test_created_package_module_is_installable_and_can_be_run(self):
def test_requirements_are_in_package(self):
tsp = source_utils._TrainingScriptPythonPackager(
- _TEST_LOCAL_SCRIPT_FILE_NAME, requirements=_TEST_REQUIREMENTS
+ _TEST_LOCAL_SCRIPT_FILE_PATH, requirements=_TEST_REQUIREMENTS
)
source_dist_path = tsp.package_and_copy(copy_method=local_copy_method)
with tarfile.open(source_dist_path) as tf:
@@ -503,7 +505,7 @@ def test_packaging_fails_whith_RuntimeError(self):
mock_subprocess.returncode = 1
mock_popen.return_value = mock_subprocess
tsp = source_utils._TrainingScriptPythonPackager(
- _TEST_LOCAL_SCRIPT_FILE_NAME
+ _TEST_LOCAL_SCRIPT_FILE_PATH
)
with pytest.raises(RuntimeError):
tsp.package_and_copy(copy_method=local_copy_method)
@@ -511,7 +513,7 @@ def test_packaging_fails_whith_RuntimeError(self):
def test_package_and_copy_to_gcs_copies_to_gcs(self, mock_client_bucket):
mock_client_bucket, mock_blob = mock_client_bucket
- tsp = source_utils._TrainingScriptPythonPackager(_TEST_LOCAL_SCRIPT_FILE_NAME)
+ tsp = source_utils._TrainingScriptPythonPackager(_TEST_LOCAL_SCRIPT_FILE_PATH)
gcs_path = tsp.package_and_copy_to_gcs(
gcs_staging_dir=_TEST_BUCKET_NAME, project=_TEST_PROJECT
@@ -838,7 +840,9 @@ class TestCustomTrainingJob:
def setup_method(self):
importlib.reload(initializer)
importlib.reload(aiplatform)
- self._local_script_file_name = f"{uuid.uuid4()}-{_TEST_LOCAL_SCRIPT_FILE_NAME}"
+ self._local_script_file_name = os.path.join(
+ _TEST_TEMPDIR, f"{uuid.uuid4()}-{_TEST_LOCAL_SCRIPT_FILE_NAME}"
+ )
with open(self._local_script_file_name, "w") as fp:
fp.write(_TEST_PYTHON_SOURCE)
From 406c868344280d424f4191c98bcbbdeaf947b2d1 Mon Sep 17 00:00:00 2001
From: Anthonios Partheniou
Date: Thu, 26 May 2022 19:41:50 -0400
Subject: [PATCH 12/14] fix: regenerate pb2 files using grpcio-tools (#1394)
---
.../_protos/match_service_pb2.py | 706 ++----------------
.../_protos/match_service_pb2_grpc.py | 16 +
2 files changed, 62 insertions(+), 660 deletions(-)
diff --git a/google/cloud/aiplatform/matching_engine/_protos/match_service_pb2.py b/google/cloud/aiplatform/matching_engine/_protos/match_service_pb2.py
index 8d2adac367..6b3ab988b2 100644
--- a/google/cloud/aiplatform/matching_engine/_protos/match_service_pb2.py
+++ b/google/cloud/aiplatform/matching_engine/_protos/match_service_pb2.py
@@ -13,11 +13,12 @@
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
-#
+
# Generated by the protocol buffer compiler. DO NOT EDIT!
-# source: match_service.proto
+# source: google/cloud/aiplatform/matching_engine/_protos/match_service.proto
"""Generated protocol buffer code."""
from google.protobuf import descriptor as _descriptor
+from google.protobuf import descriptor_pool as _descriptor_pool
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
@@ -30,629 +31,29 @@
from google.rpc import status_pb2 as google_dot_rpc_dot_status__pb2
-DESCRIPTOR = _descriptor.FileDescriptor(
- name="match_service.proto",
- package="google.cloud.aiplatform.container.v1beta1",
- syntax="proto3",
- serialized_options=None,
- create_key=_descriptor._internal_create_key,
- serialized_pb=b'\n\x13match_service.proto\x12)google.cloud.aiplatform.container.v1beta1\x1a\x17google/rpc/status.proto"\x97\x02\n\x0cMatchRequest\x12\x19\n\x11\x64\x65ployed_index_id\x18\x01 \x01(\t\x12\x11\n\tfloat_val\x18\x02 \x03(\x02\x12\x15\n\rnum_neighbors\x18\x03 \x01(\x05\x12G\n\trestricts\x18\x04 \x03(\x0b\x32\x34.google.cloud.aiplatform.container.v1beta1.Namespace\x12,\n$per_crowding_attribute_num_neighbors\x18\x05 \x01(\x05\x12\x1c\n\x14\x61pprox_num_neighbors\x18\x06 \x01(\x05\x12-\n%leaf_nodes_to_search_percent_override\x18\x07 \x01(\x05"\x8e\x01\n\rMatchResponse\x12S\n\x08neighbor\x18\x01 \x03(\x0b\x32\x41.google.cloud.aiplatform.container.v1beta1.MatchResponse.Neighbor\x1a(\n\x08Neighbor\x12\n\n\x02id\x18\x01 \x01(\t\x12\x10\n\x08\x64istance\x18\x02 \x01(\x01"\x9f\x02\n\x11\x42\x61tchMatchRequest\x12h\n\x08requests\x18\x01 \x03(\x0b\x32V.google.cloud.aiplatform.container.v1beta1.BatchMatchRequest.BatchMatchRequestPerIndex\x1a\x9f\x01\n\x19\x42\x61tchMatchRequestPerIndex\x12\x19\n\x11\x64\x65ployed_index_id\x18\x01 \x01(\t\x12I\n\x08requests\x18\x02 \x03(\x0b\x32\x37.google.cloud.aiplatform.container.v1beta1.MatchRequest\x12\x1c\n\x14low_level_batch_size\x18\x03 \x01(\x05"\xac\x02\n\x12\x42\x61tchMatchResponse\x12k\n\tresponses\x18\x01 \x03(\x0b\x32X.google.cloud.aiplatform.container.v1beta1.BatchMatchResponse.BatchMatchResponsePerIndex\x1a\xa8\x01\n\x1a\x42\x61tchMatchResponsePerIndex\x12\x19\n\x11\x64\x65ployed_index_id\x18\x01 \x01(\t\x12K\n\tresponses\x18\x02 \x03(\x0b\x32\x38.google.cloud.aiplatform.container.v1beta1.MatchResponse\x12"\n\x06status\x18\x03 \x01(\x0b\x32\x12.google.rpc.Status"D\n\tNamespace\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\x14\n\x0c\x61llow_tokens\x18\x02 \x03(\t\x12\x13\n\x0b\x64\x65ny_tokens\x18\x03 
\x03(\t2\x9a\x02\n\x0cMatchService\x12|\n\x05Match\x12\x37.google.cloud.aiplatform.container.v1beta1.MatchRequest\x1a\x38.google.cloud.aiplatform.container.v1beta1.MatchResponse"\x00\x12\x8b\x01\n\nBatchMatch\x12<.google.cloud.aiplatform.container.v1beta1.BatchMatchRequest\x1a=.google.cloud.aiplatform.container.v1beta1.BatchMatchResponse"\x00\x62\x06proto3',
- dependencies=[
- google_dot_rpc_dot_status__pb2.DESCRIPTOR,
- ],
+DESCRIPTOR = _descriptor_pool.Default().AddSerializedFile(
+ b'\nCgoogle/cloud/aiplatform/matching_engine/_protos/match_service.proto\x12)google.cloud.aiplatform.container.v1beta1\x1a\x17google/rpc/status.proto"\x97\x02\n\x0cMatchRequest\x12\x19\n\x11\x64\x65ployed_index_id\x18\x01 \x01(\t\x12\x11\n\tfloat_val\x18\x02 \x03(\x02\x12\x15\n\rnum_neighbors\x18\x03 \x01(\x05\x12G\n\trestricts\x18\x04 \x03(\x0b\x32\x34.google.cloud.aiplatform.container.v1beta1.Namespace\x12,\n$per_crowding_attribute_num_neighbors\x18\x05 \x01(\x05\x12\x1c\n\x14\x61pprox_num_neighbors\x18\x06 \x01(\x05\x12-\n%leaf_nodes_to_search_percent_override\x18\x07 \x01(\x05"\x8e\x01\n\rMatchResponse\x12S\n\x08neighbor\x18\x01 \x03(\x0b\x32\x41.google.cloud.aiplatform.container.v1beta1.MatchResponse.Neighbor\x1a(\n\x08Neighbor\x12\n\n\x02id\x18\x01 \x01(\t\x12\x10\n\x08\x64istance\x18\x02 \x01(\x01"\x9f\x02\n\x11\x42\x61tchMatchRequest\x12h\n\x08requests\x18\x01 \x03(\x0b\x32V.google.cloud.aiplatform.container.v1beta1.BatchMatchRequest.BatchMatchRequestPerIndex\x1a\x9f\x01\n\x19\x42\x61tchMatchRequestPerIndex\x12\x19\n\x11\x64\x65ployed_index_id\x18\x01 \x01(\t\x12I\n\x08requests\x18\x02 \x03(\x0b\x32\x37.google.cloud.aiplatform.container.v1beta1.MatchRequest\x12\x1c\n\x14low_level_batch_size\x18\x03 \x01(\x05"\xac\x02\n\x12\x42\x61tchMatchResponse\x12k\n\tresponses\x18\x01 \x03(\x0b\x32X.google.cloud.aiplatform.container.v1beta1.BatchMatchResponse.BatchMatchResponsePerIndex\x1a\xa8\x01\n\x1a\x42\x61tchMatchResponsePerIndex\x12\x19\n\x11\x64\x65ployed_index_id\x18\x01 \x01(\t\x12K\n\tresponses\x18\x02 \x03(\x0b\x32\x38.google.cloud.aiplatform.container.v1beta1.MatchResponse\x12"\n\x06status\x18\x03 \x01(\x0b\x32\x12.google.rpc.Status"D\n\tNamespace\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\x14\n\x0c\x61llow_tokens\x18\x02 \x03(\t\x12\x13\n\x0b\x64\x65ny_tokens\x18\x03 
\x03(\t2\x9a\x02\n\x0cMatchService\x12|\n\x05Match\x12\x37.google.cloud.aiplatform.container.v1beta1.MatchRequest\x1a\x38.google.cloud.aiplatform.container.v1beta1.MatchResponse"\x00\x12\x8b\x01\n\nBatchMatch\x12<.google.cloud.aiplatform.container.v1beta1.BatchMatchRequest\x1a=.google.cloud.aiplatform.container.v1beta1.BatchMatchResponse"\x00\x62\x06proto3'
)
-_MATCHREQUEST = _descriptor.Descriptor(
- name="MatchRequest",
- full_name="google.cloud.aiplatform.container.v1beta1.MatchRequest",
- filename=None,
- file=DESCRIPTOR,
- containing_type=None,
- create_key=_descriptor._internal_create_key,
- fields=[
- _descriptor.FieldDescriptor(
- name="deployed_index_id",
- full_name="google.cloud.aiplatform.container.v1beta1.MatchRequest.deployed_index_id",
- index=0,
- number=1,
- type=9,
- cpp_type=9,
- label=1,
- has_default_value=False,
- default_value=b"".decode("utf-8"),
- message_type=None,
- enum_type=None,
- containing_type=None,
- is_extension=False,
- extension_scope=None,
- serialized_options=None,
- file=DESCRIPTOR,
- create_key=_descriptor._internal_create_key,
- ),
- _descriptor.FieldDescriptor(
- name="float_val",
- full_name="google.cloud.aiplatform.container.v1beta1.MatchRequest.float_val",
- index=1,
- number=2,
- type=2,
- cpp_type=6,
- label=3,
- has_default_value=False,
- default_value=[],
- message_type=None,
- enum_type=None,
- containing_type=None,
- is_extension=False,
- extension_scope=None,
- serialized_options=None,
- file=DESCRIPTOR,
- create_key=_descriptor._internal_create_key,
- ),
- _descriptor.FieldDescriptor(
- name="num_neighbors",
- full_name="google.cloud.aiplatform.container.v1beta1.MatchRequest.num_neighbors",
- index=2,
- number=3,
- type=5,
- cpp_type=1,
- label=1,
- has_default_value=False,
- default_value=0,
- message_type=None,
- enum_type=None,
- containing_type=None,
- is_extension=False,
- extension_scope=None,
- serialized_options=None,
- file=DESCRIPTOR,
- create_key=_descriptor._internal_create_key,
- ),
- _descriptor.FieldDescriptor(
- name="restricts",
- full_name="google.cloud.aiplatform.container.v1beta1.MatchRequest.restricts",
- index=3,
- number=4,
- type=11,
- cpp_type=10,
- label=3,
- has_default_value=False,
- default_value=[],
- message_type=None,
- enum_type=None,
- containing_type=None,
- is_extension=False,
- extension_scope=None,
- serialized_options=None,
- file=DESCRIPTOR,
- create_key=_descriptor._internal_create_key,
- ),
- _descriptor.FieldDescriptor(
- name="per_crowding_attribute_num_neighbors",
- full_name="google.cloud.aiplatform.container.v1beta1.MatchRequest.per_crowding_attribute_num_neighbors",
- index=4,
- number=5,
- type=5,
- cpp_type=1,
- label=1,
- has_default_value=False,
- default_value=0,
- message_type=None,
- enum_type=None,
- containing_type=None,
- is_extension=False,
- extension_scope=None,
- serialized_options=None,
- file=DESCRIPTOR,
- create_key=_descriptor._internal_create_key,
- ),
- _descriptor.FieldDescriptor(
- name="approx_num_neighbors",
- full_name="google.cloud.aiplatform.container.v1beta1.MatchRequest.approx_num_neighbors",
- index=5,
- number=6,
- type=5,
- cpp_type=1,
- label=1,
- has_default_value=False,
- default_value=0,
- message_type=None,
- enum_type=None,
- containing_type=None,
- is_extension=False,
- extension_scope=None,
- serialized_options=None,
- file=DESCRIPTOR,
- create_key=_descriptor._internal_create_key,
- ),
- _descriptor.FieldDescriptor(
- name="leaf_nodes_to_search_percent_override",
- full_name="google.cloud.aiplatform.container.v1beta1.MatchRequest.leaf_nodes_to_search_percent_override",
- index=6,
- number=7,
- type=5,
- cpp_type=1,
- label=1,
- has_default_value=False,
- default_value=0,
- message_type=None,
- enum_type=None,
- containing_type=None,
- is_extension=False,
- extension_scope=None,
- serialized_options=None,
- file=DESCRIPTOR,
- create_key=_descriptor._internal_create_key,
- ),
- ],
- extensions=[],
- nested_types=[],
- enum_types=[],
- serialized_options=None,
- is_extendable=False,
- syntax="proto3",
- extension_ranges=[],
- oneofs=[],
- serialized_start=92,
- serialized_end=371,
+_MATCHREQUEST = DESCRIPTOR.message_types_by_name["MatchRequest"]
+_MATCHRESPONSE = DESCRIPTOR.message_types_by_name["MatchResponse"]
+_MATCHRESPONSE_NEIGHBOR = _MATCHRESPONSE.nested_types_by_name["Neighbor"]
+_BATCHMATCHREQUEST = DESCRIPTOR.message_types_by_name["BatchMatchRequest"]
+_BATCHMATCHREQUEST_BATCHMATCHREQUESTPERINDEX = _BATCHMATCHREQUEST.nested_types_by_name[
+ "BatchMatchRequestPerIndex"
+]
+_BATCHMATCHRESPONSE = DESCRIPTOR.message_types_by_name["BatchMatchResponse"]
+_BATCHMATCHRESPONSE_BATCHMATCHRESPONSEPERINDEX = (
+ _BATCHMATCHRESPONSE.nested_types_by_name["BatchMatchResponsePerIndex"]
)
-
-
-_MATCHRESPONSE_NEIGHBOR = _descriptor.Descriptor(
- name="Neighbor",
- full_name="google.cloud.aiplatform.container.v1beta1.MatchResponse.Neighbor",
- filename=None,
- file=DESCRIPTOR,
- containing_type=None,
- create_key=_descriptor._internal_create_key,
- fields=[
- _descriptor.FieldDescriptor(
- name="id",
- full_name="google.cloud.aiplatform.container.v1beta1.MatchResponse.Neighbor.id",
- index=0,
- number=1,
- type=9,
- cpp_type=9,
- label=1,
- has_default_value=False,
- default_value=b"".decode("utf-8"),
- message_type=None,
- enum_type=None,
- containing_type=None,
- is_extension=False,
- extension_scope=None,
- serialized_options=None,
- file=DESCRIPTOR,
- create_key=_descriptor._internal_create_key,
- ),
- _descriptor.FieldDescriptor(
- name="distance",
- full_name="google.cloud.aiplatform.container.v1beta1.MatchResponse.Neighbor.distance",
- index=1,
- number=2,
- type=1,
- cpp_type=5,
- label=1,
- has_default_value=False,
- default_value=float(0),
- message_type=None,
- enum_type=None,
- containing_type=None,
- is_extension=False,
- extension_scope=None,
- serialized_options=None,
- file=DESCRIPTOR,
- create_key=_descriptor._internal_create_key,
- ),
- ],
- extensions=[],
- nested_types=[],
- enum_types=[],
- serialized_options=None,
- is_extendable=False,
- syntax="proto3",
- extension_ranges=[],
- oneofs=[],
- serialized_start=476,
- serialized_end=516,
-)
-
-_MATCHRESPONSE = _descriptor.Descriptor(
- name="MatchResponse",
- full_name="google.cloud.aiplatform.container.v1beta1.MatchResponse",
- filename=None,
- file=DESCRIPTOR,
- containing_type=None,
- create_key=_descriptor._internal_create_key,
- fields=[
- _descriptor.FieldDescriptor(
- name="neighbor",
- full_name="google.cloud.aiplatform.container.v1beta1.MatchResponse.neighbor",
- index=0,
- number=1,
- type=11,
- cpp_type=10,
- label=3,
- has_default_value=False,
- default_value=[],
- message_type=None,
- enum_type=None,
- containing_type=None,
- is_extension=False,
- extension_scope=None,
- serialized_options=None,
- file=DESCRIPTOR,
- create_key=_descriptor._internal_create_key,
- ),
- ],
- extensions=[],
- nested_types=[
- _MATCHRESPONSE_NEIGHBOR,
- ],
- enum_types=[],
- serialized_options=None,
- is_extendable=False,
- syntax="proto3",
- extension_ranges=[],
- oneofs=[],
- serialized_start=374,
- serialized_end=516,
-)
-
-
-_BATCHMATCHREQUEST_BATCHMATCHREQUESTPERINDEX = _descriptor.Descriptor(
- name="BatchMatchRequestPerIndex",
- full_name="google.cloud.aiplatform.container.v1beta1.BatchMatchRequest.BatchMatchRequestPerIndex",
- filename=None,
- file=DESCRIPTOR,
- containing_type=None,
- create_key=_descriptor._internal_create_key,
- fields=[
- _descriptor.FieldDescriptor(
- name="deployed_index_id",
- full_name="google.cloud.aiplatform.container.v1beta1.BatchMatchRequest.BatchMatchRequestPerIndex.deployed_index_id",
- index=0,
- number=1,
- type=9,
- cpp_type=9,
- label=1,
- has_default_value=False,
- default_value=b"".decode("utf-8"),
- message_type=None,
- enum_type=None,
- containing_type=None,
- is_extension=False,
- extension_scope=None,
- serialized_options=None,
- file=DESCRIPTOR,
- create_key=_descriptor._internal_create_key,
- ),
- _descriptor.FieldDescriptor(
- name="requests",
- full_name="google.cloud.aiplatform.container.v1beta1.BatchMatchRequest.BatchMatchRequestPerIndex.requests",
- index=1,
- number=2,
- type=11,
- cpp_type=10,
- label=3,
- has_default_value=False,
- default_value=[],
- message_type=None,
- enum_type=None,
- containing_type=None,
- is_extension=False,
- extension_scope=None,
- serialized_options=None,
- file=DESCRIPTOR,
- create_key=_descriptor._internal_create_key,
- ),
- _descriptor.FieldDescriptor(
- name="low_level_batch_size",
- full_name="google.cloud.aiplatform.container.v1beta1.BatchMatchRequest.BatchMatchRequestPerIndex.low_level_batch_size",
- index=2,
- number=3,
- type=5,
- cpp_type=1,
- label=1,
- has_default_value=False,
- default_value=0,
- message_type=None,
- enum_type=None,
- containing_type=None,
- is_extension=False,
- extension_scope=None,
- serialized_options=None,
- file=DESCRIPTOR,
- create_key=_descriptor._internal_create_key,
- ),
- ],
- extensions=[],
- nested_types=[],
- enum_types=[],
- serialized_options=None,
- is_extendable=False,
- syntax="proto3",
- extension_ranges=[],
- oneofs=[],
- serialized_start=647,
- serialized_end=806,
-)
-
-_BATCHMATCHREQUEST = _descriptor.Descriptor(
- name="BatchMatchRequest",
- full_name="google.cloud.aiplatform.container.v1beta1.BatchMatchRequest",
- filename=None,
- file=DESCRIPTOR,
- containing_type=None,
- create_key=_descriptor._internal_create_key,
- fields=[
- _descriptor.FieldDescriptor(
- name="requests",
- full_name="google.cloud.aiplatform.container.v1beta1.BatchMatchRequest.requests",
- index=0,
- number=1,
- type=11,
- cpp_type=10,
- label=3,
- has_default_value=False,
- default_value=[],
- message_type=None,
- enum_type=None,
- containing_type=None,
- is_extension=False,
- extension_scope=None,
- serialized_options=None,
- file=DESCRIPTOR,
- create_key=_descriptor._internal_create_key,
- ),
- ],
- extensions=[],
- nested_types=[
- _BATCHMATCHREQUEST_BATCHMATCHREQUESTPERINDEX,
- ],
- enum_types=[],
- serialized_options=None,
- is_extendable=False,
- syntax="proto3",
- extension_ranges=[],
- oneofs=[],
- serialized_start=519,
- serialized_end=806,
-)
-
-
-_BATCHMATCHRESPONSE_BATCHMATCHRESPONSEPERINDEX = _descriptor.Descriptor(
- name="BatchMatchResponsePerIndex",
- full_name="google.cloud.aiplatform.container.v1beta1.BatchMatchResponse.BatchMatchResponsePerIndex",
- filename=None,
- file=DESCRIPTOR,
- containing_type=None,
- create_key=_descriptor._internal_create_key,
- fields=[
- _descriptor.FieldDescriptor(
- name="deployed_index_id",
- full_name="google.cloud.aiplatform.container.v1beta1.BatchMatchResponse.BatchMatchResponsePerIndex.deployed_index_id",
- index=0,
- number=1,
- type=9,
- cpp_type=9,
- label=1,
- has_default_value=False,
- default_value=b"".decode("utf-8"),
- message_type=None,
- enum_type=None,
- containing_type=None,
- is_extension=False,
- extension_scope=None,
- serialized_options=None,
- file=DESCRIPTOR,
- create_key=_descriptor._internal_create_key,
- ),
- _descriptor.FieldDescriptor(
- name="responses",
- full_name="google.cloud.aiplatform.container.v1beta1.BatchMatchResponse.BatchMatchResponsePerIndex.responses",
- index=1,
- number=2,
- type=11,
- cpp_type=10,
- label=3,
- has_default_value=False,
- default_value=[],
- message_type=None,
- enum_type=None,
- containing_type=None,
- is_extension=False,
- extension_scope=None,
- serialized_options=None,
- file=DESCRIPTOR,
- create_key=_descriptor._internal_create_key,
- ),
- _descriptor.FieldDescriptor(
- name="status",
- full_name="google.cloud.aiplatform.container.v1beta1.BatchMatchResponse.BatchMatchResponsePerIndex.status",
- index=2,
- number=3,
- type=11,
- cpp_type=10,
- label=1,
- has_default_value=False,
- default_value=None,
- message_type=None,
- enum_type=None,
- containing_type=None,
- is_extension=False,
- extension_scope=None,
- serialized_options=None,
- file=DESCRIPTOR,
- create_key=_descriptor._internal_create_key,
- ),
- ],
- extensions=[],
- nested_types=[],
- enum_types=[],
- serialized_options=None,
- is_extendable=False,
- syntax="proto3",
- extension_ranges=[],
- oneofs=[],
- serialized_start=941,
- serialized_end=1109,
-)
-
-_BATCHMATCHRESPONSE = _descriptor.Descriptor(
- name="BatchMatchResponse",
- full_name="google.cloud.aiplatform.container.v1beta1.BatchMatchResponse",
- filename=None,
- file=DESCRIPTOR,
- containing_type=None,
- create_key=_descriptor._internal_create_key,
- fields=[
- _descriptor.FieldDescriptor(
- name="responses",
- full_name="google.cloud.aiplatform.container.v1beta1.BatchMatchResponse.responses",
- index=0,
- number=1,
- type=11,
- cpp_type=10,
- label=3,
- has_default_value=False,
- default_value=[],
- message_type=None,
- enum_type=None,
- containing_type=None,
- is_extension=False,
- extension_scope=None,
- serialized_options=None,
- file=DESCRIPTOR,
- create_key=_descriptor._internal_create_key,
- ),
- ],
- extensions=[],
- nested_types=[
- _BATCHMATCHRESPONSE_BATCHMATCHRESPONSEPERINDEX,
- ],
- enum_types=[],
- serialized_options=None,
- is_extendable=False,
- syntax="proto3",
- extension_ranges=[],
- oneofs=[],
- serialized_start=809,
- serialized_end=1109,
-)
-
-
-_NAMESPACE = _descriptor.Descriptor(
- name="Namespace",
- full_name="google.cloud.aiplatform.container.v1beta1.Namespace",
- filename=None,
- file=DESCRIPTOR,
- containing_type=None,
- create_key=_descriptor._internal_create_key,
- fields=[
- _descriptor.FieldDescriptor(
- name="name",
- full_name="google.cloud.aiplatform.container.v1beta1.Namespace.name",
- index=0,
- number=1,
- type=9,
- cpp_type=9,
- label=1,
- has_default_value=False,
- default_value=b"".decode("utf-8"),
- message_type=None,
- enum_type=None,
- containing_type=None,
- is_extension=False,
- extension_scope=None,
- serialized_options=None,
- file=DESCRIPTOR,
- create_key=_descriptor._internal_create_key,
- ),
- _descriptor.FieldDescriptor(
- name="allow_tokens",
- full_name="google.cloud.aiplatform.container.v1beta1.Namespace.allow_tokens",
- index=1,
- number=2,
- type=9,
- cpp_type=9,
- label=3,
- has_default_value=False,
- default_value=[],
- message_type=None,
- enum_type=None,
- containing_type=None,
- is_extension=False,
- extension_scope=None,
- serialized_options=None,
- file=DESCRIPTOR,
- create_key=_descriptor._internal_create_key,
- ),
- _descriptor.FieldDescriptor(
- name="deny_tokens",
- full_name="google.cloud.aiplatform.container.v1beta1.Namespace.deny_tokens",
- index=2,
- number=3,
- type=9,
- cpp_type=9,
- label=3,
- has_default_value=False,
- default_value=[],
- message_type=None,
- enum_type=None,
- containing_type=None,
- is_extension=False,
- extension_scope=None,
- serialized_options=None,
- file=DESCRIPTOR,
- create_key=_descriptor._internal_create_key,
- ),
- ],
- extensions=[],
- nested_types=[],
- enum_types=[],
- serialized_options=None,
- is_extendable=False,
- syntax="proto3",
- extension_ranges=[],
- oneofs=[],
- serialized_start=1111,
- serialized_end=1179,
-)
-
-_MATCHREQUEST.fields_by_name["restricts"].message_type = _NAMESPACE
-_MATCHRESPONSE_NEIGHBOR.containing_type = _MATCHRESPONSE
-_MATCHRESPONSE.fields_by_name["neighbor"].message_type = _MATCHRESPONSE_NEIGHBOR
-_BATCHMATCHREQUEST_BATCHMATCHREQUESTPERINDEX.fields_by_name[
- "requests"
-].message_type = _MATCHREQUEST
-_BATCHMATCHREQUEST_BATCHMATCHREQUESTPERINDEX.containing_type = _BATCHMATCHREQUEST
-_BATCHMATCHREQUEST.fields_by_name[
- "requests"
-].message_type = _BATCHMATCHREQUEST_BATCHMATCHREQUESTPERINDEX
-_BATCHMATCHRESPONSE_BATCHMATCHRESPONSEPERINDEX.fields_by_name[
- "responses"
-].message_type = _MATCHRESPONSE
-_BATCHMATCHRESPONSE_BATCHMATCHRESPONSEPERINDEX.fields_by_name[
- "status"
-].message_type = google_dot_rpc_dot_status__pb2._STATUS
-_BATCHMATCHRESPONSE_BATCHMATCHRESPONSEPERINDEX.containing_type = _BATCHMATCHRESPONSE
-_BATCHMATCHRESPONSE.fields_by_name[
- "responses"
-].message_type = _BATCHMATCHRESPONSE_BATCHMATCHRESPONSEPERINDEX
-DESCRIPTOR.message_types_by_name["MatchRequest"] = _MATCHREQUEST
-DESCRIPTOR.message_types_by_name["MatchResponse"] = _MATCHRESPONSE
-DESCRIPTOR.message_types_by_name["BatchMatchRequest"] = _BATCHMATCHREQUEST
-DESCRIPTOR.message_types_by_name["BatchMatchResponse"] = _BATCHMATCHRESPONSE
-DESCRIPTOR.message_types_by_name["Namespace"] = _NAMESPACE
-_sym_db.RegisterFileDescriptor(DESCRIPTOR)
-
+_NAMESPACE = DESCRIPTOR.message_types_by_name["Namespace"]
MatchRequest = _reflection.GeneratedProtocolMessageType(
"MatchRequest",
(_message.Message,),
{
"DESCRIPTOR": _MATCHREQUEST,
- "__module__": "match_service_pb2"
+ "__module__": "google.cloud.aiplatform.matching_engine._protos.match_service_pb2"
# @@protoc_insertion_point(class_scope:google.cloud.aiplatform.container.v1beta1.MatchRequest)
},
)
@@ -667,12 +68,12 @@
(_message.Message,),
{
"DESCRIPTOR": _MATCHRESPONSE_NEIGHBOR,
- "__module__": "match_service_pb2"
+ "__module__": "google.cloud.aiplatform.matching_engine._protos.match_service_pb2"
# @@protoc_insertion_point(class_scope:google.cloud.aiplatform.container.v1beta1.MatchResponse.Neighbor)
},
),
"DESCRIPTOR": _MATCHRESPONSE,
- "__module__": "match_service_pb2"
+ "__module__": "google.cloud.aiplatform.matching_engine._protos.match_service_pb2"
# @@protoc_insertion_point(class_scope:google.cloud.aiplatform.container.v1beta1.MatchResponse)
},
)
@@ -688,12 +89,12 @@
(_message.Message,),
{
"DESCRIPTOR": _BATCHMATCHREQUEST_BATCHMATCHREQUESTPERINDEX,
- "__module__": "match_service_pb2"
+ "__module__": "google.cloud.aiplatform.matching_engine._protos.match_service_pb2"
# @@protoc_insertion_point(class_scope:google.cloud.aiplatform.container.v1beta1.BatchMatchRequest.BatchMatchRequestPerIndex)
},
),
"DESCRIPTOR": _BATCHMATCHREQUEST,
- "__module__": "match_service_pb2"
+ "__module__": "google.cloud.aiplatform.matching_engine._protos.match_service_pb2"
# @@protoc_insertion_point(class_scope:google.cloud.aiplatform.container.v1beta1.BatchMatchRequest)
},
)
@@ -709,12 +110,12 @@
(_message.Message,),
{
"DESCRIPTOR": _BATCHMATCHRESPONSE_BATCHMATCHRESPONSEPERINDEX,
- "__module__": "match_service_pb2"
+ "__module__": "google.cloud.aiplatform.matching_engine._protos.match_service_pb2"
# @@protoc_insertion_point(class_scope:google.cloud.aiplatform.container.v1beta1.BatchMatchResponse.BatchMatchResponsePerIndex)
},
),
"DESCRIPTOR": _BATCHMATCHRESPONSE,
- "__module__": "match_service_pb2"
+ "__module__": "google.cloud.aiplatform.matching_engine._protos.match_service_pb2"
# @@protoc_insertion_point(class_scope:google.cloud.aiplatform.container.v1beta1.BatchMatchResponse)
},
)
@@ -726,47 +127,32 @@
(_message.Message,),
{
"DESCRIPTOR": _NAMESPACE,
- "__module__": "match_service_pb2"
+ "__module__": "google.cloud.aiplatform.matching_engine._protos.match_service_pb2"
# @@protoc_insertion_point(class_scope:google.cloud.aiplatform.container.v1beta1.Namespace)
},
)
_sym_db.RegisterMessage(Namespace)
-
-_MATCHSERVICE = _descriptor.ServiceDescriptor(
- name="MatchService",
- full_name="google.cloud.aiplatform.container.v1beta1.MatchService",
- file=DESCRIPTOR,
- index=0,
- serialized_options=None,
- create_key=_descriptor._internal_create_key,
- serialized_start=1182,
- serialized_end=1464,
- methods=[
- _descriptor.MethodDescriptor(
- name="Match",
- full_name="google.cloud.aiplatform.container.v1beta1.MatchService.Match",
- index=0,
- containing_service=None,
- input_type=_MATCHREQUEST,
- output_type=_MATCHRESPONSE,
- serialized_options=None,
- create_key=_descriptor._internal_create_key,
- ),
- _descriptor.MethodDescriptor(
- name="BatchMatch",
- full_name="google.cloud.aiplatform.container.v1beta1.MatchService.BatchMatch",
- index=1,
- containing_service=None,
- input_type=_BATCHMATCHREQUEST,
- output_type=_BATCHMATCHRESPONSE,
- serialized_options=None,
- create_key=_descriptor._internal_create_key,
- ),
- ],
-)
-_sym_db.RegisterServiceDescriptor(_MATCHSERVICE)
-
-DESCRIPTOR.services_by_name["MatchService"] = _MATCHSERVICE
-
+_MATCHSERVICE = DESCRIPTOR.services_by_name["MatchService"]
+if _descriptor._USE_C_DESCRIPTORS == False:
+
+ DESCRIPTOR._options = None
+ _MATCHREQUEST._serialized_start = 140
+ _MATCHREQUEST._serialized_end = 419
+ _MATCHRESPONSE._serialized_start = 422
+ _MATCHRESPONSE._serialized_end = 564
+ _MATCHRESPONSE_NEIGHBOR._serialized_start = 524
+ _MATCHRESPONSE_NEIGHBOR._serialized_end = 564
+ _BATCHMATCHREQUEST._serialized_start = 567
+ _BATCHMATCHREQUEST._serialized_end = 854
+ _BATCHMATCHREQUEST_BATCHMATCHREQUESTPERINDEX._serialized_start = 695
+ _BATCHMATCHREQUEST_BATCHMATCHREQUESTPERINDEX._serialized_end = 854
+ _BATCHMATCHRESPONSE._serialized_start = 857
+ _BATCHMATCHRESPONSE._serialized_end = 1157
+ _BATCHMATCHRESPONSE_BATCHMATCHRESPONSEPERINDEX._serialized_start = 989
+ _BATCHMATCHRESPONSE_BATCHMATCHRESPONSEPERINDEX._serialized_end = 1157
+ _NAMESPACE._serialized_start = 1159
+ _NAMESPACE._serialized_end = 1227
+ _MATCHSERVICE._serialized_start = 1230
+ _MATCHSERVICE._serialized_end = 1512
# @@protoc_insertion_point(module_scope)
diff --git a/google/cloud/aiplatform/matching_engine/_protos/match_service_pb2_grpc.py b/google/cloud/aiplatform/matching_engine/_protos/match_service_pb2_grpc.py
index 9c99081a16..2c0c14f8ed 100644
--- a/google/cloud/aiplatform/matching_engine/_protos/match_service_pb2_grpc.py
+++ b/google/cloud/aiplatform/matching_engine/_protos/match_service_pb2_grpc.py
@@ -14,7 +14,23 @@
# See the License for the specific language governing permissions and
# limitations under the License.
#
+
+# Copyright 2022 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
# Generated by the gRPC Python protocol compiler plugin. DO NOT EDIT!
+
"""Client and server classes corresponding to protobuf-defined services."""
from google.cloud.aiplatform.matching_engine._protos import match_service_pb2
From df78407b2f14c95c9e84b4b1375a8de5bc9c7bb5 Mon Sep 17 00:00:00 2001
From: Sam Goodman
Date: Thu, 26 May 2022 16:57:50 -0700
Subject: [PATCH 13/14] chore: release 1.13.1 (#1395)
Release-As: 1.13.1
Co-authored-by: Sam Goodman
From dc3be45c249132c3f27752d66a67f08bfe97ea42 Mon Sep 17 00:00:00 2001
From: "release-please[bot]"
<55107282+release-please[bot]@users.noreply.github.com>
Date: Thu, 26 May 2022 20:17:04 -0400
Subject: [PATCH 14/14] chore(main): release 1.13.1 (#1216)
Co-authored-by: release-please[bot] <55107282+release-please[bot]@users.noreply.github.com>
---
CHANGELOG.md | 25 +++++++++++++++++++++++++
google/cloud/aiplatform/version.py | 2 +-
2 files changed, 26 insertions(+), 1 deletion(-)
diff --git a/CHANGELOG.md b/CHANGELOG.md
index 04e51a15bd..340873320d 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -1,5 +1,30 @@
# Changelog
+### [1.13.1](https://github.com/googleapis/python-aiplatform/compare/v1.13.0...v1.13.1) (2022-05-26)
+
+
+### Features
+
+* add batch_size kwarg for batch prediction jobs ([#1194](https://github.com/googleapis/python-aiplatform/issues/1194)) ([50bdb01](https://github.com/googleapis/python-aiplatform/commit/50bdb01504740ed31de788d8a160f3e2be7f55df))
+* add update endpoint ([#1162](https://github.com/googleapis/python-aiplatform/issues/1162)) ([0ecfe1e](https://github.com/googleapis/python-aiplatform/commit/0ecfe1e7ab8687c13cb4267985e8b6ebc7bd2534))
+* support autoscaling metrics when deploying models ([#1197](https://github.com/googleapis/python-aiplatform/issues/1197)) ([095717c](https://github.com/googleapis/python-aiplatform/commit/095717c8b77dc5d66e677413a437ea6ed92e0b1a))
+
+
+### Bug Fixes
+
+* check in service proto file ([#1174](https://github.com/googleapis/python-aiplatform/issues/1174)) ([5fdf151](https://github.com/googleapis/python-aiplatform/commit/5fdf151ee0d0a630c07a75dc8f19906e7ad1aa8a))
+* regenerate pb2 files using grpcio-tools ([#1394](https://github.com/googleapis/python-aiplatform/issues/1394)) ([406c868](https://github.com/googleapis/python-aiplatform/commit/406c868344280d424f4191c98bcbbdeaf947b2d1))
+
+
+### Documentation
+
+* update aiplatform SDK arrangement for Sphinx ([#1163](https://github.com/googleapis/python-aiplatform/issues/1163)) ([e9510ea](https://github.com/googleapis/python-aiplatform/commit/e9510ea6344a296e0c93ddf32280cf4c010ee4f1))
+
+
+### Miscellaneous Chores
+
+* release 1.13.1 ([#1395](https://github.com/googleapis/python-aiplatform/issues/1395)) ([df78407](https://github.com/googleapis/python-aiplatform/commit/df78407b2f14c95c9e84b4b1375a8de5bc9c7bb5))
+
## [1.13.0](https://github.com/googleapis/python-aiplatform/compare/v1.12.1...v1.13.0) (2022-05-09)
diff --git a/google/cloud/aiplatform/version.py b/google/cloud/aiplatform/version.py
index 2313afcd27..ec04f5d40b 100644
--- a/google/cloud/aiplatform/version.py
+++ b/google/cloud/aiplatform/version.py
@@ -15,4 +15,4 @@
# limitations under the License.
#
-__version__ = "1.13.0"
+__version__ = "1.13.1"