From 1c341544e9bd94c6ff0ee41177565c8c078673a3 Mon Sep 17 00:00:00 2001
From: Anthonios Partheniou
Date: Fri, 11 Feb 2022 10:17:43 -0500
Subject: [PATCH 01/15] fix(deps): allow google-cloud-storage < 3.0.0dev
(#1008)
---
setup.py | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/setup.py b/setup.py
index 8c1872c300..2deda8b68f 100644
--- a/setup.py
+++ b/setup.py
@@ -98,7 +98,7 @@
"google-api-core[grpc] >= 1.26.0, <3.0.0dev",
"proto-plus >= 1.10.1",
"packaging >= 14.3",
- "google-cloud-storage >= 1.32.0, < 2.0.0dev",
+ "google-cloud-storage >= 1.32.0, < 3.0.0dev",
"google-cloud-bigquery >= 1.15.0, < 3.0.0dev",
),
extras_require={
From c10923b47b9b9941d14ae2c5398348d971a23f9d Mon Sep 17 00:00:00 2001
From: Sara Robinson
Date: Mon, 14 Feb 2022 12:44:11 -0500
Subject: [PATCH 02/15] fix: show logs when TFX pipelines are submitted (#976)
* Fix for showing logs when TFX pipelines are submitted
* Add sdkVersion and TFX spec to tests
* Remove params from tfx pipeline spec
* Add new tests for TFX pipelines
* Update tests after linting
---
google/cloud/aiplatform/pipeline_jobs.py | 5 ++
tests/unit/aiplatform/test_pipeline_jobs.py | 86 +++++++++++++++++++++
2 files changed, 91 insertions(+)
diff --git a/google/cloud/aiplatform/pipeline_jobs.py b/google/cloud/aiplatform/pipeline_jobs.py
index c756589513..d91a711c65 100644
--- a/google/cloud/aiplatform/pipeline_jobs.py
+++ b/google/cloud/aiplatform/pipeline_jobs.py
@@ -16,6 +16,7 @@
#
import datetime
+import logging
import time
import re
from typing import Any, Dict, List, Optional
@@ -273,6 +274,10 @@ def submit(
if network:
self._gca_resource.network = network
+        # Prevents logs from being suppressed on TFX pipelines
+ if self._gca_resource.pipeline_spec.get("sdkVersion", "").startswith("tfx"):
+ _LOGGER.setLevel(logging.INFO)
+
_LOGGER.log_create_with_lro(self.__class__)
self._gca_resource = self.api_client.create_pipeline_job(
diff --git a/tests/unit/aiplatform/test_pipeline_jobs.py b/tests/unit/aiplatform/test_pipeline_jobs.py
index 81f14a7ead..ac2fca8ba8 100644
--- a/tests/unit/aiplatform/test_pipeline_jobs.py
+++ b/tests/unit/aiplatform/test_pipeline_jobs.py
@@ -92,6 +92,16 @@
"schemaVersion": "2.1.0",
"components": {},
}
+_TEST_TFX_PIPELINE_SPEC = {
+ "pipelineInfo": {"name": "my-pipeline"},
+ "root": {
+ "dag": {"tasks": {}},
+ "inputDefinitions": {"parameters": {"string_param": {"type": "STRING"}}},
+ },
+ "schemaVersion": "2.0.0",
+ "sdkVersion": "tfx-1.4.0",
+ "components": {},
+}
_TEST_PIPELINE_JOB_LEGACY = {
"runtimeConfig": {},
@@ -101,6 +111,10 @@
"runtimeConfig": {"parameterValues": _TEST_PIPELINE_PARAMETER_VALUES},
"pipelineSpec": _TEST_PIPELINE_SPEC,
}
+_TEST_PIPELINE_JOB_TFX = {
+ "runtimeConfig": {},
+ "pipelineSpec": _TEST_TFX_PIPELINE_SPEC,
+}
_TEST_PIPELINE_GET_METHOD_NAME = "get_fake_pipeline_job"
_TEST_PIPELINE_LIST_METHOD_NAME = "list_fake_pipeline_jobs"
@@ -378,6 +392,78 @@ def test_run_call_pipeline_service_create_legacy(
gca_pipeline_state_v1.PipelineState.PIPELINE_STATE_SUCCEEDED
)
+ @pytest.mark.parametrize(
+ "job_spec_json", [_TEST_TFX_PIPELINE_SPEC, _TEST_PIPELINE_JOB_TFX],
+ )
+ @pytest.mark.parametrize("sync", [True, False])
+ def test_run_call_pipeline_service_create_tfx(
+ self,
+ mock_pipeline_service_create,
+ mock_pipeline_service_get,
+ job_spec_json,
+ mock_load_json,
+ sync,
+ ):
+ aiplatform.init(
+ project=_TEST_PROJECT,
+ staging_bucket=_TEST_GCS_BUCKET_NAME,
+ location=_TEST_LOCATION,
+ credentials=_TEST_CREDENTIALS,
+ )
+
+ job = pipeline_jobs.PipelineJob(
+ display_name=_TEST_PIPELINE_JOB_DISPLAY_NAME,
+ template_path=_TEST_TEMPLATE_PATH,
+ job_id=_TEST_PIPELINE_JOB_ID,
+ parameter_values=_TEST_PIPELINE_PARAMETER_VALUES_LEGACY,
+ enable_caching=True,
+ )
+
+ job.run(
+ service_account=_TEST_SERVICE_ACCOUNT, network=_TEST_NETWORK, sync=sync,
+ )
+
+ if not sync:
+ job.wait()
+
+ expected_runtime_config_dict = {
+ "gcsOutputDirectory": _TEST_GCS_BUCKET_NAME,
+ "parameters": {"string_param": {"stringValue": "hello"}},
+ }
+ runtime_config = gca_pipeline_job_v1.PipelineJob.RuntimeConfig()._pb
+ json_format.ParseDict(expected_runtime_config_dict, runtime_config)
+
+ pipeline_spec = job_spec_json.get("pipelineSpec") or job_spec_json
+
+ # Construct expected request
+ expected_gapic_pipeline_job = gca_pipeline_job_v1.PipelineJob(
+ display_name=_TEST_PIPELINE_JOB_DISPLAY_NAME,
+ pipeline_spec={
+ "components": {},
+ "pipelineInfo": pipeline_spec["pipelineInfo"],
+ "root": pipeline_spec["root"],
+ "schemaVersion": "2.0.0",
+ "sdkVersion": "tfx-1.4.0",
+ },
+ runtime_config=runtime_config,
+ service_account=_TEST_SERVICE_ACCOUNT,
+ network=_TEST_NETWORK,
+ )
+
+ mock_pipeline_service_create.assert_called_once_with(
+ parent=_TEST_PARENT,
+ pipeline_job=expected_gapic_pipeline_job,
+ pipeline_job_id=_TEST_PIPELINE_JOB_ID,
+ )
+
+ mock_pipeline_service_get.assert_called_with(
+ name=_TEST_PIPELINE_JOB_NAME, retry=base._DEFAULT_RETRY
+ )
+
+ assert job._gca_resource == make_pipeline_job(
+ gca_pipeline_state_v1.PipelineState.PIPELINE_STATE_SUCCEEDED
+ )
+
@pytest.mark.parametrize(
"job_spec_json", [_TEST_PIPELINE_SPEC, _TEST_PIPELINE_JOB],
)
From 89078e0d2a719e2b0d25ae36ecd06c356a5a33c9 Mon Sep 17 00:00:00 2001
From: Ivan Cheung
Date: Tue, 15 Feb 2022 16:49:30 -0500
Subject: [PATCH 03/15] feat: Added scheduling to CustomTrainingJob,
CustomPythonPackageTrainingJob, CustomContainerTrainingJob (#970)
* Added scheduling to CustomTrainingJob
* Added unit tests
Fixed tests
Fixed test
fix: Broken test
* Added integration test
* Removed comment
* Updated e2e tabular test
* Fixed lint issue
* Simplified tests
* Added more assertions
---
google/cloud/aiplatform/training_jobs.py | 83 ++++++++
tests/system/aiplatform/test_e2e_tabular.py | 15 ++
tests/unit/aiplatform/test_training_jobs.py | 220 +++++++++++++++++++-
3 files changed, 317 insertions(+), 1 deletion(-)
diff --git a/google/cloud/aiplatform/training_jobs.py b/google/cloud/aiplatform/training_jobs.py
index 38aafef4fa..a0e8ed8125 100644
--- a/google/cloud/aiplatform/training_jobs.py
+++ b/google/cloud/aiplatform/training_jobs.py
@@ -1379,6 +1379,8 @@ def _prepare_training_task_inputs_and_output_dir(
base_output_dir: Optional[str] = None,
service_account: Optional[str] = None,
network: Optional[str] = None,
+ timeout: Optional[int] = None,
+ restart_job_on_worker_restart: bool = False,
enable_web_access: bool = False,
tensorboard: Optional[str] = None,
) -> Tuple[Dict, str]:
@@ -1398,6 +1400,13 @@ def _prepare_training_task_inputs_and_output_dir(
should be peered. For example, projects/12345/global/networks/myVPC.
Private services access must already be configured for the network.
If left unspecified, the job is not peered with any network.
+ timeout (int):
+ The maximum job running time in seconds. The default is 7 days.
+ restart_job_on_worker_restart (bool):
+ Restarts the entire CustomJob if a worker
+ gets restarted. This feature can be used by
+ distributed training jobs that are not resilient
+ to workers leaving and joining a job.
enable_web_access (bool):
Whether you want Vertex AI to enable interactive shell access
to training containers.
@@ -1442,6 +1451,14 @@ def _prepare_training_task_inputs_and_output_dir(
if enable_web_access:
training_task_inputs["enable_web_access"] = enable_web_access
+ if timeout or restart_job_on_worker_restart:
+ timeout = f"{timeout}s" if timeout else None
+ scheduling = {
+ "timeout": timeout,
+ "restart_job_on_worker_restart": restart_job_on_worker_restart,
+ }
+ training_task_inputs["scheduling"] = scheduling
+
return training_task_inputs, base_output_dir
@property
@@ -1794,6 +1811,8 @@ def run(
test_filter_split: Optional[str] = None,
predefined_split_column_name: Optional[str] = None,
timestamp_split_column_name: Optional[str] = None,
+ timeout: Optional[int] = None,
+ restart_job_on_worker_restart: bool = False,
enable_web_access: bool = False,
tensorboard: Optional[str] = None,
sync=True,
@@ -2014,6 +2033,13 @@ def run(
that piece is ignored by the pipeline.
Supported only for tabular and time series Datasets.
+ timeout (int):
+ The maximum job running time in seconds. The default is 7 days.
+ restart_job_on_worker_restart (bool):
+ Restarts the entire CustomJob if a worker
+ gets restarted. This feature can be used by
+ distributed training jobs that are not resilient
+ to workers leaving and joining a job.
enable_web_access (bool):
Whether you want Vertex AI to enable interactive shell access
to training containers.
@@ -2080,6 +2106,8 @@ def run(
test_filter_split=test_filter_split,
predefined_split_column_name=predefined_split_column_name,
timestamp_split_column_name=timestamp_split_column_name,
+ timeout=timeout,
+ restart_job_on_worker_restart=restart_job_on_worker_restart,
enable_web_access=enable_web_access,
tensorboard=tensorboard,
reduction_server_container_uri=reduction_server_container_uri
@@ -2117,6 +2145,8 @@ def _run(
test_filter_split: Optional[str] = None,
predefined_split_column_name: Optional[str] = None,
timestamp_split_column_name: Optional[str] = None,
+ timeout: Optional[int] = None,
+ restart_job_on_worker_restart: bool = False,
enable_web_access: bool = False,
tensorboard: Optional[str] = None,
reduction_server_container_uri: Optional[str] = None,
@@ -2237,6 +2267,13 @@ def _run(
that piece is ignored by the pipeline.
Supported only for tabular and time series Datasets.
+ timeout (int):
+ The maximum job running time in seconds. The default is 7 days.
+ restart_job_on_worker_restart (bool):
+ Restarts the entire CustomJob if a worker
+ gets restarted. This feature can be used by
+ distributed training jobs that are not resilient
+ to workers leaving and joining a job.
enable_web_access (bool):
Whether you want Vertex AI to enable interactive shell access
to training containers.
@@ -2309,6 +2346,8 @@ def _run(
base_output_dir=base_output_dir,
service_account=service_account,
network=network,
+ timeout=timeout,
+ restart_job_on_worker_restart=restart_job_on_worker_restart,
enable_web_access=enable_web_access,
tensorboard=tensorboard,
)
@@ -2598,6 +2637,8 @@ def run(
test_filter_split: Optional[str] = None,
predefined_split_column_name: Optional[str] = None,
timestamp_split_column_name: Optional[str] = None,
+ timeout: Optional[int] = None,
+ restart_job_on_worker_restart: bool = False,
enable_web_access: bool = False,
tensorboard: Optional[str] = None,
sync=True,
@@ -2811,6 +2852,13 @@ def run(
that piece is ignored by the pipeline.
Supported only for tabular and time series Datasets.
+ timeout (int):
+ The maximum job running time in seconds. The default is 7 days.
+ restart_job_on_worker_restart (bool):
+ Restarts the entire CustomJob if a worker
+ gets restarted. This feature can be used by
+ distributed training jobs that are not resilient
+ to workers leaving and joining a job.
enable_web_access (bool):
Whether you want Vertex AI to enable interactive shell access
to training containers.
@@ -2876,6 +2924,8 @@ def run(
test_filter_split=test_filter_split,
predefined_split_column_name=predefined_split_column_name,
timestamp_split_column_name=timestamp_split_column_name,
+ timeout=timeout,
+ restart_job_on_worker_restart=restart_job_on_worker_restart,
enable_web_access=enable_web_access,
tensorboard=tensorboard,
reduction_server_container_uri=reduction_server_container_uri
@@ -2912,6 +2962,8 @@ def _run(
test_filter_split: Optional[str] = None,
predefined_split_column_name: Optional[str] = None,
timestamp_split_column_name: Optional[str] = None,
+ timeout: Optional[int] = None,
+ restart_job_on_worker_restart: bool = False,
enable_web_access: bool = False,
tensorboard: Optional[str] = None,
reduction_server_container_uri: Optional[str] = None,
@@ -2965,6 +3017,13 @@ def _run(
should be peered. For example, projects/12345/global/networks/myVPC.
Private services access must already be configured for the network.
If left unspecified, the job is not peered with any network.
+ timeout (int):
+ The maximum job running time in seconds. The default is 7 days.
+ restart_job_on_worker_restart (bool):
+ Restarts the entire CustomJob if a worker
+ gets restarted. This feature can be used by
+ distributed training jobs that are not resilient
+ to workers leaving and joining a job.
bigquery_destination (str):
The BigQuery project location where the training data is to
be written to. In the given project a new dataset is created
@@ -3094,6 +3153,8 @@ def _run(
base_output_dir=base_output_dir,
service_account=service_account,
network=network,
+ timeout=timeout,
+ restart_job_on_worker_restart=restart_job_on_worker_restart,
enable_web_access=enable_web_access,
tensorboard=tensorboard,
)
@@ -5373,6 +5434,8 @@ def run(
test_filter_split: Optional[str] = None,
predefined_split_column_name: Optional[str] = None,
timestamp_split_column_name: Optional[str] = None,
+ timeout: Optional[int] = None,
+ restart_job_on_worker_restart: bool = False,
enable_web_access: bool = False,
tensorboard: Optional[str] = None,
sync=True,
@@ -5586,6 +5649,13 @@ def run(
that piece is ignored by the pipeline.
Supported only for tabular and time series Datasets.
+ timeout (int):
+ The maximum job running time in seconds. The default is 7 days.
+ restart_job_on_worker_restart (bool):
+ Restarts the entire CustomJob if a worker
+ gets restarted. This feature can be used by
+ distributed training jobs that are not resilient
+ to workers leaving and joining a job.
enable_web_access (bool):
Whether you want Vertex AI to enable interactive shell access
to training containers.
@@ -5646,6 +5716,8 @@ def run(
predefined_split_column_name=predefined_split_column_name,
timestamp_split_column_name=timestamp_split_column_name,
bigquery_destination=bigquery_destination,
+ timeout=timeout,
+ restart_job_on_worker_restart=restart_job_on_worker_restart,
enable_web_access=enable_web_access,
tensorboard=tensorboard,
reduction_server_container_uri=reduction_server_container_uri
@@ -5682,6 +5754,8 @@ def _run(
predefined_split_column_name: Optional[str] = None,
timestamp_split_column_name: Optional[str] = None,
bigquery_destination: Optional[str] = None,
+ timeout: Optional[int] = None,
+ restart_job_on_worker_restart: bool = False,
enable_web_access: bool = False,
tensorboard: Optional[str] = None,
reduction_server_container_uri: Optional[str] = None,
@@ -5785,6 +5859,13 @@ def _run(
that piece is ignored by the pipeline.
Supported only for tabular and time series Datasets.
+ timeout (int):
+ The maximum job running time in seconds. The default is 7 days.
+ restart_job_on_worker_restart (bool):
+ Restarts the entire CustomJob if a worker
+ gets restarted. This feature can be used by
+ distributed training jobs that are not resilient
+ to workers leaving and joining a job.
enable_web_access (bool):
Whether you want Vertex AI to enable interactive shell access
to training containers.
@@ -5851,6 +5932,8 @@ def _run(
base_output_dir=base_output_dir,
service_account=service_account,
network=network,
+ timeout=timeout,
+ restart_job_on_worker_restart=restart_job_on_worker_restart,
enable_web_access=enable_web_access,
tensorboard=tensorboard,
)
diff --git a/tests/system/aiplatform/test_e2e_tabular.py b/tests/system/aiplatform/test_e2e_tabular.py
index 3fcae149c3..df6c21f2c3 100644
--- a/tests/system/aiplatform/test_e2e_tabular.py
+++ b/tests/system/aiplatform/test_e2e_tabular.py
@@ -109,6 +109,8 @@ def test_end_to_end_tabular(self, shared_state):
ds,
replica_count=1,
model_display_name=self._make_display_name("custom-housing-model"),
+ timeout=1234,
+ restart_job_on_worker_restart=True,
enable_web_access=True,
sync=False,
)
@@ -147,6 +149,19 @@ def test_end_to_end_tabular(self, shared_state):
# Send online prediction with same instance to both deployed models
# This sample is taken from an observation where median_house_value = 94600
custom_endpoint.wait()
+
+ # Check scheduling is correctly set
+ assert (
+ custom_job._gca_resource.training_task_inputs["scheduling"]["timeout"]
+ == "1234s"
+ )
+ assert (
+ custom_job._gca_resource.training_task_inputs["scheduling"][
+ "restartJobOnWorkerRestart"
+ ]
+ is True
+ )
+
custom_prediction = custom_endpoint.predict([_INSTANCE])
custom_batch_prediction_job.wait()
diff --git a/tests/unit/aiplatform/test_training_jobs.py b/tests/unit/aiplatform/test_training_jobs.py
index b89a69ce24..6eb1a738cf 100644
--- a/tests/unit/aiplatform/test_training_jobs.py
+++ b/tests/unit/aiplatform/test_training_jobs.py
@@ -69,7 +69,7 @@
from google.cloud import storage
from google.protobuf import json_format
from google.protobuf import struct_pb2
-
+from google.protobuf import duration_pb2 # type: ignore
_TEST_BUCKET_NAME = "test-bucket"
_TEST_GCS_PATH_WITHOUT_BUCKET = "path/to/folder"
@@ -183,6 +183,10 @@
_TEST_MODEL_ENCRYPTION_SPEC = gca_encryption_spec.EncryptionSpec(
kms_key_name=_TEST_MODEL_ENCRYPTION_KEY_NAME
)
+
+_TEST_TIMEOUT = 1000
+_TEST_RESTART_JOB_ON_WORKER_RESTART = True
+
_TEST_ENABLE_WEB_ACCESS = True
_TEST_WEB_ACCESS_URIS = {"workerpool0-0": "uri"}
@@ -202,6 +206,21 @@ def _get_custom_job_proto_with_enable_web_access(state=None, name=None, version=
return custom_job_proto
+def _get_custom_job_proto_with_scheduling(state=None, name=None, version="v1"):
+ custom_job_proto = copy.deepcopy(_TEST_BASE_CUSTOM_JOB_PROTO)
+ custom_job_proto.name = name
+ custom_job_proto.state = state
+
+ custom_job_proto.job_spec.scheduling.timeout = duration_pb2.Duration(
+ seconds=_TEST_TIMEOUT
+ )
+ custom_job_proto.job_spec.scheduling.restart_job_on_worker_restart = (
+ _TEST_RESTART_JOB_ON_WORKER_RESTART
+ )
+
+ return custom_job_proto
+
+
def local_copy_method(path):
shutil.copy(path, ".")
return pathlib.Path(path).name
@@ -548,6 +567,22 @@ def make_training_pipeline_with_enable_web_access(state):
return training_pipeline
+def make_training_pipeline_with_scheduling(state):
+ training_pipeline = gca_training_pipeline.TrainingPipeline(
+ name=_TEST_PIPELINE_RESOURCE_NAME,
+ state=state,
+ training_task_inputs={
+ "timeout": f"{_TEST_TIMEOUT}s",
+ "restart_job_on_worker_restart": _TEST_RESTART_JOB_ON_WORKER_RESTART,
+ },
+ )
+ if state == gca_pipeline_state.PipelineState.PIPELINE_STATE_RUNNING:
+ training_pipeline.training_task_metadata = {
+ "backingCustomJob": _TEST_CUSTOM_JOB_RESOURCE_NAME
+ }
+ return training_pipeline
+
+
@pytest.fixture
def mock_pipeline_service_get():
with mock.patch.object(
@@ -619,6 +654,35 @@ def mock_pipeline_service_get_with_enable_web_access():
yield mock_get_training_pipeline
+@pytest.fixture
+def mock_pipeline_service_get_with_scheduling():
+ with mock.patch.object(
+ pipeline_service_client.PipelineServiceClient, "get_training_pipeline"
+ ) as mock_get_training_pipeline:
+ mock_get_training_pipeline.side_effect = [
+ make_training_pipeline_with_scheduling(
+ state=gca_pipeline_state.PipelineState.PIPELINE_STATE_PENDING,
+ ),
+ make_training_pipeline_with_scheduling(
+ state=gca_pipeline_state.PipelineState.PIPELINE_STATE_RUNNING,
+ ),
+ make_training_pipeline_with_scheduling(
+ state=gca_pipeline_state.PipelineState.PIPELINE_STATE_RUNNING,
+ ),
+ make_training_pipeline_with_scheduling(
+ state=gca_pipeline_state.PipelineState.PIPELINE_STATE_RUNNING,
+ ),
+ make_training_pipeline_with_scheduling(
+ state=gca_pipeline_state.PipelineState.PIPELINE_STATE_SUCCEEDED,
+ ),
+ make_training_pipeline_with_scheduling(
+ state=gca_pipeline_state.PipelineState.PIPELINE_STATE_SUCCEEDED,
+ ),
+ ]
+
+ yield mock_get_training_pipeline
+
+
@pytest.fixture
def mock_pipeline_service_cancel():
with mock.patch.object(
@@ -650,6 +714,17 @@ def mock_pipeline_service_create_with_enable_web_access():
yield mock_create_training_pipeline
+@pytest.fixture
+def mock_pipeline_service_create_with_scheduling():
+ with mock.patch.object(
+ pipeline_service_client.PipelineServiceClient, "create_training_pipeline"
+ ) as mock_create_training_pipeline:
+ mock_create_training_pipeline.return_value = make_training_pipeline_with_scheduling(
+ state=gca_pipeline_state.PipelineState.PIPELINE_STATE_PENDING,
+ )
+ yield mock_create_training_pipeline
+
+
@pytest.fixture
def mock_pipeline_service_get_with_no_model_to_upload():
with mock.patch.object(
@@ -1397,6 +1472,54 @@ def test_run_call_pipeline_service_create_with_enable_web_access(
gca_pipeline_state.PipelineState.PIPELINE_STATE_SUCCEEDED
)
+ @pytest.mark.usefixtures(
+ "mock_pipeline_service_create_with_scheduling",
+ "mock_pipeline_service_get_with_scheduling",
+ "mock_python_package_to_gcs",
+ )
+ @pytest.mark.parametrize("sync", [True, False])
+ def test_run_call_pipeline_service_create_with_scheduling(self, sync, caplog):
+
+ aiplatform.init(
+ project=_TEST_PROJECT,
+ staging_bucket=_TEST_BUCKET_NAME,
+ credentials=_TEST_CREDENTIALS,
+ )
+
+ job = training_jobs.CustomTrainingJob(
+ display_name=_TEST_DISPLAY_NAME,
+ script_path=_TEST_LOCAL_SCRIPT_FILE_NAME,
+ container_uri=_TEST_TRAINING_CONTAINER_IMAGE,
+ )
+
+ job.run(
+ base_output_dir=_TEST_BASE_OUTPUT_DIR,
+ args=_TEST_RUN_ARGS,
+ machine_type=_TEST_MACHINE_TYPE,
+ accelerator_type=_TEST_ACCELERATOR_TYPE,
+ accelerator_count=_TEST_ACCELERATOR_COUNT,
+ timeout=_TEST_TIMEOUT,
+ restart_job_on_worker_restart=_TEST_RESTART_JOB_ON_WORKER_RESTART,
+ sync=sync,
+ )
+
+ if not sync:
+ job.wait()
+
+ assert job._gca_resource == make_training_pipeline_with_scheduling(
+ gca_pipeline_state.PipelineState.PIPELINE_STATE_SUCCEEDED
+ )
+
+ assert (
+ job._gca_resource.state
+ == gca_pipeline_state.PipelineState.PIPELINE_STATE_SUCCEEDED
+ )
+ assert job._gca_resource.training_task_inputs["timeout"] == f"{_TEST_TIMEOUT}s"
+ assert (
+ job._gca_resource.training_task_inputs["restart_job_on_worker_restart"]
+ == _TEST_RESTART_JOB_ON_WORKER_RESTART
+ )
+
@pytest.mark.usefixtures(
"mock_pipeline_service_create_with_no_model_to_upload",
"mock_pipeline_service_get_with_no_model_to_upload",
@@ -2803,6 +2926,53 @@ def test_run_call_pipeline_service_create_with_enable_web_access(
gca_pipeline_state.PipelineState.PIPELINE_STATE_SUCCEEDED
)
+ @pytest.mark.usefixtures(
+ "mock_pipeline_service_create_with_scheduling",
+ "mock_pipeline_service_get_with_scheduling",
+ )
+ @pytest.mark.parametrize("sync", [True, False])
+ def test_run_call_pipeline_service_create_with_scheduling(self, sync, caplog):
+
+ aiplatform.init(
+ project=_TEST_PROJECT,
+ staging_bucket=_TEST_BUCKET_NAME,
+ credentials=_TEST_CREDENTIALS,
+ )
+
+ job = training_jobs.CustomContainerTrainingJob(
+ display_name=_TEST_DISPLAY_NAME,
+ container_uri=_TEST_TRAINING_CONTAINER_IMAGE,
+ command=_TEST_TRAINING_CONTAINER_CMD,
+ )
+
+ job.run(
+ base_output_dir=_TEST_BASE_OUTPUT_DIR,
+ args=_TEST_RUN_ARGS,
+ machine_type=_TEST_MACHINE_TYPE,
+ accelerator_type=_TEST_ACCELERATOR_TYPE,
+ accelerator_count=_TEST_ACCELERATOR_COUNT,
+ timeout=_TEST_TIMEOUT,
+ restart_job_on_worker_restart=_TEST_RESTART_JOB_ON_WORKER_RESTART,
+ sync=sync,
+ )
+
+ if not sync:
+ job.wait()
+
+ assert job._gca_resource == make_training_pipeline_with_scheduling(
+ gca_pipeline_state.PipelineState.PIPELINE_STATE_SUCCEEDED
+ )
+
+ assert (
+ job._gca_resource.state
+ == gca_pipeline_state.PipelineState.PIPELINE_STATE_SUCCEEDED
+ )
+ assert job._gca_resource.training_task_inputs["timeout"] == f"{_TEST_TIMEOUT}s"
+ assert (
+ job._gca_resource.training_task_inputs["restart_job_on_worker_restart"]
+ == _TEST_RESTART_JOB_ON_WORKER_RESTART
+ )
+
@pytest.mark.parametrize("sync", [True, False])
def test_run_returns_none_if_no_model_to_upload(
self,
@@ -4481,6 +4651,54 @@ def test_run_call_pipeline_service_create_with_enable_web_access(
gca_pipeline_state.PipelineState.PIPELINE_STATE_SUCCEEDED
)
+ @pytest.mark.usefixtures(
+ "mock_pipeline_service_create_with_scheduling",
+ "mock_pipeline_service_get_with_scheduling",
+ )
+ @pytest.mark.parametrize("sync", [True, False])
+ def test_run_call_pipeline_service_create_with_scheduling(self, sync, caplog):
+
+ aiplatform.init(
+ project=_TEST_PROJECT,
+ staging_bucket=_TEST_BUCKET_NAME,
+ credentials=_TEST_CREDENTIALS,
+ )
+
+ job = training_jobs.CustomPythonPackageTrainingJob(
+ display_name=_TEST_DISPLAY_NAME,
+ python_package_gcs_uri=_TEST_OUTPUT_PYTHON_PACKAGE_PATH,
+ python_module_name=_TEST_PYTHON_MODULE_NAME,
+ container_uri=_TEST_TRAINING_CONTAINER_IMAGE,
+ )
+
+ job.run(
+ base_output_dir=_TEST_BASE_OUTPUT_DIR,
+ args=_TEST_RUN_ARGS,
+ machine_type=_TEST_MACHINE_TYPE,
+ accelerator_type=_TEST_ACCELERATOR_TYPE,
+ accelerator_count=_TEST_ACCELERATOR_COUNT,
+ timeout=_TEST_TIMEOUT,
+ restart_job_on_worker_restart=_TEST_RESTART_JOB_ON_WORKER_RESTART,
+ sync=sync,
+ )
+
+ if not sync:
+ job.wait()
+
+ assert job._gca_resource == make_training_pipeline_with_scheduling(
+ gca_pipeline_state.PipelineState.PIPELINE_STATE_SUCCEEDED
+ )
+
+ assert (
+ job._gca_resource.state
+ == gca_pipeline_state.PipelineState.PIPELINE_STATE_SUCCEEDED
+ )
+ assert job._gca_resource.training_task_inputs["timeout"] == f"{_TEST_TIMEOUT}s"
+ assert (
+ job._gca_resource.training_task_inputs["restart_job_on_worker_restart"]
+ == _TEST_RESTART_JOB_ON_WORKER_RESTART
+ )
+
@pytest.mark.usefixtures(
"mock_pipeline_service_create_with_no_model_to_upload",
"mock_pipeline_service_get_with_no_model_to_upload",
From ef3fcc86fb3808b37706470c8c49903ec3a302fb Mon Sep 17 00:00:00 2001
From: Anthonios Partheniou
Date: Wed, 16 Feb 2022 13:23:04 -0500
Subject: [PATCH 04/15] fix: remove empty scripts kwarg in setup.py (#1014)
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit
The empty kwarg `scripts` was introduced in https://github.com/googleapis/python-aiplatform/pull/96.
Towards #1003 🦕
---
setup.py | 1 -
1 file changed, 1 deletion(-)
diff --git a/setup.py b/setup.py
index 2deda8b68f..01aa6686f8 100644
--- a/setup.py
+++ b/setup.py
@@ -111,7 +111,6 @@
"cloud_profiler": profiler_extra_require,
},
python_requires=">=3.6",
- scripts=[],
classifiers=[
"Development Status :: 5 - Production/Stable",
"Intended Audience :: Developers",
From 09c2e8a368c6d265d99acfb12addd5ba6f1a50e6 Mon Sep 17 00:00:00 2001
From: "gcf-owl-bot[bot]" <78513119+gcf-owl-bot[bot]@users.noreply.github.com>
Date: Tue, 22 Feb 2022 15:15:19 -0800
Subject: [PATCH 05/15] feat: add TPU_V2 & TPU_V3 values to AcceleratorType in
aiplatform v1/v1beta1 accelerator_type.proto (#1010)
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit
* chore: use gapic-generator-python 0.63.2
docs: add generated snippets
PiperOrigin-RevId: 427792504
Source-Link: https://github.com/googleapis/googleapis/commit/55b9e1e0b3106c850d13958352bc0751147b6b15
Source-Link: https://github.com/googleapis/googleapis-gen/commit/bf4e86b753f42cb0edb1fd51fbe840d7da0a1cde
Copy-Tag: eyJwIjoiLmdpdGh1Yi8uT3dsQm90LnlhbWwiLCJoIjoiYmY0ZTg2Yjc1M2Y0MmNiMGVkYjFmZDUxZmJlODQwZDdkYTBhMWNkZSJ9
* 🦉 Updates from OwlBot
See https://github.com/googleapis/repo-automation-bots/blob/main/packages/owl-bot/README.md
* chore(python): Prepare google/cloud/certificatemanager v1 BUILD.bazel for release
PiperOrigin-RevId: 428507726
Source-Link: https://github.com/googleapis/googleapis/commit/82528cf321ed0d09b7d93b7cee9122ccea422ad2
Source-Link: https://github.com/googleapis/googleapis-gen/commit/91082242268a7eb8c8f47048e86282420d93d3c0
Copy-Tag: eyJwIjoiLmdpdGh1Yi8uT3dsQm90LnlhbWwiLCJoIjoiOTEwODIyNDIyNjhhN2ViOGM4ZjQ3MDQ4ZTg2MjgyNDIwZDkzZDNjMCJ9
* 🦉 Updates from OwlBot
See https://github.com/googleapis/repo-automation-bots/blob/main/packages/owl-bot/README.md
* docs(aiplatform): fix misformatted field description
PiperOrigin-RevId: 429098186
Source-Link: https://github.com/googleapis/googleapis/commit/e75c527befe1aa05a799c01016bb8647c69019ab
Source-Link: https://github.com/googleapis/googleapis-gen/commit/611471333818f88f66bfe24ac97d2064ce0b52ad
Copy-Tag: eyJwIjoiLmdpdGh1Yi8uT3dsQm90LnlhbWwiLCJoIjoiNjExNDcxMzMzODE4Zjg4ZjY2YmZlMjRhYzk3ZDIwNjRjZTBiNTJhZCJ9
* 🦉 Updates from OwlBot post-processor
See https://github.com/googleapis/repo-automation-bots/blob/main/packages/owl-bot/README.md
* 🦉 Updates from OwlBot post-processor
See https://github.com/googleapis/repo-automation-bots/blob/main/packages/owl-bot/README.md
* feat: add TPU_V2 & TPU_V3 values to AcceleratorType in aiplatform v1/v1beta1 accelerator_type.proto
PiperOrigin-RevId: 430259767
Source-Link: https://github.com/googleapis/googleapis/commit/f873e7f132a5062dc9a643335f35275e43cbc3c7
Source-Link: https://github.com/googleapis/googleapis-gen/commit/49f573f3b76da8880d3a82b7903036fad1a3afc5
Copy-Tag: eyJwIjoiLmdpdGh1Yi8uT3dsQm90LnlhbWwiLCJoIjoiNDlmNTczZjNiNzZkYTg4ODBkM2E4MmI3OTAzMDM2ZmFkMWEzYWZjNSJ9
* 🦉 Updates from OwlBot post-processor
See https://github.com/googleapis/repo-automation-bots/blob/main/packages/owl-bot/README.md
* 🦉 Updates from OwlBot post-processor
See https://github.com/googleapis/repo-automation-bots/blob/main/packages/owl-bot/README.md
Co-authored-by: Owl Bot
Co-authored-by: Anthonios Partheniou
---
.../services/dataset_service/async_client.py | 229 +
.../services/dataset_service/client.py | 239 +
.../services/endpoint_service/async_client.py | 165 +
.../services/endpoint_service/client.py | 172 +
.../async_client.py | 51 +
.../client.py | 53 +
.../featurestore_service/async_client.py | 479 +
.../services/featurestore_service/client.py | 499 +
.../index_endpoint_service/async_client.py | 194 +
.../services/index_endpoint_service/client.py | 202 +
.../services/index_service/async_client.py | 116 +
.../services/index_service/client.py | 121 +
.../services/job_service/async_client.py | 601 +-
.../services/job_service/client.py | 629 +-
.../services/metadata_service/async_client.py | 638 +
.../services/metadata_service/client.py | 669 +
.../migration_service/async_client.py | 51 +
.../services/migration_service/client.py | 75 +-
.../services/model_service/async_client.py | 214 +
.../services/model_service/client.py | 224 +
.../services/pipeline_service/async_client.py | 204 +
.../services/pipeline_service/client.py | 214 +
.../prediction_service/async_client.py | 67 +
.../services/prediction_service/client.py | 70 +
.../specialist_pool_service/async_client.py | 118 +
.../specialist_pool_service/client.py | 123 +
.../tensorboard_service/async_client.py | 617 +
.../services/tensorboard_service/client.py | 645 +
.../services/vizier_service/async_client.py | 308 +
.../services/vizier_service/client.py | 323 +
.../aiplatform_v1/types/accelerator_type.py | 2 +
.../cloud/aiplatform_v1/types/job_service.py | 2 +-
.../services/dataset_service/async_client.py | 229 +
.../services/dataset_service/client.py | 239 +
.../services/endpoint_service/async_client.py | 165 +
.../services/endpoint_service/client.py | 172 +
.../async_client.py | 51 +
.../client.py | 53 +
.../featurestore_service/async_client.py | 479 +
.../services/featurestore_service/client.py | 499 +
.../index_endpoint_service/async_client.py | 194 +
.../services/index_endpoint_service/client.py | 202 +
.../services/index_service/async_client.py | 116 +
.../services/index_service/client.py | 121 +
.../services/job_service/async_client.py | 601 +-
.../services/job_service/client.py | 629 +-
.../services/metadata_service/async_client.py | 638 +
.../services/metadata_service/client.py | 669 +
.../migration_service/async_client.py | 51 +
.../services/migration_service/client.py | 53 +
.../services/model_service/async_client.py | 214 +
.../services/model_service/client.py | 224 +
.../services/pipeline_service/async_client.py | 204 +
.../services/pipeline_service/client.py | 214 +
.../prediction_service/async_client.py | 67 +
.../services/prediction_service/client.py | 70 +
.../specialist_pool_service/async_client.py | 118 +
.../specialist_pool_service/client.py | 123 +
.../tensorboard_service/async_client.py | 617 +
.../services/tensorboard_service/client.py | 645 +
.../services/vizier_service/async_client.py | 308 +
.../services/vizier_service/client.py | 323 +
.../types/accelerator_type.py | 2 +
.../aiplatform_v1beta1/types/job_service.py | 2 +-
...v1_dataset_service_create_dataset_async.py | 6 +-
..._v1_dataset_service_create_dataset_sync.py | 6 +-
...v1_dataset_service_delete_dataset_async.py | 6 +-
..._v1_dataset_service_delete_dataset_sync.py | 6 +-
...rm_v1_dataset_service_export_data_async.py | 6 +-
...orm_v1_dataset_service_export_data_sync.py | 6 +-
...taset_service_get_annotation_spec_async.py | 6 +-
...ataset_service_get_annotation_spec_sync.py | 6 +-
...rm_v1_dataset_service_get_dataset_async.py | 6 +-
...orm_v1_dataset_service_get_dataset_sync.py | 6 +-
...rm_v1_dataset_service_import_data_async.py | 6 +-
...orm_v1_dataset_service_import_data_sync.py | 6 +-
..._dataset_service_list_annotations_async.py | 6 +-
...1_dataset_service_list_annotations_sync.py | 6 +-
...1_dataset_service_list_data_items_async.py | 6 +-
...v1_dataset_service_list_data_items_sync.py | 6 +-
..._v1_dataset_service_list_datasets_async.py | 6 +-
...m_v1_dataset_service_list_datasets_sync.py | 6 +-
...v1_dataset_service_update_dataset_async.py | 4 +-
..._v1_dataset_service_update_dataset_sync.py | 4 +-
..._endpoint_service_create_endpoint_async.py | 6 +-
...1_endpoint_service_create_endpoint_sync.py | 6 +-
..._endpoint_service_delete_endpoint_async.py | 6 +-
...1_endpoint_service_delete_endpoint_sync.py | 6 +-
..._v1_endpoint_service_deploy_model_async.py | 8 +-
...m_v1_endpoint_service_deploy_model_sync.py | 8 +-
..._v1_endpoint_service_get_endpoint_async.py | 6 +-
...m_v1_endpoint_service_get_endpoint_sync.py | 6 +-
...1_endpoint_service_list_endpoints_async.py | 6 +-
...v1_endpoint_service_list_endpoints_sync.py | 6 +-
...1_endpoint_service_undeploy_model_async.py | 6 +-
...v1_endpoint_service_undeploy_model_sync.py | 6 +-
..._endpoint_service_update_endpoint_async.py | 4 +-
...1_endpoint_service_update_endpoint_sync.py | 4 +-
...rving_service_read_feature_values_async.py | 6 +-
...erving_service_read_feature_values_sync.py | 6 +-
...ice_streaming_read_feature_values_async.py | 6 +-
...vice_streaming_read_feature_values_sync.py | 6 +-
...ore_service_batch_create_features_async.py | 8 +-
...tore_service_batch_create_features_sync.py | 8 +-
...service_batch_read_feature_values_async.py | 6 +-
..._service_batch_read_feature_values_sync.py | 6 +-
...estore_service_create_entity_type_async.py | 6 +-
...restore_service_create_entity_type_sync.py | 6 +-
...aturestore_service_create_feature_async.py | 6 +-
...eaturestore_service_create_feature_sync.py | 6 +-
...store_service_create_featurestore_async.py | 6 +-
...estore_service_create_featurestore_sync.py | 6 +-
...estore_service_delete_entity_type_async.py | 6 +-
...restore_service_delete_entity_type_sync.py | 6 +-
...aturestore_service_delete_feature_async.py | 6 +-
...eaturestore_service_delete_feature_sync.py | 6 +-
...store_service_delete_featurestore_async.py | 6 +-
...estore_service_delete_featurestore_sync.py | 6 +-
...ore_service_export_feature_values_async.py | 6 +-
...tore_service_export_feature_values_sync.py | 6 +-
...turestore_service_get_entity_type_async.py | 6 +-
...aturestore_service_get_entity_type_sync.py | 6 +-
..._featurestore_service_get_feature_async.py | 6 +-
...1_featurestore_service_get_feature_sync.py | 6 +-
...urestore_service_get_featurestore_async.py | 6 +-
...turestore_service_get_featurestore_sync.py | 6 +-
...ore_service_import_feature_values_async.py | 6 +-
...tore_service_import_feature_values_sync.py | 6 +-
...restore_service_list_entity_types_async.py | 6 +-
...urestore_service_list_entity_types_sync.py | 6 +-
...eaturestore_service_list_features_async.py | 6 +-
...featurestore_service_list_features_sync.py | 6 +-
...estore_service_list_featurestores_async.py | 6 +-
...restore_service_list_featurestores_sync.py | 6 +-
...turestore_service_search_features_async.py | 6 +-
...aturestore_service_search_features_sync.py | 6 +-
...estore_service_update_entity_type_async.py | 4 +-
...restore_service_update_entity_type_sync.py | 4 +-
...aturestore_service_update_feature_async.py | 4 +-
...eaturestore_service_update_feature_sync.py | 4 +-
...store_service_update_featurestore_async.py | 4 +-
...estore_service_update_featurestore_sync.py | 4 +-
...int_service_create_index_endpoint_async.py | 7 +-
...oint_service_create_index_endpoint_sync.py | 7 +-
...int_service_delete_index_endpoint_async.py | 6 +-
...oint_service_delete_index_endpoint_sync.py | 6 +-
...dex_endpoint_service_deploy_index_async.py | 8 +-
...ndex_endpoint_service_deploy_index_sync.py | 8 +-
...dpoint_service_get_index_endpoint_async.py | 6 +-
...ndpoint_service_get_index_endpoint_sync.py | 6 +-
...oint_service_list_index_endpoints_async.py | 6 +-
...point_service_list_index_endpoints_sync.py | 6 +-
...int_service_mutate_deployed_index_async.py | 54 +
...oint_service_mutate_deployed_index_sync.py | 54 +
...x_endpoint_service_undeploy_index_async.py | 6 +-
...ex_endpoint_service_undeploy_index_sync.py | 6 +-
...int_service_update_index_endpoint_async.py | 5 +-
...oint_service_update_index_endpoint_sync.py | 5 +-
...orm_v1_index_service_create_index_async.py | 6 +-
...form_v1_index_service_create_index_sync.py | 6 +-
...orm_v1_index_service_delete_index_async.py | 6 +-
...form_v1_index_service_delete_index_sync.py | 6 +-
...atform_v1_index_service_get_index_async.py | 6 +-
...latform_v1_index_service_get_index_sync.py | 6 +-
...orm_v1_index_service_list_indexes_async.py | 6 +-
...form_v1_index_service_list_indexes_sync.py | 6 +-
...orm_v1_index_service_update_index_async.py | 4 +-
...form_v1_index_service_update_index_sync.py | 4 +-
...rvice_cancel_batch_prediction_job_async.py | 6 +-
...ervice_cancel_batch_prediction_job_sync.py | 6 +-
..._v1_job_service_cancel_custom_job_async.py | 6 +-
...m_v1_job_service_cancel_custom_job_sync.py | 6 +-
..._service_cancel_data_labeling_job_async.py | 6 +-
...b_service_cancel_data_labeling_job_sync.py | 6 +-
..._cancel_hyperparameter_tuning_job_async.py | 6 +-
...e_cancel_hyperparameter_tuning_job_sync.py | 6 +-
...rvice_create_batch_prediction_job_async.py | 7 +-
...ervice_create_batch_prediction_job_sync.py | 7 +-
..._v1_job_service_create_custom_job_async.py | 6 +-
...m_v1_job_service_create_custom_job_sync.py | 6 +-
..._service_create_data_labeling_job_async.py | 8 +-
...b_service_create_data_labeling_job_sync.py | 8 +-
..._create_hyperparameter_tuning_job_async.py | 6 +-
...e_create_hyperparameter_tuning_job_sync.py | 6 +-
...e_model_deployment_monitoring_job_async.py | 8 +-
...te_model_deployment_monitoring_job_sync.py | 8 +-
...rvice_delete_batch_prediction_job_async.py | 6 +-
...ervice_delete_batch_prediction_job_sync.py | 6 +-
..._v1_job_service_delete_custom_job_async.py | 6 +-
...m_v1_job_service_delete_custom_job_sync.py | 6 +-
..._service_delete_data_labeling_job_async.py | 6 +-
...b_service_delete_data_labeling_job_sync.py | 6 +-
..._delete_hyperparameter_tuning_job_async.py | 6 +-
...e_delete_hyperparameter_tuning_job_sync.py | 6 +-
...e_model_deployment_monitoring_job_async.py | 6 +-
...te_model_deployment_monitoring_job_sync.py | 6 +-
..._service_get_batch_prediction_job_async.py | 6 +-
...b_service_get_batch_prediction_job_sync.py | 6 +-
...orm_v1_job_service_get_custom_job_async.py | 6 +-
...form_v1_job_service_get_custom_job_sync.py | 6 +-
...job_service_get_data_labeling_job_async.py | 6 +-
..._job_service_get_data_labeling_job_sync.py | 6 +-
...ice_get_hyperparameter_tuning_job_async.py | 6 +-
...vice_get_hyperparameter_tuning_job_sync.py | 6 +-
...t_model_deployment_monitoring_job_async.py | 6 +-
...et_model_deployment_monitoring_job_sync.py | 6 +-
...ervice_list_batch_prediction_jobs_async.py | 6 +-
...service_list_batch_prediction_jobs_sync.py | 6 +-
...m_v1_job_service_list_custom_jobs_async.py | 6 +-
...rm_v1_job_service_list_custom_jobs_sync.py | 6 +-
...b_service_list_data_labeling_jobs_async.py | 6 +-
...ob_service_list_data_labeling_jobs_sync.py | 6 +-
...e_list_hyperparameter_tuning_jobs_async.py | 6 +-
...ce_list_hyperparameter_tuning_jobs_sync.py | 6 +-
..._model_deployment_monitoring_jobs_async.py | 6 +-
...t_model_deployment_monitoring_jobs_sync.py | 6 +-
...e_model_deployment_monitoring_job_async.py | 6 +-
...se_model_deployment_monitoring_job_sync.py | 6 +-
...e_model_deployment_monitoring_job_async.py | 6 +-
...me_model_deployment_monitoring_job_sync.py | 6 +-
...oyment_monitoring_stats_anomalies_async.py | 6 +-
...loyment_monitoring_stats_anomalies_sync.py | 6 +-
...e_model_deployment_monitoring_job_async.py | 6 +-
...te_model_deployment_monitoring_job_sync.py | 6 +-
..._context_artifacts_and_executions_async.py | 6 +-
...d_context_artifacts_and_executions_sync.py | 6 +-
...data_service_add_context_children_async.py | 6 +-
...adata_service_add_context_children_sync.py | 6 +-
...data_service_add_execution_events_async.py | 6 +-
...adata_service_add_execution_events_sync.py | 6 +-
..._metadata_service_create_artifact_async.py | 6 +-
...1_metadata_service_create_artifact_sync.py | 6 +-
...1_metadata_service_create_context_async.py | 6 +-
...v1_metadata_service_create_context_sync.py | 6 +-
...metadata_service_create_execution_async.py | 6 +-
..._metadata_service_create_execution_sync.py | 6 +-
...ta_service_create_metadata_schema_async.py | 6 +-
...ata_service_create_metadata_schema_sync.py | 6 +-
...ata_service_create_metadata_store_async.py | 6 +-
...data_service_create_metadata_store_sync.py | 6 +-
..._metadata_service_delete_artifact_async.py | 6 +-
...1_metadata_service_delete_artifact_sync.py | 6 +-
...1_metadata_service_delete_context_async.py | 6 +-
...v1_metadata_service_delete_context_sync.py | 6 +-
...metadata_service_delete_execution_async.py | 6 +-
..._metadata_service_delete_execution_sync.py | 6 +-
...ata_service_delete_metadata_store_async.py | 6 +-
...data_service_delete_metadata_store_sync.py | 6 +-
..._v1_metadata_service_get_artifact_async.py | 6 +-
...m_v1_metadata_service_get_artifact_sync.py | 6 +-
...m_v1_metadata_service_get_context_async.py | 6 +-
...rm_v1_metadata_service_get_context_sync.py | 6 +-
...v1_metadata_service_get_execution_async.py | 6 +-
..._v1_metadata_service_get_execution_sync.py | 6 +-
...adata_service_get_metadata_schema_async.py | 6 +-
...tadata_service_get_metadata_schema_sync.py | 6 +-
...tadata_service_get_metadata_store_async.py | 6 +-
...etadata_service_get_metadata_store_sync.py | 6 +-
...1_metadata_service_list_artifacts_async.py | 6 +-
...v1_metadata_service_list_artifacts_sync.py | 6 +-
...v1_metadata_service_list_contexts_async.py | 6 +-
..._v1_metadata_service_list_contexts_sync.py | 6 +-
..._metadata_service_list_executions_async.py | 6 +-
...1_metadata_service_list_executions_sync.py | 6 +-
...ata_service_list_metadata_schemas_async.py | 6 +-
...data_service_list_metadata_schemas_sync.py | 6 +-
...data_service_list_metadata_stores_async.py | 6 +-
...adata_service_list_metadata_stores_sync.py | 6 +-
..._metadata_service_purge_artifacts_async.py | 6 +-
...1_metadata_service_purge_artifacts_sync.py | 6 +-
...1_metadata_service_purge_contexts_async.py | 6 +-
...v1_metadata_service_purge_contexts_sync.py | 6 +-
...metadata_service_purge_executions_async.py | 6 +-
..._metadata_service_purge_executions_sync.py | 6 +-
...e_query_artifact_lineage_subgraph_async.py | 6 +-
...ce_query_artifact_lineage_subgraph_sync.py | 6 +-
...ce_query_context_lineage_subgraph_async.py | 6 +-
...ice_query_context_lineage_subgraph_sync.py | 6 +-
...uery_execution_inputs_and_outputs_async.py | 6 +-
...query_execution_inputs_and_outputs_sync.py | 6 +-
..._metadata_service_update_artifact_async.py | 4 +-
...1_metadata_service_update_artifact_sync.py | 4 +-
...1_metadata_service_update_context_async.py | 4 +-
...v1_metadata_service_update_context_sync.py | 4 +-
...metadata_service_update_execution_async.py | 4 +-
..._metadata_service_update_execution_sync.py | 4 +-
...n_service_batch_migrate_resources_async.py | 8 +-
...on_service_batch_migrate_resources_sync.py | 8 +-
...rvice_search_migratable_resources_async.py | 6 +-
...ervice_search_migratable_resources_sync.py | 6 +-
...orm_v1_model_service_delete_model_async.py | 6 +-
...form_v1_model_service_delete_model_sync.py | 6 +-
...orm_v1_model_service_export_model_async.py | 6 +-
...form_v1_model_service_export_model_sync.py | 6 +-
...atform_v1_model_service_get_model_async.py | 6 +-
...odel_service_get_model_evaluation_async.py | 6 +-
...ervice_get_model_evaluation_slice_async.py | 6 +-
...service_get_model_evaluation_slice_sync.py | 6 +-
...model_service_get_model_evaluation_sync.py | 6 +-
...latform_v1_model_service_get_model_sync.py | 6 +-
...vice_list_model_evaluation_slices_async.py | 6 +-
...rvice_list_model_evaluation_slices_sync.py | 6 +-
...el_service_list_model_evaluations_async.py | 6 +-
...del_service_list_model_evaluations_sync.py | 6 +-
...form_v1_model_service_list_models_async.py | 6 +-
...tform_v1_model_service_list_models_sync.py | 6 +-
...orm_v1_model_service_update_model_async.py | 4 +-
...form_v1_model_service_update_model_sync.py | 4 +-
...orm_v1_model_service_upload_model_async.py | 6 +-
...form_v1_model_service_upload_model_sync.py | 6 +-
...eline_service_cancel_pipeline_job_async.py | 6 +-
...peline_service_cancel_pipeline_job_sync.py | 6 +-
..._service_cancel_training_pipeline_async.py | 6 +-
...e_service_cancel_training_pipeline_sync.py | 6 +-
...eline_service_create_pipeline_job_async.py | 6 +-
...peline_service_create_pipeline_job_sync.py | 6 +-
..._service_create_training_pipeline_async.py | 6 +-
...e_service_create_training_pipeline_sync.py | 6 +-
...eline_service_delete_pipeline_job_async.py | 6 +-
...peline_service_delete_pipeline_job_sync.py | 6 +-
..._service_delete_training_pipeline_async.py | 6 +-
...e_service_delete_training_pipeline_sync.py | 6 +-
...pipeline_service_get_pipeline_job_async.py | 6 +-
..._pipeline_service_get_pipeline_job_sync.py | 6 +-
...ine_service_get_training_pipeline_async.py | 6 +-
...line_service_get_training_pipeline_sync.py | 6 +-
...peline_service_list_pipeline_jobs_async.py | 6 +-
...ipeline_service_list_pipeline_jobs_sync.py | 6 +-
...e_service_list_training_pipelines_async.py | 6 +-
...ne_service_list_training_pipelines_sync.py | 6 +-
...orm_v1_prediction_service_explain_async.py | 6 +-
...form_v1_prediction_service_explain_sync.py | 6 +-
...orm_v1_prediction_service_predict_async.py | 6 +-
...form_v1_prediction_service_predict_sync.py | 6 +-
...v1_prediction_service_raw_predict_async.py | 6 +-
..._v1_prediction_service_raw_predict_sync.py | 6 +-
...ol_service_create_specialist_pool_async.py | 6 +-
...ool_service_create_specialist_pool_sync.py | 6 +-
...ol_service_delete_specialist_pool_async.py | 6 +-
...ool_service_delete_specialist_pool_sync.py | 6 +-
..._pool_service_get_specialist_pool_async.py | 6 +-
...t_pool_service_get_specialist_pool_sync.py | 6 +-
...ool_service_list_specialist_pools_async.py | 6 +-
...pool_service_list_specialist_pools_sync.py | 6 +-
...ol_service_update_specialist_pool_async.py | 4 +-
...ool_service_update_specialist_pool_sync.py | 4 +-
...ice_batch_create_tensorboard_runs_async.py | 51 +
...vice_batch_create_tensorboard_runs_sync.py | 51 +
...ch_create_tensorboard_time_series_async.py | 51 +
...tch_create_tensorboard_time_series_sync.py | 51 +
...read_tensorboard_time_series_data_async.py | 46 +
..._read_tensorboard_time_series_data_sync.py | 46 +
...rboard_service_create_tensorboard_async.py | 53 +
...ice_create_tensorboard_experiment_async.py | 46 +
...vice_create_tensorboard_experiment_sync.py | 46 +
...rd_service_create_tensorboard_run_async.py | 50 +
...ard_service_create_tensorboard_run_sync.py | 50 +
...orboard_service_create_tensorboard_sync.py | 53 +
...ce_create_tensorboard_time_series_async.py | 50 +
...ice_create_tensorboard_time_series_sync.py | 50 +
...rboard_service_delete_tensorboard_async.py | 49 +
...ice_delete_tensorboard_experiment_async.py | 49 +
...vice_delete_tensorboard_experiment_sync.py | 49 +
...rd_service_delete_tensorboard_run_async.py | 49 +
...ard_service_delete_tensorboard_run_sync.py | 49 +
...orboard_service_delete_tensorboard_sync.py | 49 +
...ce_delete_tensorboard_time_series_async.py | 49 +
...ice_delete_tensorboard_time_series_sync.py | 49 +
...port_tensorboard_time_series_data_async.py | 46 +
...xport_tensorboard_time_series_data_sync.py | 46 +
...nsorboard_service_get_tensorboard_async.py | 45 +
...ervice_get_tensorboard_experiment_async.py | 45 +
...service_get_tensorboard_experiment_sync.py | 45 +
...board_service_get_tensorboard_run_async.py | 45 +
...rboard_service_get_tensorboard_run_sync.py | 45 +
...ensorboard_service_get_tensorboard_sync.py | 45 +
...rvice_get_tensorboard_time_series_async.py | 45 +
...ervice_get_tensorboard_time_series_sync.py | 45 +
...vice_list_tensorboard_experiments_async.py | 46 +
...rvice_list_tensorboard_experiments_sync.py | 46 +
...ard_service_list_tensorboard_runs_async.py | 46 +
...oard_service_list_tensorboard_runs_sync.py | 46 +
...vice_list_tensorboard_time_series_async.py | 46 +
...rvice_list_tensorboard_time_series_sync.py | 46 +
...orboard_service_list_tensorboards_async.py | 46 +
...sorboard_service_list_tensorboards_sync.py | 46 +
...ervice_read_tensorboard_blob_data_async.py | 46 +
...service_read_tensorboard_blob_data_sync.py | 46 +
...read_tensorboard_time_series_data_async.py | 45 +
..._read_tensorboard_time_series_data_sync.py | 45 +
...rboard_service_update_tensorboard_async.py | 52 +
...ice_update_tensorboard_experiment_async.py | 44 +
...vice_update_tensorboard_experiment_sync.py | 44 +
...rd_service_update_tensorboard_run_async.py | 48 +
...ard_service_update_tensorboard_run_sync.py | 48 +
...orboard_service_update_tensorboard_sync.py | 52 +
...ce_update_tensorboard_time_series_async.py | 49 +
...ice_update_tensorboard_time_series_sync.py | 49 +
...write_tensorboard_experiment_data_async.py | 51 +
..._write_tensorboard_experiment_data_sync.py | 51 +
...ervice_write_tensorboard_run_data_async.py | 50 +
...service_write_tensorboard_run_data_sync.py | 50 +
...ier_service_add_trial_measurement_async.py | 6 +-
...zier_service_add_trial_measurement_sync.py | 6 +-
..._check_trial_early_stopping_state_async.py | 6 +-
...e_check_trial_early_stopping_state_sync.py | 6 +-
..._v1_vizier_service_complete_trial_async.py | 6 +-
...m_v1_vizier_service_complete_trial_sync.py | 6 +-
...rm_v1_vizier_service_create_study_async.py | 6 +-
...orm_v1_vizier_service_create_study_sync.py | 6 +-
...rm_v1_vizier_service_create_trial_async.py | 6 +-
...orm_v1_vizier_service_create_trial_sync.py | 6 +-
...rm_v1_vizier_service_delete_study_async.py | 6 +-
...orm_v1_vizier_service_delete_study_sync.py | 6 +-
...rm_v1_vizier_service_delete_trial_async.py | 6 +-
...orm_v1_vizier_service_delete_trial_sync.py | 6 +-
...tform_v1_vizier_service_get_study_async.py | 6 +-
...atform_v1_vizier_service_get_study_sync.py | 6 +-
...tform_v1_vizier_service_get_trial_async.py | 6 +-
...atform_v1_vizier_service_get_trial_sync.py | 6 +-
...izier_service_list_optimal_trials_async.py | 6 +-
...vizier_service_list_optimal_trials_sync.py | 6 +-
...rm_v1_vizier_service_list_studies_async.py | 6 +-
...orm_v1_vizier_service_list_studies_sync.py | 6 +-
...orm_v1_vizier_service_list_trials_async.py | 6 +-
...form_v1_vizier_service_list_trials_sync.py | 6 +-
...rm_v1_vizier_service_lookup_study_async.py | 6 +-
...orm_v1_vizier_service_lookup_study_sync.py | 6 +-
...form_v1_vizier_service_stop_trial_async.py | 6 +-
...tform_v1_vizier_service_stop_trial_sync.py | 6 +-
..._v1_vizier_service_suggest_trials_async.py | 6 +-
...m_v1_vizier_service_suggest_trials_sync.py | 6 +-
...a1_dataset_service_create_dataset_async.py | 6 +-
...ta1_dataset_service_create_dataset_sync.py | 6 +-
...a1_dataset_service_delete_dataset_async.py | 6 +-
...ta1_dataset_service_delete_dataset_sync.py | 6 +-
...beta1_dataset_service_export_data_async.py | 6 +-
...1beta1_dataset_service_export_data_sync.py | 6 +-
...taset_service_get_annotation_spec_async.py | 6 +-
...ataset_service_get_annotation_spec_sync.py | 6 +-
...beta1_dataset_service_get_dataset_async.py | 6 +-
...1beta1_dataset_service_get_dataset_sync.py | 6 +-
...beta1_dataset_service_import_data_async.py | 6 +-
...1beta1_dataset_service_import_data_sync.py | 6 +-
..._dataset_service_list_annotations_async.py | 6 +-
...1_dataset_service_list_annotations_sync.py | 6 +-
...1_dataset_service_list_data_items_async.py | 6 +-
...a1_dataset_service_list_data_items_sync.py | 6 +-
...ta1_dataset_service_list_datasets_async.py | 6 +-
...eta1_dataset_service_list_datasets_sync.py | 6 +-
...a1_dataset_service_update_dataset_async.py | 4 +-
...ta1_dataset_service_update_dataset_sync.py | 4 +-
..._endpoint_service_create_endpoint_async.py | 6 +-
...1_endpoint_service_create_endpoint_sync.py | 6 +-
..._endpoint_service_delete_endpoint_async.py | 6 +-
...1_endpoint_service_delete_endpoint_sync.py | 6 +-
...ta1_endpoint_service_deploy_model_async.py | 8 +-
...eta1_endpoint_service_deploy_model_sync.py | 8 +-
...ta1_endpoint_service_get_endpoint_async.py | 6 +-
...eta1_endpoint_service_get_endpoint_sync.py | 6 +-
...1_endpoint_service_list_endpoints_async.py | 6 +-
...a1_endpoint_service_list_endpoints_sync.py | 6 +-
...1_endpoint_service_undeploy_model_async.py | 6 +-
...a1_endpoint_service_undeploy_model_sync.py | 6 +-
..._endpoint_service_update_endpoint_async.py | 4 +-
...1_endpoint_service_update_endpoint_sync.py | 4 +-
...rving_service_read_feature_values_async.py | 6 +-
...erving_service_read_feature_values_sync.py | 6 +-
...ice_streaming_read_feature_values_async.py | 6 +-
...vice_streaming_read_feature_values_sync.py | 6 +-
...ore_service_batch_create_features_async.py | 8 +-
...tore_service_batch_create_features_sync.py | 8 +-
...service_batch_read_feature_values_async.py | 6 +-
..._service_batch_read_feature_values_sync.py | 6 +-
...estore_service_create_entity_type_async.py | 6 +-
...restore_service_create_entity_type_sync.py | 6 +-
...aturestore_service_create_feature_async.py | 6 +-
...eaturestore_service_create_feature_sync.py | 6 +-
...store_service_create_featurestore_async.py | 6 +-
...estore_service_create_featurestore_sync.py | 6 +-
...estore_service_delete_entity_type_async.py | 6 +-
...restore_service_delete_entity_type_sync.py | 6 +-
...aturestore_service_delete_feature_async.py | 6 +-
...eaturestore_service_delete_feature_sync.py | 6 +-
...store_service_delete_featurestore_async.py | 6 +-
...estore_service_delete_featurestore_sync.py | 6 +-
...ore_service_export_feature_values_async.py | 6 +-
...tore_service_export_feature_values_sync.py | 6 +-
...turestore_service_get_entity_type_async.py | 6 +-
...aturestore_service_get_entity_type_sync.py | 6 +-
..._featurestore_service_get_feature_async.py | 6 +-
...1_featurestore_service_get_feature_sync.py | 6 +-
...urestore_service_get_featurestore_async.py | 6 +-
...turestore_service_get_featurestore_sync.py | 6 +-
...ore_service_import_feature_values_async.py | 6 +-
...tore_service_import_feature_values_sync.py | 6 +-
...restore_service_list_entity_types_async.py | 6 +-
...urestore_service_list_entity_types_sync.py | 6 +-
...eaturestore_service_list_features_async.py | 6 +-
...featurestore_service_list_features_sync.py | 6 +-
...estore_service_list_featurestores_async.py | 6 +-
...restore_service_list_featurestores_sync.py | 6 +-
...turestore_service_search_features_async.py | 6 +-
...aturestore_service_search_features_sync.py | 6 +-
...estore_service_update_entity_type_async.py | 4 +-
...restore_service_update_entity_type_sync.py | 4 +-
...aturestore_service_update_feature_async.py | 4 +-
...eaturestore_service_update_feature_sync.py | 4 +-
...store_service_update_featurestore_async.py | 4 +-
...estore_service_update_featurestore_sync.py | 4 +-
...int_service_create_index_endpoint_async.py | 7 +-
...oint_service_create_index_endpoint_sync.py | 7 +-
...int_service_delete_index_endpoint_async.py | 6 +-
...oint_service_delete_index_endpoint_sync.py | 6 +-
...dex_endpoint_service_deploy_index_async.py | 8 +-
...ndex_endpoint_service_deploy_index_sync.py | 8 +-
...dpoint_service_get_index_endpoint_async.py | 6 +-
...ndpoint_service_get_index_endpoint_sync.py | 6 +-
...oint_service_list_index_endpoints_async.py | 6 +-
...point_service_list_index_endpoints_sync.py | 6 +-
...int_service_mutate_deployed_index_async.py | 54 +
...oint_service_mutate_deployed_index_sync.py | 54 +
...x_endpoint_service_undeploy_index_async.py | 6 +-
...ex_endpoint_service_undeploy_index_sync.py | 6 +-
...int_service_update_index_endpoint_async.py | 5 +-
...oint_service_update_index_endpoint_sync.py | 5 +-
...1beta1_index_service_create_index_async.py | 6 +-
...v1beta1_index_service_create_index_sync.py | 6 +-
...1beta1_index_service_delete_index_async.py | 6 +-
...v1beta1_index_service_delete_index_sync.py | 6 +-
...m_v1beta1_index_service_get_index_async.py | 6 +-
...rm_v1beta1_index_service_get_index_sync.py | 6 +-
...1beta1_index_service_list_indexes_async.py | 6 +-
...v1beta1_index_service_list_indexes_sync.py | 6 +-
...1beta1_index_service_update_index_async.py | 4 +-
...v1beta1_index_service_update_index_sync.py | 4 +-
...rvice_cancel_batch_prediction_job_async.py | 6 +-
...ervice_cancel_batch_prediction_job_sync.py | 6 +-
...ta1_job_service_cancel_custom_job_async.py | 6 +-
...eta1_job_service_cancel_custom_job_sync.py | 6 +-
..._service_cancel_data_labeling_job_async.py | 6 +-
...b_service_cancel_data_labeling_job_sync.py | 6 +-
..._cancel_hyperparameter_tuning_job_async.py | 6 +-
...e_cancel_hyperparameter_tuning_job_sync.py | 6 +-
...rvice_create_batch_prediction_job_async.py | 7 +-
...ervice_create_batch_prediction_job_sync.py | 7 +-
...ta1_job_service_create_custom_job_async.py | 6 +-
...eta1_job_service_create_custom_job_sync.py | 6 +-
..._service_create_data_labeling_job_async.py | 8 +-
...b_service_create_data_labeling_job_sync.py | 8 +-
..._create_hyperparameter_tuning_job_async.py | 6 +-
...e_create_hyperparameter_tuning_job_sync.py | 6 +-
...e_model_deployment_monitoring_job_async.py | 8 +-
...te_model_deployment_monitoring_job_sync.py | 8 +-
...rvice_delete_batch_prediction_job_async.py | 6 +-
...ervice_delete_batch_prediction_job_sync.py | 6 +-
...ta1_job_service_delete_custom_job_async.py | 6 +-
...eta1_job_service_delete_custom_job_sync.py | 6 +-
..._service_delete_data_labeling_job_async.py | 6 +-
...b_service_delete_data_labeling_job_sync.py | 6 +-
..._delete_hyperparameter_tuning_job_async.py | 6 +-
...e_delete_hyperparameter_tuning_job_sync.py | 6 +-
...e_model_deployment_monitoring_job_async.py | 6 +-
...te_model_deployment_monitoring_job_sync.py | 6 +-
..._service_get_batch_prediction_job_async.py | 6 +-
...b_service_get_batch_prediction_job_sync.py | 6 +-
...1beta1_job_service_get_custom_job_async.py | 6 +-
...v1beta1_job_service_get_custom_job_sync.py | 6 +-
...job_service_get_data_labeling_job_async.py | 6 +-
..._job_service_get_data_labeling_job_sync.py | 6 +-
...ice_get_hyperparameter_tuning_job_async.py | 6 +-
...vice_get_hyperparameter_tuning_job_sync.py | 6 +-
...t_model_deployment_monitoring_job_async.py | 6 +-
...et_model_deployment_monitoring_job_sync.py | 6 +-
...ervice_list_batch_prediction_jobs_async.py | 6 +-
...service_list_batch_prediction_jobs_sync.py | 6 +-
...eta1_job_service_list_custom_jobs_async.py | 6 +-
...beta1_job_service_list_custom_jobs_sync.py | 6 +-
...b_service_list_data_labeling_jobs_async.py | 6 +-
...ob_service_list_data_labeling_jobs_sync.py | 6 +-
...e_list_hyperparameter_tuning_jobs_async.py | 6 +-
...ce_list_hyperparameter_tuning_jobs_sync.py | 6 +-
..._model_deployment_monitoring_jobs_async.py | 6 +-
...t_model_deployment_monitoring_jobs_sync.py | 6 +-
...e_model_deployment_monitoring_job_async.py | 6 +-
...se_model_deployment_monitoring_job_sync.py | 6 +-
...e_model_deployment_monitoring_job_async.py | 6 +-
...me_model_deployment_monitoring_job_sync.py | 6 +-
...oyment_monitoring_stats_anomalies_async.py | 6 +-
...loyment_monitoring_stats_anomalies_sync.py | 6 +-
...e_model_deployment_monitoring_job_async.py | 6 +-
...te_model_deployment_monitoring_job_sync.py | 6 +-
..._context_artifacts_and_executions_async.py | 6 +-
...d_context_artifacts_and_executions_sync.py | 6 +-
...data_service_add_context_children_async.py | 6 +-
...adata_service_add_context_children_sync.py | 6 +-
...data_service_add_execution_events_async.py | 6 +-
...adata_service_add_execution_events_sync.py | 6 +-
..._metadata_service_create_artifact_async.py | 6 +-
...1_metadata_service_create_artifact_sync.py | 6 +-
...1_metadata_service_create_context_async.py | 6 +-
...a1_metadata_service_create_context_sync.py | 6 +-
...metadata_service_create_execution_async.py | 6 +-
..._metadata_service_create_execution_sync.py | 6 +-
...ta_service_create_metadata_schema_async.py | 6 +-
...ata_service_create_metadata_schema_sync.py | 6 +-
...ata_service_create_metadata_store_async.py | 6 +-
...data_service_create_metadata_store_sync.py | 6 +-
..._metadata_service_delete_artifact_async.py | 6 +-
...1_metadata_service_delete_artifact_sync.py | 6 +-
...1_metadata_service_delete_context_async.py | 6 +-
...a1_metadata_service_delete_context_sync.py | 6 +-
...metadata_service_delete_execution_async.py | 6 +-
..._metadata_service_delete_execution_sync.py | 6 +-
...ata_service_delete_metadata_store_async.py | 6 +-
...data_service_delete_metadata_store_sync.py | 6 +-
...ta1_metadata_service_get_artifact_async.py | 6 +-
...eta1_metadata_service_get_artifact_sync.py | 6 +-
...eta1_metadata_service_get_context_async.py | 6 +-
...beta1_metadata_service_get_context_sync.py | 6 +-
...a1_metadata_service_get_execution_async.py | 6 +-
...ta1_metadata_service_get_execution_sync.py | 6 +-
...adata_service_get_metadata_schema_async.py | 6 +-
...tadata_service_get_metadata_schema_sync.py | 6 +-
...tadata_service_get_metadata_store_async.py | 6 +-
...etadata_service_get_metadata_store_sync.py | 6 +-
...1_metadata_service_list_artifacts_async.py | 6 +-
...a1_metadata_service_list_artifacts_sync.py | 6 +-
...a1_metadata_service_list_contexts_async.py | 6 +-
...ta1_metadata_service_list_contexts_sync.py | 6 +-
..._metadata_service_list_executions_async.py | 6 +-
...1_metadata_service_list_executions_sync.py | 6 +-
...ata_service_list_metadata_schemas_async.py | 6 +-
...data_service_list_metadata_schemas_sync.py | 6 +-
...data_service_list_metadata_stores_async.py | 6 +-
...adata_service_list_metadata_stores_sync.py | 6 +-
..._metadata_service_purge_artifacts_async.py | 6 +-
...1_metadata_service_purge_artifacts_sync.py | 6 +-
...1_metadata_service_purge_contexts_async.py | 6 +-
...a1_metadata_service_purge_contexts_sync.py | 6 +-
...metadata_service_purge_executions_async.py | 6 +-
..._metadata_service_purge_executions_sync.py | 6 +-
...e_query_artifact_lineage_subgraph_async.py | 6 +-
...ce_query_artifact_lineage_subgraph_sync.py | 6 +-
...ce_query_context_lineage_subgraph_async.py | 6 +-
...ice_query_context_lineage_subgraph_sync.py | 6 +-
...uery_execution_inputs_and_outputs_async.py | 6 +-
...query_execution_inputs_and_outputs_sync.py | 6 +-
..._metadata_service_update_artifact_async.py | 4 +-
...1_metadata_service_update_artifact_sync.py | 4 +-
...1_metadata_service_update_context_async.py | 4 +-
...a1_metadata_service_update_context_sync.py | 4 +-
...metadata_service_update_execution_async.py | 4 +-
..._metadata_service_update_execution_sync.py | 4 +-
...n_service_batch_migrate_resources_async.py | 8 +-
...on_service_batch_migrate_resources_sync.py | 8 +-
...rvice_search_migratable_resources_async.py | 6 +-
...ervice_search_migratable_resources_sync.py | 6 +-
...1beta1_model_service_delete_model_async.py | 6 +-
...v1beta1_model_service_delete_model_sync.py | 6 +-
...1beta1_model_service_export_model_async.py | 6 +-
...v1beta1_model_service_export_model_sync.py | 6 +-
...m_v1beta1_model_service_get_model_async.py | 6 +-
...odel_service_get_model_evaluation_async.py | 6 +-
...ervice_get_model_evaluation_slice_async.py | 6 +-
...service_get_model_evaluation_slice_sync.py | 6 +-
...model_service_get_model_evaluation_sync.py | 6 +-
...rm_v1beta1_model_service_get_model_sync.py | 6 +-
...vice_list_model_evaluation_slices_async.py | 6 +-
...rvice_list_model_evaluation_slices_sync.py | 6 +-
...el_service_list_model_evaluations_async.py | 6 +-
...del_service_list_model_evaluations_sync.py | 6 +-
...v1beta1_model_service_list_models_async.py | 6 +-
..._v1beta1_model_service_list_models_sync.py | 6 +-
...1beta1_model_service_update_model_async.py | 4 +-
...v1beta1_model_service_update_model_sync.py | 4 +-
...1beta1_model_service_upload_model_async.py | 6 +-
...v1beta1_model_service_upload_model_sync.py | 6 +-
...eline_service_cancel_pipeline_job_async.py | 6 +-
...peline_service_cancel_pipeline_job_sync.py | 6 +-
..._service_cancel_training_pipeline_async.py | 6 +-
...e_service_cancel_training_pipeline_sync.py | 6 +-
...eline_service_create_pipeline_job_async.py | 6 +-
...peline_service_create_pipeline_job_sync.py | 6 +-
..._service_create_training_pipeline_async.py | 6 +-
...e_service_create_training_pipeline_sync.py | 6 +-
...eline_service_delete_pipeline_job_async.py | 6 +-
...peline_service_delete_pipeline_job_sync.py | 6 +-
..._service_delete_training_pipeline_async.py | 6 +-
...e_service_delete_training_pipeline_sync.py | 6 +-
...pipeline_service_get_pipeline_job_async.py | 6 +-
..._pipeline_service_get_pipeline_job_sync.py | 6 +-
...ine_service_get_training_pipeline_async.py | 6 +-
...line_service_get_training_pipeline_sync.py | 6 +-
...peline_service_list_pipeline_jobs_async.py | 6 +-
...ipeline_service_list_pipeline_jobs_sync.py | 6 +-
...e_service_list_training_pipelines_async.py | 6 +-
...ne_service_list_training_pipelines_sync.py | 6 +-
...1beta1_prediction_service_explain_async.py | 6 +-
...v1beta1_prediction_service_explain_sync.py | 6 +-
...1beta1_prediction_service_predict_async.py | 6 +-
...v1beta1_prediction_service_predict_sync.py | 6 +-
...a1_prediction_service_raw_predict_async.py | 6 +-
...ta1_prediction_service_raw_predict_sync.py | 6 +-
...ol_service_create_specialist_pool_async.py | 6 +-
...ool_service_create_specialist_pool_sync.py | 6 +-
...ol_service_delete_specialist_pool_async.py | 6 +-
...ool_service_delete_specialist_pool_sync.py | 6 +-
..._pool_service_get_specialist_pool_async.py | 6 +-
...t_pool_service_get_specialist_pool_sync.py | 6 +-
...ool_service_list_specialist_pools_async.py | 6 +-
...pool_service_list_specialist_pools_sync.py | 6 +-
...ol_service_update_specialist_pool_async.py | 4 +-
...ool_service_update_specialist_pool_sync.py | 4 +-
...ice_batch_create_tensorboard_runs_async.py | 8 +-
...vice_batch_create_tensorboard_runs_sync.py | 8 +-
...ch_create_tensorboard_time_series_async.py | 8 +-
...tch_create_tensorboard_time_series_sync.py | 8 +-
...read_tensorboard_time_series_data_async.py | 8 +-
..._read_tensorboard_time_series_data_sync.py | 8 +-
...rboard_service_create_tensorboard_async.py | 6 +-
...ice_create_tensorboard_experiment_async.py | 6 +-
...vice_create_tensorboard_experiment_sync.py | 6 +-
...rd_service_create_tensorboard_run_async.py | 6 +-
...ard_service_create_tensorboard_run_sync.py | 6 +-
...orboard_service_create_tensorboard_sync.py | 6 +-
...ce_create_tensorboard_time_series_async.py | 6 +-
...ice_create_tensorboard_time_series_sync.py | 6 +-
...rboard_service_delete_tensorboard_async.py | 6 +-
...ice_delete_tensorboard_experiment_async.py | 6 +-
...vice_delete_tensorboard_experiment_sync.py | 6 +-
...rd_service_delete_tensorboard_run_async.py | 6 +-
...ard_service_delete_tensorboard_run_sync.py | 6 +-
...orboard_service_delete_tensorboard_sync.py | 6 +-
...ce_delete_tensorboard_time_series_async.py | 6 +-
...ice_delete_tensorboard_time_series_sync.py | 6 +-
...port_tensorboard_time_series_data_async.py | 6 +-
...xport_tensorboard_time_series_data_sync.py | 6 +-
...nsorboard_service_get_tensorboard_async.py | 6 +-
...ervice_get_tensorboard_experiment_async.py | 6 +-
...service_get_tensorboard_experiment_sync.py | 6 +-
...board_service_get_tensorboard_run_async.py | 6 +-
...rboard_service_get_tensorboard_run_sync.py | 6 +-
...ensorboard_service_get_tensorboard_sync.py | 6 +-
...rvice_get_tensorboard_time_series_async.py | 6 +-
...ervice_get_tensorboard_time_series_sync.py | 6 +-
...vice_list_tensorboard_experiments_async.py | 6 +-
...rvice_list_tensorboard_experiments_sync.py | 6 +-
...ard_service_list_tensorboard_runs_async.py | 6 +-
...oard_service_list_tensorboard_runs_sync.py | 6 +-
...vice_list_tensorboard_time_series_async.py | 6 +-
...rvice_list_tensorboard_time_series_sync.py | 6 +-
...orboard_service_list_tensorboards_async.py | 6 +-
...sorboard_service_list_tensorboards_sync.py | 6 +-
...ervice_read_tensorboard_blob_data_async.py | 6 +-
...service_read_tensorboard_blob_data_sync.py | 6 +-
...read_tensorboard_time_series_data_async.py | 6 +-
..._read_tensorboard_time_series_data_sync.py | 6 +-
...rboard_service_update_tensorboard_async.py | 4 +-
...ice_update_tensorboard_experiment_async.py | 4 +-
...vice_update_tensorboard_experiment_sync.py | 4 +-
...rd_service_update_tensorboard_run_async.py | 4 +-
...ard_service_update_tensorboard_run_sync.py | 4 +-
...orboard_service_update_tensorboard_sync.py | 4 +-
...ce_update_tensorboard_time_series_async.py | 4 +-
...ice_update_tensorboard_time_series_sync.py | 4 +-
...write_tensorboard_experiment_data_async.py | 8 +-
..._write_tensorboard_experiment_data_sync.py | 8 +-
...ervice_write_tensorboard_run_data_async.py | 6 +-
...service_write_tensorboard_run_data_sync.py | 6 +-
...ier_service_add_trial_measurement_async.py | 6 +-
...zier_service_add_trial_measurement_sync.py | 6 +-
..._check_trial_early_stopping_state_async.py | 6 +-
...e_check_trial_early_stopping_state_sync.py | 6 +-
...ta1_vizier_service_complete_trial_async.py | 6 +-
...eta1_vizier_service_complete_trial_sync.py | 6 +-
...beta1_vizier_service_create_study_async.py | 6 +-
...1beta1_vizier_service_create_study_sync.py | 6 +-
...beta1_vizier_service_create_trial_async.py | 6 +-
...1beta1_vizier_service_create_trial_sync.py | 6 +-
...beta1_vizier_service_delete_study_async.py | 6 +-
...1beta1_vizier_service_delete_study_sync.py | 6 +-
...beta1_vizier_service_delete_trial_async.py | 6 +-
...1beta1_vizier_service_delete_trial_sync.py | 6 +-
..._v1beta1_vizier_service_get_study_async.py | 6 +-
...m_v1beta1_vizier_service_get_study_sync.py | 6 +-
..._v1beta1_vizier_service_get_trial_async.py | 6 +-
...m_v1beta1_vizier_service_get_trial_sync.py | 6 +-
...izier_service_list_optimal_trials_async.py | 6 +-
...vizier_service_list_optimal_trials_sync.py | 6 +-
...beta1_vizier_service_list_studies_async.py | 6 +-
...1beta1_vizier_service_list_studies_sync.py | 6 +-
...1beta1_vizier_service_list_trials_async.py | 6 +-
...v1beta1_vizier_service_list_trials_sync.py | 6 +-
...beta1_vizier_service_lookup_study_async.py | 6 +-
...1beta1_vizier_service_lookup_study_sync.py | 6 +-
...v1beta1_vizier_service_stop_trial_async.py | 6 +-
..._v1beta1_vizier_service_stop_trial_sync.py | 6 +-
...ta1_vizier_service_suggest_trials_async.py | 6 +-
...eta1_vizier_service_suggest_trials_sync.py | 6 +-
.../snippet_metadata_aiplatform_v1.json | 16340 ++++++++++++++++
.../snippet_metadata_aiplatform_v1beta1.json | 16340 ++++++++++++++++
.../aiplatform_v1/test_dataset_service.py | 95 +-
.../aiplatform_v1/test_endpoint_service.py | 95 +-
...est_featurestore_online_serving_service.py | 95 +-
.../test_featurestore_service.py | 95 +-
.../test_index_endpoint_service.py | 95 +-
.../gapic/aiplatform_v1/test_index_service.py | 95 +-
.../gapic/aiplatform_v1/test_job_service.py | 90 +-
.../aiplatform_v1/test_metadata_service.py | 95 +-
.../aiplatform_v1/test_migration_service.py | 123 +-
.../gapic/aiplatform_v1/test_model_service.py | 95 +-
.../aiplatform_v1/test_pipeline_service.py | 95 +-
.../aiplatform_v1/test_prediction_service.py | 95 +-
.../test_specialist_pool_service.py | 95 +-
.../aiplatform_v1/test_tensorboard_service.py | 101 +-
.../aiplatform_v1/test_vizier_service.py | 95 +-
.../test_dataset_service.py | 95 +-
.../test_endpoint_service.py | 95 +-
...est_featurestore_online_serving_service.py | 95 +-
.../test_featurestore_service.py | 95 +-
.../test_index_endpoint_service.py | 95 +-
.../aiplatform_v1beta1/test_index_service.py | 95 +-
.../aiplatform_v1beta1/test_job_service.py | 90 +-
.../test_metadata_service.py | 95 +-
.../test_migration_service.py | 95 +-
.../aiplatform_v1beta1/test_model_service.py | 95 +-
.../test_pipeline_service.py | 95 +-
.../test_prediction_service.py | 95 +-
.../test_specialist_pool_service.py | 95 +-
.../test_tensorboard_service.py | 95 +-
.../aiplatform_v1beta1/test_vizier_service.py | 95 +-
832 files changed, 55828 insertions(+), 3244 deletions(-)
create mode 100644 samples/generated_samples/aiplatform_generated_aiplatform_v1_index_endpoint_service_mutate_deployed_index_async.py
create mode 100644 samples/generated_samples/aiplatform_generated_aiplatform_v1_index_endpoint_service_mutate_deployed_index_sync.py
create mode 100644 samples/generated_samples/aiplatform_generated_aiplatform_v1_tensorboard_service_batch_create_tensorboard_runs_async.py
create mode 100644 samples/generated_samples/aiplatform_generated_aiplatform_v1_tensorboard_service_batch_create_tensorboard_runs_sync.py
create mode 100644 samples/generated_samples/aiplatform_generated_aiplatform_v1_tensorboard_service_batch_create_tensorboard_time_series_async.py
create mode 100644 samples/generated_samples/aiplatform_generated_aiplatform_v1_tensorboard_service_batch_create_tensorboard_time_series_sync.py
create mode 100644 samples/generated_samples/aiplatform_generated_aiplatform_v1_tensorboard_service_batch_read_tensorboard_time_series_data_async.py
create mode 100644 samples/generated_samples/aiplatform_generated_aiplatform_v1_tensorboard_service_batch_read_tensorboard_time_series_data_sync.py
create mode 100644 samples/generated_samples/aiplatform_generated_aiplatform_v1_tensorboard_service_create_tensorboard_async.py
create mode 100644 samples/generated_samples/aiplatform_generated_aiplatform_v1_tensorboard_service_create_tensorboard_experiment_async.py
create mode 100644 samples/generated_samples/aiplatform_generated_aiplatform_v1_tensorboard_service_create_tensorboard_experiment_sync.py
create mode 100644 samples/generated_samples/aiplatform_generated_aiplatform_v1_tensorboard_service_create_tensorboard_run_async.py
create mode 100644 samples/generated_samples/aiplatform_generated_aiplatform_v1_tensorboard_service_create_tensorboard_run_sync.py
create mode 100644 samples/generated_samples/aiplatform_generated_aiplatform_v1_tensorboard_service_create_tensorboard_sync.py
create mode 100644 samples/generated_samples/aiplatform_generated_aiplatform_v1_tensorboard_service_create_tensorboard_time_series_async.py
create mode 100644 samples/generated_samples/aiplatform_generated_aiplatform_v1_tensorboard_service_create_tensorboard_time_series_sync.py
create mode 100644 samples/generated_samples/aiplatform_generated_aiplatform_v1_tensorboard_service_delete_tensorboard_async.py
create mode 100644 samples/generated_samples/aiplatform_generated_aiplatform_v1_tensorboard_service_delete_tensorboard_experiment_async.py
create mode 100644 samples/generated_samples/aiplatform_generated_aiplatform_v1_tensorboard_service_delete_tensorboard_experiment_sync.py
create mode 100644 samples/generated_samples/aiplatform_generated_aiplatform_v1_tensorboard_service_delete_tensorboard_run_async.py
create mode 100644 samples/generated_samples/aiplatform_generated_aiplatform_v1_tensorboard_service_delete_tensorboard_run_sync.py
create mode 100644 samples/generated_samples/aiplatform_generated_aiplatform_v1_tensorboard_service_delete_tensorboard_sync.py
create mode 100644 samples/generated_samples/aiplatform_generated_aiplatform_v1_tensorboard_service_delete_tensorboard_time_series_async.py
create mode 100644 samples/generated_samples/aiplatform_generated_aiplatform_v1_tensorboard_service_delete_tensorboard_time_series_sync.py
create mode 100644 samples/generated_samples/aiplatform_generated_aiplatform_v1_tensorboard_service_export_tensorboard_time_series_data_async.py
create mode 100644 samples/generated_samples/aiplatform_generated_aiplatform_v1_tensorboard_service_export_tensorboard_time_series_data_sync.py
create mode 100644 samples/generated_samples/aiplatform_generated_aiplatform_v1_tensorboard_service_get_tensorboard_async.py
create mode 100644 samples/generated_samples/aiplatform_generated_aiplatform_v1_tensorboard_service_get_tensorboard_experiment_async.py
create mode 100644 samples/generated_samples/aiplatform_generated_aiplatform_v1_tensorboard_service_get_tensorboard_experiment_sync.py
create mode 100644 samples/generated_samples/aiplatform_generated_aiplatform_v1_tensorboard_service_get_tensorboard_run_async.py
create mode 100644 samples/generated_samples/aiplatform_generated_aiplatform_v1_tensorboard_service_get_tensorboard_run_sync.py
create mode 100644 samples/generated_samples/aiplatform_generated_aiplatform_v1_tensorboard_service_get_tensorboard_sync.py
create mode 100644 samples/generated_samples/aiplatform_generated_aiplatform_v1_tensorboard_service_get_tensorboard_time_series_async.py
create mode 100644 samples/generated_samples/aiplatform_generated_aiplatform_v1_tensorboard_service_get_tensorboard_time_series_sync.py
create mode 100644 samples/generated_samples/aiplatform_generated_aiplatform_v1_tensorboard_service_list_tensorboard_experiments_async.py
create mode 100644 samples/generated_samples/aiplatform_generated_aiplatform_v1_tensorboard_service_list_tensorboard_experiments_sync.py
create mode 100644 samples/generated_samples/aiplatform_generated_aiplatform_v1_tensorboard_service_list_tensorboard_runs_async.py
create mode 100644 samples/generated_samples/aiplatform_generated_aiplatform_v1_tensorboard_service_list_tensorboard_runs_sync.py
create mode 100644 samples/generated_samples/aiplatform_generated_aiplatform_v1_tensorboard_service_list_tensorboard_time_series_async.py
create mode 100644 samples/generated_samples/aiplatform_generated_aiplatform_v1_tensorboard_service_list_tensorboard_time_series_sync.py
create mode 100644 samples/generated_samples/aiplatform_generated_aiplatform_v1_tensorboard_service_list_tensorboards_async.py
create mode 100644 samples/generated_samples/aiplatform_generated_aiplatform_v1_tensorboard_service_list_tensorboards_sync.py
create mode 100644 samples/generated_samples/aiplatform_generated_aiplatform_v1_tensorboard_service_read_tensorboard_blob_data_async.py
create mode 100644 samples/generated_samples/aiplatform_generated_aiplatform_v1_tensorboard_service_read_tensorboard_blob_data_sync.py
create mode 100644 samples/generated_samples/aiplatform_generated_aiplatform_v1_tensorboard_service_read_tensorboard_time_series_data_async.py
create mode 100644 samples/generated_samples/aiplatform_generated_aiplatform_v1_tensorboard_service_read_tensorboard_time_series_data_sync.py
create mode 100644 samples/generated_samples/aiplatform_generated_aiplatform_v1_tensorboard_service_update_tensorboard_async.py
create mode 100644 samples/generated_samples/aiplatform_generated_aiplatform_v1_tensorboard_service_update_tensorboard_experiment_async.py
create mode 100644 samples/generated_samples/aiplatform_generated_aiplatform_v1_tensorboard_service_update_tensorboard_experiment_sync.py
create mode 100644 samples/generated_samples/aiplatform_generated_aiplatform_v1_tensorboard_service_update_tensorboard_run_async.py
create mode 100644 samples/generated_samples/aiplatform_generated_aiplatform_v1_tensorboard_service_update_tensorboard_run_sync.py
create mode 100644 samples/generated_samples/aiplatform_generated_aiplatform_v1_tensorboard_service_update_tensorboard_sync.py
create mode 100644 samples/generated_samples/aiplatform_generated_aiplatform_v1_tensorboard_service_update_tensorboard_time_series_async.py
create mode 100644 samples/generated_samples/aiplatform_generated_aiplatform_v1_tensorboard_service_update_tensorboard_time_series_sync.py
create mode 100644 samples/generated_samples/aiplatform_generated_aiplatform_v1_tensorboard_service_write_tensorboard_experiment_data_async.py
create mode 100644 samples/generated_samples/aiplatform_generated_aiplatform_v1_tensorboard_service_write_tensorboard_experiment_data_sync.py
create mode 100644 samples/generated_samples/aiplatform_generated_aiplatform_v1_tensorboard_service_write_tensorboard_run_data_async.py
create mode 100644 samples/generated_samples/aiplatform_generated_aiplatform_v1_tensorboard_service_write_tensorboard_run_data_sync.py
create mode 100644 samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_index_endpoint_service_mutate_deployed_index_async.py
create mode 100644 samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_index_endpoint_service_mutate_deployed_index_sync.py
create mode 100644 samples/generated_samples/snippet_metadata_aiplatform_v1.json
create mode 100644 samples/generated_samples/snippet_metadata_aiplatform_v1beta1.json
diff --git a/google/cloud/aiplatform_v1/services/dataset_service/async_client.py b/google/cloud/aiplatform_v1/services/dataset_service/async_client.py
index 163d755b46..356042e538 100644
--- a/google/cloud/aiplatform_v1/services/dataset_service/async_client.py
+++ b/google/cloud/aiplatform_v1/services/dataset_service/async_client.py
@@ -237,6 +237,35 @@ async def create_dataset(
) -> operation_async.AsyncOperation:
r"""Creates a Dataset.
+ .. code-block::
+
+ from google.cloud import aiplatform_v1
+
+ def sample_create_dataset():
+ # Create a client
+ client = aiplatform_v1.DatasetServiceClient()
+
+ # Initialize request argument(s)
+ dataset = aiplatform_v1.Dataset()
+ dataset.display_name = "display_name_value"
+ dataset.metadata_schema_uri = "metadata_schema_uri_value"
+ dataset.metadata.null_value = "NULL_VALUE"
+
+ request = aiplatform_v1.CreateDatasetRequest(
+ parent="parent_value",
+ dataset=dataset,
+ )
+
+ # Make the request
+ operation = client.create_dataset(request=request)
+
+ print("Waiting for operation to complete...")
+
+ response = operation.result()
+
+ # Handle the response
+ print(response)
+
Args:
request (Union[google.cloud.aiplatform_v1.types.CreateDatasetRequest, dict]):
The request object. Request message for
@@ -327,6 +356,25 @@ async def get_dataset(
) -> dataset.Dataset:
r"""Gets a Dataset.
+ .. code-block::
+
+ from google.cloud import aiplatform_v1
+
+ def sample_get_dataset():
+ # Create a client
+ client = aiplatform_v1.DatasetServiceClient()
+
+ # Initialize request argument(s)
+ request = aiplatform_v1.GetDatasetRequest(
+ name="name_value",
+ )
+
+ # Make the request
+ response = client.get_dataset(request=request)
+
+ # Handle the response
+ print(response)
+
Args:
request (Union[google.cloud.aiplatform_v1.types.GetDatasetRequest, dict]):
The request object. Request message for
@@ -399,6 +447,30 @@ async def update_dataset(
) -> gca_dataset.Dataset:
r"""Updates a Dataset.
+ .. code-block::
+
+ from google.cloud import aiplatform_v1
+
+ def sample_update_dataset():
+ # Create a client
+ client = aiplatform_v1.DatasetServiceClient()
+
+ # Initialize request argument(s)
+ dataset = aiplatform_v1.Dataset()
+ dataset.display_name = "display_name_value"
+ dataset.metadata_schema_uri = "metadata_schema_uri_value"
+ dataset.metadata.null_value = "NULL_VALUE"
+
+ request = aiplatform_v1.UpdateDatasetRequest(
+ dataset=dataset,
+ )
+
+ # Make the request
+ response = client.update_dataset(request=request)
+
+ # Handle the response
+ print(response)
+
Args:
request (Union[google.cloud.aiplatform_v1.types.UpdateDatasetRequest, dict]):
The request object. Request message for
@@ -487,6 +559,26 @@ async def list_datasets(
) -> pagers.ListDatasetsAsyncPager:
r"""Lists Datasets in a Location.
+ .. code-block::
+
+ from google.cloud import aiplatform_v1
+
+ def sample_list_datasets():
+ # Create a client
+ client = aiplatform_v1.DatasetServiceClient()
+
+ # Initialize request argument(s)
+ request = aiplatform_v1.ListDatasetsRequest(
+ parent="parent_value",
+ )
+
+ # Make the request
+ page_result = client.list_datasets(request=request)
+
+ # Handle the response
+ for response in page_result:
+ print(response)
+
Args:
request (Union[google.cloud.aiplatform_v1.types.ListDatasetsRequest, dict]):
The request object. Request message for
@@ -567,6 +659,29 @@ async def delete_dataset(
) -> operation_async.AsyncOperation:
r"""Deletes a Dataset.
+ .. code-block::
+
+ from google.cloud import aiplatform_v1
+
+ def sample_delete_dataset():
+ # Create a client
+ client = aiplatform_v1.DatasetServiceClient()
+
+ # Initialize request argument(s)
+ request = aiplatform_v1.DeleteDatasetRequest(
+ name="name_value",
+ )
+
+ # Make the request
+ operation = client.delete_dataset(request=request)
+
+ print("Waiting for operation to complete...")
+
+ response = operation.result()
+
+ # Handle the response
+ print(response)
+
Args:
request (Union[google.cloud.aiplatform_v1.types.DeleteDatasetRequest, dict]):
The request object. Request message for
@@ -661,6 +776,34 @@ async def import_data(
) -> operation_async.AsyncOperation:
r"""Imports data into a Dataset.
+ .. code-block::
+
+ from google.cloud import aiplatform_v1
+
+ def sample_import_data():
+ # Create a client
+ client = aiplatform_v1.DatasetServiceClient()
+
+ # Initialize request argument(s)
+ import_configs = aiplatform_v1.ImportDataConfig()
+ import_configs.gcs_source.uris = ['uris_value_1', 'uris_value_2']
+ import_configs.import_schema_uri = "import_schema_uri_value"
+
+ request = aiplatform_v1.ImportDataRequest(
+ name="name_value",
+ import_configs=import_configs,
+ )
+
+ # Make the request
+ operation = client.import_data(request=request)
+
+ print("Waiting for operation to complete...")
+
+ response = operation.result()
+
+ # Handle the response
+ print(response)
+
Args:
request (Union[google.cloud.aiplatform_v1.types.ImportDataRequest, dict]):
The request object. Request message for
@@ -755,6 +898,33 @@ async def export_data(
) -> operation_async.AsyncOperation:
r"""Exports data from a Dataset.
+ .. code-block::
+
+ from google.cloud import aiplatform_v1
+
+ def sample_export_data():
+ # Create a client
+ client = aiplatform_v1.DatasetServiceClient()
+
+ # Initialize request argument(s)
+ export_config = aiplatform_v1.ExportDataConfig()
+ export_config.gcs_destination.output_uri_prefix = "output_uri_prefix_value"
+
+ request = aiplatform_v1.ExportDataRequest(
+ name="name_value",
+ export_config=export_config,
+ )
+
+ # Make the request
+ operation = client.export_data(request=request)
+
+ print("Waiting for operation to complete...")
+
+ response = operation.result()
+
+ # Handle the response
+ print(response)
+
Args:
request (Union[google.cloud.aiplatform_v1.types.ExportDataRequest, dict]):
The request object. Request message for
@@ -847,6 +1017,26 @@ async def list_data_items(
) -> pagers.ListDataItemsAsyncPager:
r"""Lists DataItems in a Dataset.
+ .. code-block::
+
+ from google.cloud import aiplatform_v1
+
+ def sample_list_data_items():
+ # Create a client
+ client = aiplatform_v1.DatasetServiceClient()
+
+ # Initialize request argument(s)
+ request = aiplatform_v1.ListDataItemsRequest(
+ parent="parent_value",
+ )
+
+ # Make the request
+ page_result = client.list_data_items(request=request)
+
+ # Handle the response
+ for response in page_result:
+ print(response)
+
Args:
request (Union[google.cloud.aiplatform_v1.types.ListDataItemsRequest, dict]):
The request object. Request message for
@@ -928,6 +1118,25 @@ async def get_annotation_spec(
) -> annotation_spec.AnnotationSpec:
r"""Gets an AnnotationSpec.
+ .. code-block::
+
+ from google.cloud import aiplatform_v1
+
+ def sample_get_annotation_spec():
+ # Create a client
+ client = aiplatform_v1.DatasetServiceClient()
+
+ # Initialize request argument(s)
+ request = aiplatform_v1.GetAnnotationSpecRequest(
+ name="name_value",
+ )
+
+ # Make the request
+ response = client.get_annotation_spec(request=request)
+
+ # Handle the response
+ print(response)
+
Args:
request (Union[google.cloud.aiplatform_v1.types.GetAnnotationSpecRequest, dict]):
The request object. Request message for
@@ -1000,6 +1209,26 @@ async def list_annotations(
) -> pagers.ListAnnotationsAsyncPager:
r"""Lists Annotations belongs to a dataitem
+ .. code-block::
+
+ from google.cloud import aiplatform_v1
+
+ def sample_list_annotations():
+ # Create a client
+ client = aiplatform_v1.DatasetServiceClient()
+
+ # Initialize request argument(s)
+ request = aiplatform_v1.ListAnnotationsRequest(
+ parent="parent_value",
+ )
+
+ # Make the request
+ page_result = client.list_annotations(request=request)
+
+ # Handle the response
+ for response in page_result:
+ print(response)
+
Args:
request (Union[google.cloud.aiplatform_v1.types.ListAnnotationsRequest, dict]):
The request object. Request message for
diff --git a/google/cloud/aiplatform_v1/services/dataset_service/client.py b/google/cloud/aiplatform_v1/services/dataset_service/client.py
index a9c2918032..8446d82014 100644
--- a/google/cloud/aiplatform_v1/services/dataset_service/client.py
+++ b/google/cloud/aiplatform_v1/services/dataset_service/client.py
@@ -487,6 +487,36 @@ def create_dataset(
) -> gac_operation.Operation:
r"""Creates a Dataset.
+
+ .. code-block::
+
+ from google.cloud import aiplatform_v1
+
+ def sample_create_dataset():
+ # Create a client
+ client = aiplatform_v1.DatasetServiceClient()
+
+ # Initialize request argument(s)
+ dataset = aiplatform_v1.Dataset()
+ dataset.display_name = "display_name_value"
+ dataset.metadata_schema_uri = "metadata_schema_uri_value"
+ dataset.metadata.null_value = "NULL_VALUE"
+
+ request = aiplatform_v1.CreateDatasetRequest(
+ parent="parent_value",
+ dataset=dataset,
+ )
+
+ # Make the request
+ operation = client.create_dataset(request=request)
+
+ print("Waiting for operation to complete...")
+
+ response = operation.result()
+
+ # Handle the response
+ print(response)
+
Args:
request (Union[google.cloud.aiplatform_v1.types.CreateDatasetRequest, dict]):
The request object. Request message for
@@ -577,6 +607,26 @@ def get_dataset(
) -> dataset.Dataset:
r"""Gets a Dataset.
+
+ .. code-block::
+
+ from google.cloud import aiplatform_v1
+
+ def sample_get_dataset():
+ # Create a client
+ client = aiplatform_v1.DatasetServiceClient()
+
+ # Initialize request argument(s)
+ request = aiplatform_v1.GetDatasetRequest(
+ name="name_value",
+ )
+
+ # Make the request
+ response = client.get_dataset(request=request)
+
+ # Handle the response
+ print(response)
+
Args:
request (Union[google.cloud.aiplatform_v1.types.GetDatasetRequest, dict]):
The request object. Request message for
@@ -649,6 +699,31 @@ def update_dataset(
) -> gca_dataset.Dataset:
r"""Updates a Dataset.
+
+ .. code-block::
+
+ from google.cloud import aiplatform_v1
+
+ def sample_update_dataset():
+ # Create a client
+ client = aiplatform_v1.DatasetServiceClient()
+
+ # Initialize request argument(s)
+ dataset = aiplatform_v1.Dataset()
+ dataset.display_name = "display_name_value"
+ dataset.metadata_schema_uri = "metadata_schema_uri_value"
+ dataset.metadata.null_value = "NULL_VALUE"
+
+ request = aiplatform_v1.UpdateDatasetRequest(
+ dataset=dataset,
+ )
+
+ # Make the request
+ response = client.update_dataset(request=request)
+
+ # Handle the response
+ print(response)
+
Args:
request (Union[google.cloud.aiplatform_v1.types.UpdateDatasetRequest, dict]):
The request object. Request message for
@@ -737,6 +812,27 @@ def list_datasets(
) -> pagers.ListDatasetsPager:
r"""Lists Datasets in a Location.
+
+ .. code-block::
+
+ from google.cloud import aiplatform_v1
+
+ def sample_list_datasets():
+ # Create a client
+ client = aiplatform_v1.DatasetServiceClient()
+
+ # Initialize request argument(s)
+ request = aiplatform_v1.ListDatasetsRequest(
+ parent="parent_value",
+ )
+
+ # Make the request
+ page_result = client.list_datasets(request=request)
+
+ # Handle the response
+ for response in page_result:
+ print(response)
+
Args:
request (Union[google.cloud.aiplatform_v1.types.ListDatasetsRequest, dict]):
The request object. Request message for
@@ -817,6 +913,30 @@ def delete_dataset(
) -> gac_operation.Operation:
r"""Deletes a Dataset.
+
+ .. code-block::
+
+ from google.cloud import aiplatform_v1
+
+ def sample_delete_dataset():
+ # Create a client
+ client = aiplatform_v1.DatasetServiceClient()
+
+ # Initialize request argument(s)
+ request = aiplatform_v1.DeleteDatasetRequest(
+ name="name_value",
+ )
+
+ # Make the request
+ operation = client.delete_dataset(request=request)
+
+ print("Waiting for operation to complete...")
+
+ response = operation.result()
+
+ # Handle the response
+ print(response)
+
Args:
request (Union[google.cloud.aiplatform_v1.types.DeleteDatasetRequest, dict]):
The request object. Request message for
@@ -911,6 +1031,35 @@ def import_data(
) -> gac_operation.Operation:
r"""Imports data into a Dataset.
+
+ .. code-block::
+
+ from google.cloud import aiplatform_v1
+
+ def sample_import_data():
+ # Create a client
+ client = aiplatform_v1.DatasetServiceClient()
+
+ # Initialize request argument(s)
+ import_configs = aiplatform_v1.ImportDataConfig()
+ import_configs.gcs_source.uris = ['uris_value_1', 'uris_value_2']
+ import_configs.import_schema_uri = "import_schema_uri_value"
+
+ request = aiplatform_v1.ImportDataRequest(
+ name="name_value",
+ import_configs=import_configs,
+ )
+
+ # Make the request
+ operation = client.import_data(request=request)
+
+ print("Waiting for operation to complete...")
+
+ response = operation.result()
+
+ # Handle the response
+ print(response)
+
Args:
request (Union[google.cloud.aiplatform_v1.types.ImportDataRequest, dict]):
The request object. Request message for
@@ -1005,6 +1154,34 @@ def export_data(
) -> gac_operation.Operation:
r"""Exports data from a Dataset.
+
+ .. code-block::
+
+ from google.cloud import aiplatform_v1
+
+ def sample_export_data():
+ # Create a client
+ client = aiplatform_v1.DatasetServiceClient()
+
+ # Initialize request argument(s)
+ export_config = aiplatform_v1.ExportDataConfig()
+ export_config.gcs_destination.output_uri_prefix = "output_uri_prefix_value"
+
+ request = aiplatform_v1.ExportDataRequest(
+ name="name_value",
+ export_config=export_config,
+ )
+
+ # Make the request
+ operation = client.export_data(request=request)
+
+ print("Waiting for operation to complete...")
+
+ response = operation.result()
+
+ # Handle the response
+ print(response)
+
Args:
request (Union[google.cloud.aiplatform_v1.types.ExportDataRequest, dict]):
The request object. Request message for
@@ -1097,6 +1274,27 @@ def list_data_items(
) -> pagers.ListDataItemsPager:
r"""Lists DataItems in a Dataset.
+
+ .. code-block::
+
+ from google.cloud import aiplatform_v1
+
+ def sample_list_data_items():
+ # Create a client
+ client = aiplatform_v1.DatasetServiceClient()
+
+ # Initialize request argument(s)
+ request = aiplatform_v1.ListDataItemsRequest(
+ parent="parent_value",
+ )
+
+ # Make the request
+ page_result = client.list_data_items(request=request)
+
+ # Handle the response
+ for response in page_result:
+ print(response)
+
Args:
request (Union[google.cloud.aiplatform_v1.types.ListDataItemsRequest, dict]):
The request object. Request message for
@@ -1178,6 +1376,26 @@ def get_annotation_spec(
) -> annotation_spec.AnnotationSpec:
r"""Gets an AnnotationSpec.
+
+ .. code-block::
+
+ from google.cloud import aiplatform_v1
+
+ def sample_get_annotation_spec():
+ # Create a client
+ client = aiplatform_v1.DatasetServiceClient()
+
+ # Initialize request argument(s)
+ request = aiplatform_v1.GetAnnotationSpecRequest(
+ name="name_value",
+ )
+
+ # Make the request
+ response = client.get_annotation_spec(request=request)
+
+ # Handle the response
+ print(response)
+
Args:
request (Union[google.cloud.aiplatform_v1.types.GetAnnotationSpecRequest, dict]):
The request object. Request message for
@@ -1250,6 +1468,27 @@ def list_annotations(
) -> pagers.ListAnnotationsPager:
r"""Lists Annotations belongs to a dataitem
+
+ .. code-block::
+
+ from google.cloud import aiplatform_v1
+
+ def sample_list_annotations():
+ # Create a client
+ client = aiplatform_v1.DatasetServiceClient()
+
+ # Initialize request argument(s)
+ request = aiplatform_v1.ListAnnotationsRequest(
+ parent="parent_value",
+ )
+
+ # Make the request
+ page_result = client.list_annotations(request=request)
+
+ # Handle the response
+ for response in page_result:
+ print(response)
+
Args:
request (Union[google.cloud.aiplatform_v1.types.ListAnnotationsRequest, dict]):
The request object. Request message for
diff --git a/google/cloud/aiplatform_v1/services/endpoint_service/async_client.py b/google/cloud/aiplatform_v1/services/endpoint_service/async_client.py
index 245b5047a9..e80de16bdf 100644
--- a/google/cloud/aiplatform_v1/services/endpoint_service/async_client.py
+++ b/google/cloud/aiplatform_v1/services/endpoint_service/async_client.py
@@ -234,6 +234,33 @@ async def create_endpoint(
) -> operation_async.AsyncOperation:
r"""Creates an Endpoint.
+ .. code-block::
+
+ from google.cloud import aiplatform_v1
+
+ def sample_create_endpoint():
+ # Create a client
+ client = aiplatform_v1.EndpointServiceClient()
+
+ # Initialize request argument(s)
+ endpoint = aiplatform_v1.Endpoint()
+ endpoint.display_name = "display_name_value"
+
+ request = aiplatform_v1.CreateEndpointRequest(
+ parent="parent_value",
+ endpoint=endpoint,
+ )
+
+ # Make the request
+ operation = client.create_endpoint(request=request)
+
+ print("Waiting for operation to complete...")
+
+ response = operation.result()
+
+ # Handle the response
+ print(response)
+
Args:
request (Union[google.cloud.aiplatform_v1.types.CreateEndpointRequest, dict]):
The request object. Request message for
@@ -340,6 +367,25 @@ async def get_endpoint(
) -> endpoint.Endpoint:
r"""Gets an Endpoint.
+ .. code-block::
+
+ from google.cloud import aiplatform_v1
+
+ def sample_get_endpoint():
+ # Create a client
+ client = aiplatform_v1.EndpointServiceClient()
+
+ # Initialize request argument(s)
+ request = aiplatform_v1.GetEndpointRequest(
+ name="name_value",
+ )
+
+ # Make the request
+ response = client.get_endpoint(request=request)
+
+ # Handle the response
+ print(response)
+
Args:
request (Union[google.cloud.aiplatform_v1.types.GetEndpointRequest, dict]):
The request object. Request message for
@@ -412,6 +458,26 @@ async def list_endpoints(
) -> pagers.ListEndpointsAsyncPager:
r"""Lists Endpoints in a Location.
+ .. code-block::
+
+ from google.cloud import aiplatform_v1
+
+ def sample_list_endpoints():
+ # Create a client
+ client = aiplatform_v1.EndpointServiceClient()
+
+ # Initialize request argument(s)
+ request = aiplatform_v1.ListEndpointsRequest(
+ parent="parent_value",
+ )
+
+ # Make the request
+ page_result = client.list_endpoints(request=request)
+
+ # Handle the response
+ for response in page_result:
+ print(response)
+
Args:
request (Union[google.cloud.aiplatform_v1.types.ListEndpointsRequest, dict]):
The request object. Request message for
@@ -494,6 +560,28 @@ async def update_endpoint(
) -> gca_endpoint.Endpoint:
r"""Updates an Endpoint.
+ .. code-block::
+
+ from google.cloud import aiplatform_v1
+
+ def sample_update_endpoint():
+ # Create a client
+ client = aiplatform_v1.EndpointServiceClient()
+
+ # Initialize request argument(s)
+ endpoint = aiplatform_v1.Endpoint()
+ endpoint.display_name = "display_name_value"
+
+ request = aiplatform_v1.UpdateEndpointRequest(
+ endpoint=endpoint,
+ )
+
+ # Make the request
+ response = client.update_endpoint(request=request)
+
+ # Handle the response
+ print(response)
+
Args:
request (Union[google.cloud.aiplatform_v1.types.UpdateEndpointRequest, dict]):
The request object. Request message for
@@ -577,6 +665,29 @@ async def delete_endpoint(
) -> operation_async.AsyncOperation:
r"""Deletes an Endpoint.
+ .. code-block::
+
+ from google.cloud import aiplatform_v1
+
+ def sample_delete_endpoint():
+ # Create a client
+ client = aiplatform_v1.EndpointServiceClient()
+
+ # Initialize request argument(s)
+ request = aiplatform_v1.DeleteEndpointRequest(
+ name="name_value",
+ )
+
+ # Make the request
+ operation = client.delete_endpoint(request=request)
+
+ print("Waiting for operation to complete...")
+
+ response = operation.result()
+
+ # Handle the response
+ print(response)
+
Args:
request (Union[google.cloud.aiplatform_v1.types.DeleteEndpointRequest, dict]):
The request object. Request message for
@@ -675,6 +786,35 @@ async def deploy_model(
r"""Deploys a Model into this Endpoint, creating a
DeployedModel within it.
+
+ .. code-block::
+
+ from google.cloud import aiplatform_v1
+
+ def sample_deploy_model():
+ # Create a client
+ client = aiplatform_v1.EndpointServiceClient()
+
+ # Initialize request argument(s)
+ deployed_model = aiplatform_v1.DeployedModel()
+ deployed_model.dedicated_resources.min_replica_count = 1803
+ deployed_model.model = "model_value"
+
+ request = aiplatform_v1.DeployModelRequest(
+ endpoint="endpoint_value",
+ deployed_model=deployed_model,
+ )
+
+ # Make the request
+ operation = client.deploy_model(request=request)
+
+ print("Waiting for operation to complete...")
+
+ response = operation.result()
+
+ # Handle the response
+ print(response)
+
Args:
request (Union[google.cloud.aiplatform_v1.types.DeployModelRequest, dict]):
The request object. Request message for
@@ -801,6 +941,31 @@ async def undeploy_model(
DeployedModel from it, and freeing all resources it's
using.
+
+ .. code-block::
+
+ from google.cloud import aiplatform_v1
+
+ def sample_undeploy_model():
+ # Create a client
+ client = aiplatform_v1.EndpointServiceClient()
+
+ # Initialize request argument(s)
+ request = aiplatform_v1.UndeployModelRequest(
+ endpoint="endpoint_value",
+ deployed_model_id="deployed_model_id_value",
+ )
+
+ # Make the request
+ operation = client.undeploy_model(request=request)
+
+ print("Waiting for operation to complete...")
+
+ response = operation.result()
+
+ # Handle the response
+ print(response)
+
Args:
request (Union[google.cloud.aiplatform_v1.types.UndeployModelRequest, dict]):
The request object. Request message for
diff --git a/google/cloud/aiplatform_v1/services/endpoint_service/client.py b/google/cloud/aiplatform_v1/services/endpoint_service/client.py
index 37d9db6527..ddec31d771 100644
--- a/google/cloud/aiplatform_v1/services/endpoint_service/client.py
+++ b/google/cloud/aiplatform_v1/services/endpoint_service/client.py
@@ -472,6 +472,34 @@ def create_endpoint(
) -> gac_operation.Operation:
r"""Creates an Endpoint.
+
+ .. code-block::
+
+ from google.cloud import aiplatform_v1
+
+ def sample_create_endpoint():
+ # Create a client
+ client = aiplatform_v1.EndpointServiceClient()
+
+ # Initialize request argument(s)
+ endpoint = aiplatform_v1.Endpoint()
+ endpoint.display_name = "display_name_value"
+
+ request = aiplatform_v1.CreateEndpointRequest(
+ parent="parent_value",
+ endpoint=endpoint,
+ )
+
+ # Make the request
+ operation = client.create_endpoint(request=request)
+
+ print("Waiting for operation to complete...")
+
+ response = operation.result()
+
+ # Handle the response
+ print(response)
+
Args:
request (Union[google.cloud.aiplatform_v1.types.CreateEndpointRequest, dict]):
The request object. Request message for
@@ -578,6 +606,26 @@ def get_endpoint(
) -> endpoint.Endpoint:
r"""Gets an Endpoint.
+
+ .. code-block::
+
+ from google.cloud import aiplatform_v1
+
+ def sample_get_endpoint():
+ # Create a client
+ client = aiplatform_v1.EndpointServiceClient()
+
+ # Initialize request argument(s)
+ request = aiplatform_v1.GetEndpointRequest(
+ name="name_value",
+ )
+
+ # Make the request
+ response = client.get_endpoint(request=request)
+
+ # Handle the response
+ print(response)
+
Args:
request (Union[google.cloud.aiplatform_v1.types.GetEndpointRequest, dict]):
The request object. Request message for
@@ -650,6 +698,27 @@ def list_endpoints(
) -> pagers.ListEndpointsPager:
r"""Lists Endpoints in a Location.
+
+ .. code-block::
+
+ from google.cloud import aiplatform_v1
+
+ def sample_list_endpoints():
+ # Create a client
+ client = aiplatform_v1.EndpointServiceClient()
+
+ # Initialize request argument(s)
+ request = aiplatform_v1.ListEndpointsRequest(
+ parent="parent_value",
+ )
+
+ # Make the request
+ page_result = client.list_endpoints(request=request)
+
+ # Handle the response
+ for response in page_result:
+ print(response)
+
Args:
request (Union[google.cloud.aiplatform_v1.types.ListEndpointsRequest, dict]):
The request object. Request message for
@@ -732,6 +801,29 @@ def update_endpoint(
) -> gca_endpoint.Endpoint:
r"""Updates an Endpoint.
+
+ .. code-block::
+
+ from google.cloud import aiplatform_v1
+
+ def sample_update_endpoint():
+ # Create a client
+ client = aiplatform_v1.EndpointServiceClient()
+
+ # Initialize request argument(s)
+ endpoint = aiplatform_v1.Endpoint()
+ endpoint.display_name = "display_name_value"
+
+ request = aiplatform_v1.UpdateEndpointRequest(
+ endpoint=endpoint,
+ )
+
+ # Make the request
+ response = client.update_endpoint(request=request)
+
+ # Handle the response
+ print(response)
+
Args:
request (Union[google.cloud.aiplatform_v1.types.UpdateEndpointRequest, dict]):
The request object. Request message for
@@ -815,6 +907,30 @@ def delete_endpoint(
) -> gac_operation.Operation:
r"""Deletes an Endpoint.
+
+ .. code-block::
+
+ from google.cloud import aiplatform_v1
+
+ def sample_delete_endpoint():
+ # Create a client
+ client = aiplatform_v1.EndpointServiceClient()
+
+ # Initialize request argument(s)
+ request = aiplatform_v1.DeleteEndpointRequest(
+ name="name_value",
+ )
+
+ # Make the request
+ operation = client.delete_endpoint(request=request)
+
+ print("Waiting for operation to complete...")
+
+ response = operation.result()
+
+ # Handle the response
+ print(response)
+
Args:
request (Union[google.cloud.aiplatform_v1.types.DeleteEndpointRequest, dict]):
The request object. Request message for
@@ -913,6 +1029,36 @@ def deploy_model(
r"""Deploys a Model into this Endpoint, creating a
DeployedModel within it.
+
+
+ .. code-block::
+
+ from google.cloud import aiplatform_v1
+
+ def sample_deploy_model():
+ # Create a client
+ client = aiplatform_v1.EndpointServiceClient()
+
+ # Initialize request argument(s)
+ deployed_model = aiplatform_v1.DeployedModel()
+ deployed_model.dedicated_resources.min_replica_count = 1803
+ deployed_model.model = "model_value"
+
+ request = aiplatform_v1.DeployModelRequest(
+ endpoint="endpoint_value",
+ deployed_model=deployed_model,
+ )
+
+ # Make the request
+ operation = client.deploy_model(request=request)
+
+ print("Waiting for operation to complete...")
+
+ response = operation.result()
+
+ # Handle the response
+ print(response)
+
Args:
request (Union[google.cloud.aiplatform_v1.types.DeployModelRequest, dict]):
The request object. Request message for
@@ -1038,6 +1184,32 @@ def undeploy_model(
DeployedModel from it, and freeing all resources it's
using.
+
+
+ .. code-block::
+
+ from google.cloud import aiplatform_v1
+
+ def sample_undeploy_model():
+ # Create a client
+ client = aiplatform_v1.EndpointServiceClient()
+
+ # Initialize request argument(s)
+ request = aiplatform_v1.UndeployModelRequest(
+ endpoint="endpoint_value",
+ deployed_model_id="deployed_model_id_value",
+ )
+
+ # Make the request
+ operation = client.undeploy_model(request=request)
+
+ print("Waiting for operation to complete...")
+
+ response = operation.result()
+
+ # Handle the response
+ print(response)
+
Args:
request (Union[google.cloud.aiplatform_v1.types.UndeployModelRequest, dict]):
The request object. Request message for
diff --git a/google/cloud/aiplatform_v1/services/featurestore_online_serving_service/async_client.py b/google/cloud/aiplatform_v1/services/featurestore_online_serving_service/async_client.py
index 639d8122a4..cd92bce02a 100644
--- a/google/cloud/aiplatform_v1/services/featurestore_online_serving_service/async_client.py
+++ b/google/cloud/aiplatform_v1/services/featurestore_online_serving_service/async_client.py
@@ -244,6 +244,31 @@ async def read_feature_values(
entities of an EntityType, please use
StreamingReadFeatureValues.
+
+ .. code-block::
+
+ from google.cloud import aiplatform_v1
+
+ def sample_read_feature_values():
+ # Create a client
+ client = aiplatform_v1.FeaturestoreOnlineServingServiceClient()
+
+ # Initialize request argument(s)
+ feature_selector = aiplatform_v1.FeatureSelector()
+ feature_selector.id_matcher.ids = ['ids_value_1', 'ids_value_2']
+
+ request = aiplatform_v1.ReadFeatureValuesRequest(
+ entity_type="entity_type_value",
+ entity_id="entity_id_value",
+ feature_selector=feature_selector,
+ )
+
+ # Make the request
+ response = client.read_feature_values(request=request)
+
+ # Handle the response
+ print(response)
+
Args:
request (Union[google.cloud.aiplatform_v1.types.ReadFeatureValuesRequest, dict]):
The request object. Request message for
@@ -327,6 +352,32 @@ def streaming_read_feature_values(
on their size, data for different entities may be broken
up across multiple responses.
+
+ .. code-block::
+
+ from google.cloud import aiplatform_v1
+
+ def sample_streaming_read_feature_values():
+ # Create a client
+ client = aiplatform_v1.FeaturestoreOnlineServingServiceClient()
+
+ # Initialize request argument(s)
+ feature_selector = aiplatform_v1.FeatureSelector()
+ feature_selector.id_matcher.ids = ['ids_value_1', 'ids_value_2']
+
+ request = aiplatform_v1.StreamingReadFeatureValuesRequest(
+ entity_type="entity_type_value",
+ entity_ids=['entity_ids_value_1', 'entity_ids_value_2'],
+ feature_selector=feature_selector,
+ )
+
+ # Make the request
+ stream = client.streaming_read_feature_values(request=request)
+
+ # Handle the response
+ for response in stream:
+ print(response)
+
Args:
request (Union[google.cloud.aiplatform_v1.types.StreamingReadFeatureValuesRequest, dict]):
The request object. Request message for
diff --git a/google/cloud/aiplatform_v1/services/featurestore_online_serving_service/client.py b/google/cloud/aiplatform_v1/services/featurestore_online_serving_service/client.py
index cd70627ef6..5403d43e8d 100644
--- a/google/cloud/aiplatform_v1/services/featurestore_online_serving_service/client.py
+++ b/google/cloud/aiplatform_v1/services/featurestore_online_serving_service/client.py
@@ -430,6 +430,32 @@ def read_feature_values(
entities of an EntityType, please use
StreamingReadFeatureValues.
+
+
+ .. code-block::
+
+ from google.cloud import aiplatform_v1
+
+ def sample_read_feature_values():
+ # Create a client
+ client = aiplatform_v1.FeaturestoreOnlineServingServiceClient()
+
+ # Initialize request argument(s)
+ feature_selector = aiplatform_v1.FeatureSelector()
+ feature_selector.id_matcher.ids = ['ids_value_1', 'ids_value_2']
+
+ request = aiplatform_v1.ReadFeatureValuesRequest(
+ entity_type="entity_type_value",
+ entity_id="entity_id_value",
+ feature_selector=feature_selector,
+ )
+
+ # Make the request
+ response = client.read_feature_values(request=request)
+
+ # Handle the response
+ print(response)
+
Args:
request (Union[google.cloud.aiplatform_v1.types.ReadFeatureValuesRequest, dict]):
The request object. Request message for
@@ -513,6 +539,33 @@ def streaming_read_feature_values(
on their size, data for different entities may be broken
up across multiple responses.
+
+
+ .. code-block::
+
+ from google.cloud import aiplatform_v1
+
+ def sample_streaming_read_feature_values():
+ # Create a client
+ client = aiplatform_v1.FeaturestoreOnlineServingServiceClient()
+
+ # Initialize request argument(s)
+ feature_selector = aiplatform_v1.FeatureSelector()
+ feature_selector.id_matcher.ids = ['ids_value_1', 'ids_value_2']
+
+ request = aiplatform_v1.StreamingReadFeatureValuesRequest(
+ entity_type="entity_type_value",
+ entity_ids=['entity_ids_value_1', 'entity_ids_value_2'],
+ feature_selector=feature_selector,
+ )
+
+ # Make the request
+ stream = client.streaming_read_feature_values(request=request)
+
+ # Handle the response
+ for response in stream:
+ print(response)
+
Args:
request (Union[google.cloud.aiplatform_v1.types.StreamingReadFeatureValuesRequest, dict]):
The request object. Request message for
diff --git a/google/cloud/aiplatform_v1/services/featurestore_service/async_client.py b/google/cloud/aiplatform_v1/services/featurestore_service/async_client.py
index 89bcf46a87..aa9bab4a09 100644
--- a/google/cloud/aiplatform_v1/services/featurestore_service/async_client.py
+++ b/google/cloud/aiplatform_v1/services/featurestore_service/async_client.py
@@ -240,6 +240,31 @@ async def create_featurestore(
r"""Creates a new Featurestore in a given project and
location.
+
+ .. code-block::
+
+ from google.cloud import aiplatform_v1
+
+ def sample_create_featurestore():
+ # Create a client
+ client = aiplatform_v1.FeaturestoreServiceClient()
+
+ # Initialize request argument(s)
+ request = aiplatform_v1.CreateFeaturestoreRequest(
+ parent="parent_value",
+ featurestore_id="featurestore_id_value",
+ )
+
+ # Make the request
+ operation = client.create_featurestore(request=request)
+
+ print("Waiting for operation to complete...")
+
+ response = operation.result()
+
+ # Handle the response
+ print(response)
+
Args:
request (Union[google.cloud.aiplatform_v1.types.CreateFeaturestoreRequest, dict]):
The request object. Request message for
@@ -348,6 +373,25 @@ async def get_featurestore(
) -> featurestore.Featurestore:
r"""Gets details of a single Featurestore.
+ .. code-block::
+
+ from google.cloud import aiplatform_v1
+
+ def sample_get_featurestore():
+ # Create a client
+ client = aiplatform_v1.FeaturestoreServiceClient()
+
+ # Initialize request argument(s)
+ request = aiplatform_v1.GetFeaturestoreRequest(
+ name="name_value",
+ )
+
+ # Make the request
+ response = client.get_featurestore(request=request)
+
+ # Handle the response
+ print(response)
+
Args:
request (Union[google.cloud.aiplatform_v1.types.GetFeaturestoreRequest, dict]):
The request object. Request message for
@@ -422,6 +466,26 @@ async def list_featurestores(
) -> pagers.ListFeaturestoresAsyncPager:
r"""Lists Featurestores in a given project and location.
+ .. code-block::
+
+ from google.cloud import aiplatform_v1
+
+ def sample_list_featurestores():
+ # Create a client
+ client = aiplatform_v1.FeaturestoreServiceClient()
+
+ # Initialize request argument(s)
+ request = aiplatform_v1.ListFeaturestoresRequest(
+ parent="parent_value",
+ )
+
+ # Make the request
+ page_result = client.list_featurestores(request=request)
+
+ # Handle the response
+ for response in page_result:
+ print(response)
+
Args:
request (Union[google.cloud.aiplatform_v1.types.ListFeaturestoresRequest, dict]):
The request object. Request message for
@@ -504,6 +568,28 @@ async def update_featurestore(
) -> operation_async.AsyncOperation:
r"""Updates the parameters of a single Featurestore.
+ .. code-block::
+
+ from google.cloud import aiplatform_v1
+
+ def sample_update_featurestore():
+ # Create a client
+ client = aiplatform_v1.FeaturestoreServiceClient()
+
+ # Initialize request argument(s)
+ request = aiplatform_v1.UpdateFeaturestoreRequest(
+ )
+
+ # Make the request
+ operation = client.update_featurestore(request=request)
+
+ print("Waiting for operation to complete...")
+
+ response = operation.result()
+
+ # Handle the response
+ print(response)
+
Args:
request (Union[google.cloud.aiplatform_v1.types.UpdateFeaturestoreRequest, dict]):
The request object. Request message for
@@ -613,6 +699,30 @@ async def delete_featurestore(
any EntityTypes or ``force`` must be set to true for the request
to succeed.
+
+ .. code-block::
+
+ from google.cloud import aiplatform_v1
+
+ def sample_delete_featurestore():
+ # Create a client
+ client = aiplatform_v1.FeaturestoreServiceClient()
+
+ # Initialize request argument(s)
+ request = aiplatform_v1.DeleteFeaturestoreRequest(
+ name="name_value",
+ )
+
+ # Make the request
+ operation = client.delete_featurestore(request=request)
+
+ print("Waiting for operation to complete...")
+
+ response = operation.result()
+
+ # Handle the response
+ print(response)
+
Args:
request (Union[google.cloud.aiplatform_v1.types.DeleteFeaturestoreRequest, dict]):
The request object. Request message for
@@ -720,6 +830,30 @@ async def create_entity_type(
) -> operation_async.AsyncOperation:
r"""Creates a new EntityType in a given Featurestore.
+ .. code-block::
+
+ from google.cloud import aiplatform_v1
+
+ def sample_create_entity_type():
+ # Create a client
+ client = aiplatform_v1.FeaturestoreServiceClient()
+
+ # Initialize request argument(s)
+ request = aiplatform_v1.CreateEntityTypeRequest(
+ parent="parent_value",
+ entity_type_id="entity_type_id_value",
+ )
+
+ # Make the request
+ operation = client.create_entity_type(request=request)
+
+ print("Waiting for operation to complete...")
+
+ response = operation.result()
+
+ # Handle the response
+ print(response)
+
Args:
request (Union[google.cloud.aiplatform_v1.types.CreateEntityTypeRequest, dict]):
The request object. Request message for
@@ -827,6 +961,25 @@ async def get_entity_type(
) -> entity_type.EntityType:
r"""Gets details of a single EntityType.
+ .. code-block::
+
+ from google.cloud import aiplatform_v1
+
+ def sample_get_entity_type():
+ # Create a client
+ client = aiplatform_v1.FeaturestoreServiceClient()
+
+ # Initialize request argument(s)
+ request = aiplatform_v1.GetEntityTypeRequest(
+ name="name_value",
+ )
+
+ # Make the request
+ response = client.get_entity_type(request=request)
+
+ # Handle the response
+ print(response)
+
Args:
request (Union[google.cloud.aiplatform_v1.types.GetEntityTypeRequest, dict]):
The request object. Request message for
@@ -902,6 +1055,26 @@ async def list_entity_types(
) -> pagers.ListEntityTypesAsyncPager:
r"""Lists EntityTypes in a given Featurestore.
+ .. code-block::
+
+ from google.cloud import aiplatform_v1
+
+ def sample_list_entity_types():
+ # Create a client
+ client = aiplatform_v1.FeaturestoreServiceClient()
+
+ # Initialize request argument(s)
+ request = aiplatform_v1.ListEntityTypesRequest(
+ parent="parent_value",
+ )
+
+ # Make the request
+ page_result = client.list_entity_types(request=request)
+
+ # Handle the response
+ for response in page_result:
+ print(response)
+
Args:
request (Union[google.cloud.aiplatform_v1.types.ListEntityTypesRequest, dict]):
The request object. Request message for
@@ -984,6 +1157,24 @@ async def update_entity_type(
) -> gca_entity_type.EntityType:
r"""Updates the parameters of a single EntityType.
+ .. code-block::
+
+ from google.cloud import aiplatform_v1
+
+ def sample_update_entity_type():
+ # Create a client
+ client = aiplatform_v1.FeaturestoreServiceClient()
+
+ # Initialize request argument(s)
+ request = aiplatform_v1.UpdateEntityTypeRequest(
+ )
+
+ # Make the request
+ response = client.update_entity_type(request=request)
+
+ # Handle the response
+ print(response)
+
Args:
request (Union[google.cloud.aiplatform_v1.types.UpdateEntityTypeRequest, dict]):
The request object. Request message for
@@ -1087,6 +1278,30 @@ async def delete_entity_type(
Features or ``force`` must be set to true for the request to
succeed.
+
+ .. code-block::
+
+ from google.cloud import aiplatform_v1
+
+ def sample_delete_entity_type():
+ # Create a client
+ client = aiplatform_v1.FeaturestoreServiceClient()
+
+ # Initialize request argument(s)
+ request = aiplatform_v1.DeleteEntityTypeRequest(
+ name="name_value",
+ )
+
+ # Make the request
+ operation = client.delete_entity_type(request=request)
+
+ print("Waiting for operation to complete...")
+
+ response = operation.result()
+
+ # Handle the response
+ print(response)
+
Args:
request (Union[google.cloud.aiplatform_v1.types.DeleteEntityTypeRequest, dict]):
The request object. Request message for
@@ -1193,6 +1408,34 @@ async def create_feature(
) -> operation_async.AsyncOperation:
r"""Creates a new Feature in a given EntityType.
+ .. code-block::
+
+ from google.cloud import aiplatform_v1
+
+ def sample_create_feature():
+ # Create a client
+ client = aiplatform_v1.FeaturestoreServiceClient()
+
+ # Initialize request argument(s)
+ feature = aiplatform_v1.Feature()
+ feature.value_type = "BYTES"
+
+ request = aiplatform_v1.CreateFeatureRequest(
+ parent="parent_value",
+ feature=feature,
+ feature_id="feature_id_value",
+ )
+
+ # Make the request
+ operation = client.create_feature(request=request)
+
+ print("Waiting for operation to complete...")
+
+ response = operation.result()
+
+ # Handle the response
+ print(response)
+
Args:
request (Union[google.cloud.aiplatform_v1.types.CreateFeatureRequest, dict]):
The request object. Request message for
@@ -1300,6 +1543,35 @@ async def batch_create_features(
) -> operation_async.AsyncOperation:
r"""Creates a batch of Features in a given EntityType.
+ .. code-block::
+
+ from google.cloud import aiplatform_v1
+
+ def sample_batch_create_features():
+ # Create a client
+ client = aiplatform_v1.FeaturestoreServiceClient()
+
+ # Initialize request argument(s)
+ requests = aiplatform_v1.CreateFeatureRequest()
+ requests.parent = "parent_value"
+ requests.feature.value_type = "BYTES"
+ requests.feature_id = "feature_id_value"
+
+ request = aiplatform_v1.BatchCreateFeaturesRequest(
+ parent="parent_value",
+ requests=requests,
+ )
+
+ # Make the request
+ operation = client.batch_create_features(request=request)
+
+ print("Waiting for operation to complete...")
+
+ response = operation.result()
+
+ # Handle the response
+ print(response)
+
Args:
request (Union[google.cloud.aiplatform_v1.types.BatchCreateFeaturesRequest, dict]):
The request object. Request message for
@@ -1397,6 +1669,25 @@ async def get_feature(
) -> feature.Feature:
r"""Gets details of a single Feature.
+ .. code-block::
+
+ from google.cloud import aiplatform_v1
+
+ def sample_get_feature():
+ # Create a client
+ client = aiplatform_v1.FeaturestoreServiceClient()
+
+ # Initialize request argument(s)
+ request = aiplatform_v1.GetFeatureRequest(
+ name="name_value",
+ )
+
+ # Make the request
+ response = client.get_feature(request=request)
+
+ # Handle the response
+ print(response)
+
Args:
request (Union[google.cloud.aiplatform_v1.types.GetFeatureRequest, dict]):
The request object. Request message for
@@ -1471,6 +1762,26 @@ async def list_features(
) -> pagers.ListFeaturesAsyncPager:
r"""Lists Features in a given EntityType.
+ .. code-block::
+
+ from google.cloud import aiplatform_v1
+
+ def sample_list_features():
+ # Create a client
+ client = aiplatform_v1.FeaturestoreServiceClient()
+
+ # Initialize request argument(s)
+ request = aiplatform_v1.ListFeaturesRequest(
+ parent="parent_value",
+ )
+
+ # Make the request
+ page_result = client.list_features(request=request)
+
+ # Handle the response
+ for response in page_result:
+ print(response)
+
Args:
request (Union[google.cloud.aiplatform_v1.types.ListFeaturesRequest, dict]):
The request object. Request message for
@@ -1553,6 +1864,28 @@ async def update_feature(
) -> gca_feature.Feature:
r"""Updates the parameters of a single Feature.
+ .. code-block::
+
+ from google.cloud import aiplatform_v1
+
+ def sample_update_feature():
+ # Create a client
+ client = aiplatform_v1.FeaturestoreServiceClient()
+
+ # Initialize request argument(s)
+ feature = aiplatform_v1.Feature()
+ feature.value_type = "BYTES"
+
+ request = aiplatform_v1.UpdateFeatureRequest(
+ feature=feature,
+ )
+
+ # Make the request
+ response = client.update_feature(request=request)
+
+ # Handle the response
+ print(response)
+
Args:
request (Union[google.cloud.aiplatform_v1.types.UpdateFeatureRequest, dict]):
The request object. Request message for
@@ -1652,6 +1985,29 @@ async def delete_feature(
) -> operation_async.AsyncOperation:
r"""Deletes a single Feature.
+ .. code-block::
+
+ from google.cloud import aiplatform_v1
+
+ def sample_delete_feature():
+ # Create a client
+ client = aiplatform_v1.FeaturestoreServiceClient()
+
+ # Initialize request argument(s)
+ request = aiplatform_v1.DeleteFeatureRequest(
+ name="name_value",
+ )
+
+ # Make the request
+ operation = client.delete_feature(request=request)
+
+ print("Waiting for operation to complete...")
+
+ response = operation.result()
+
+ # Handle the response
+ print(response)
+
Args:
request (Union[google.cloud.aiplatform_v1.types.DeleteFeatureRequest, dict]):
The request object. Request message for
@@ -1765,6 +2121,39 @@ async def import_feature_values(
or retention policy.
- Online serving cluster is under-provisioned.
+
+ .. code-block::
+
+ from google.cloud import aiplatform_v1
+
+ def sample_import_feature_values():
+ # Create a client
+ client = aiplatform_v1.FeaturestoreServiceClient()
+
+ # Initialize request argument(s)
+ avro_source = aiplatform_v1.AvroSource()
+ avro_source.gcs_source.uris = ['uris_value_1', 'uris_value_2']
+
+ feature_specs = aiplatform_v1.FeatureSpec()
+ feature_specs.id = "id_value"
+
+ request = aiplatform_v1.ImportFeatureValuesRequest(
+ avro_source=avro_source,
+ feature_time_field="feature_time_field_value",
+ entity_type="entity_type_value",
+ feature_specs=feature_specs,
+ )
+
+ # Make the request
+ operation = client.import_feature_values(request=request)
+
+ print("Waiting for operation to complete...")
+
+ response = operation.result()
+
+ # Handle the response
+ print(response)
+
Args:
request (Union[google.cloud.aiplatform_v1.types.ImportFeatureValuesRequest, dict]):
The request object. Request message for
@@ -1857,6 +2246,43 @@ async def batch_read_feature_values(
correctness is guaranteed for Feature values of each
read instance as of each instance's read timestamp.
+
+ .. code-block::
+
+ from google.cloud import aiplatform_v1
+
+ def sample_batch_read_feature_values():
+ # Create a client
+ client = aiplatform_v1.FeaturestoreServiceClient()
+
+ # Initialize request argument(s)
+ csv_read_instances = aiplatform_v1.CsvSource()
+ csv_read_instances.gcs_source.uris = ['uris_value_1', 'uris_value_2']
+
+ destination = aiplatform_v1.FeatureValueDestination()
+ destination.bigquery_destination.output_uri = "output_uri_value"
+
+ entity_type_specs = aiplatform_v1.EntityTypeSpec()
+ entity_type_specs.entity_type_id = "entity_type_id_value"
+ entity_type_specs.feature_selector.id_matcher.ids = ['ids_value_1', 'ids_value_2']
+
+ request = aiplatform_v1.BatchReadFeatureValuesRequest(
+ csv_read_instances=csv_read_instances,
+ featurestore="featurestore_value",
+ destination=destination,
+ entity_type_specs=entity_type_specs,
+ )
+
+ # Make the request
+ operation = client.batch_read_feature_values(request=request)
+
+ print("Waiting for operation to complete...")
+
+ response = operation.result()
+
+ # Handle the response
+ print(response)
+
Args:
request (Union[google.cloud.aiplatform_v1.types.BatchReadFeatureValuesRequest, dict]):
The request object. Request message for
@@ -1944,6 +2370,38 @@ async def export_feature_values(
r"""Exports Feature values from all the entities of a
target EntityType.
+
+ .. code-block::
+
+ from google.cloud import aiplatform_v1
+
+ def sample_export_feature_values():
+ # Create a client
+ client = aiplatform_v1.FeaturestoreServiceClient()
+
+ # Initialize request argument(s)
+ destination = aiplatform_v1.FeatureValueDestination()
+ destination.bigquery_destination.output_uri = "output_uri_value"
+
+ feature_selector = aiplatform_v1.FeatureSelector()
+ feature_selector.id_matcher.ids = ['ids_value_1', 'ids_value_2']
+
+ request = aiplatform_v1.ExportFeatureValuesRequest(
+ entity_type="entity_type_value",
+ destination=destination,
+ feature_selector=feature_selector,
+ )
+
+ # Make the request
+ operation = client.export_feature_values(request=request)
+
+ print("Waiting for operation to complete...")
+
+ response = operation.result()
+
+ # Handle the response
+ print(response)
+
Args:
request (Union[google.cloud.aiplatform_v1.types.ExportFeatureValuesRequest, dict]):
The request object. Request message for
@@ -2032,6 +2490,27 @@ async def search_features(
r"""Searches Features matching a query in a given
project.
+
+ .. code-block::
+
+ from google.cloud import aiplatform_v1
+
+ def sample_search_features():
+ # Create a client
+ client = aiplatform_v1.FeaturestoreServiceClient()
+
+ # Initialize request argument(s)
+ request = aiplatform_v1.SearchFeaturesRequest(
+ location="location_value",
+ )
+
+ # Make the request
+ page_result = client.search_features(request=request)
+
+ # Handle the response
+ for response in page_result:
+ print(response)
+
Args:
request (Union[google.cloud.aiplatform_v1.types.SearchFeaturesRequest, dict]):
The request object. Request message for
diff --git a/google/cloud/aiplatform_v1/services/featurestore_service/client.py b/google/cloud/aiplatform_v1/services/featurestore_service/client.py
index 510f1080e7..65a0ca950c 100644
--- a/google/cloud/aiplatform_v1/services/featurestore_service/client.py
+++ b/google/cloud/aiplatform_v1/services/featurestore_service/client.py
@@ -473,6 +473,32 @@ def create_featurestore(
r"""Creates a new Featurestore in a given project and
location.
+
+
+ .. code-block::
+
+ from google.cloud import aiplatform_v1
+
+ def sample_create_featurestore():
+ # Create a client
+ client = aiplatform_v1.FeaturestoreServiceClient()
+
+ # Initialize request argument(s)
+ request = aiplatform_v1.CreateFeaturestoreRequest(
+ parent="parent_value",
+ featurestore_id="featurestore_id_value",
+ )
+
+ # Make the request
+ operation = client.create_featurestore(request=request)
+
+ print("Waiting for operation to complete...")
+
+ response = operation.result()
+
+ # Handle the response
+ print(response)
+
Args:
request (Union[google.cloud.aiplatform_v1.types.CreateFeaturestoreRequest, dict]):
The request object. Request message for
@@ -581,6 +607,26 @@ def get_featurestore(
) -> featurestore.Featurestore:
r"""Gets details of a single Featurestore.
+
+ .. code-block::
+
+ from google.cloud import aiplatform_v1
+
+ def sample_get_featurestore():
+ # Create a client
+ client = aiplatform_v1.FeaturestoreServiceClient()
+
+ # Initialize request argument(s)
+ request = aiplatform_v1.GetFeaturestoreRequest(
+ name="name_value",
+ )
+
+ # Make the request
+ response = client.get_featurestore(request=request)
+
+ # Handle the response
+ print(response)
+
Args:
request (Union[google.cloud.aiplatform_v1.types.GetFeaturestoreRequest, dict]):
The request object. Request message for
@@ -655,6 +701,27 @@ def list_featurestores(
) -> pagers.ListFeaturestoresPager:
r"""Lists Featurestores in a given project and location.
+
+ .. code-block::
+
+ from google.cloud import aiplatform_v1
+
+ def sample_list_featurestores():
+ # Create a client
+ client = aiplatform_v1.FeaturestoreServiceClient()
+
+ # Initialize request argument(s)
+ request = aiplatform_v1.ListFeaturestoresRequest(
+ parent="parent_value",
+ )
+
+ # Make the request
+ page_result = client.list_featurestores(request=request)
+
+ # Handle the response
+ for response in page_result:
+ print(response)
+
Args:
request (Union[google.cloud.aiplatform_v1.types.ListFeaturestoresRequest, dict]):
The request object. Request message for
@@ -737,6 +804,29 @@ def update_featurestore(
) -> gac_operation.Operation:
r"""Updates the parameters of a single Featurestore.
+
+ .. code-block::
+
+ from google.cloud import aiplatform_v1
+
+ def sample_update_featurestore():
+ # Create a client
+ client = aiplatform_v1.FeaturestoreServiceClient()
+
+ # Initialize request argument(s)
+ request = aiplatform_v1.UpdateFeaturestoreRequest(
+ )
+
+ # Make the request
+ operation = client.update_featurestore(request=request)
+
+ print("Waiting for operation to complete...")
+
+ response = operation.result()
+
+ # Handle the response
+ print(response)
+
Args:
request (Union[google.cloud.aiplatform_v1.types.UpdateFeaturestoreRequest, dict]):
The request object. Request message for
@@ -846,6 +936,31 @@ def delete_featurestore(
any EntityTypes or ``force`` must be set to true for the request
to succeed.
+
+
+ .. code-block::
+
+ from google.cloud import aiplatform_v1
+
+ def sample_delete_featurestore():
+ # Create a client
+ client = aiplatform_v1.FeaturestoreServiceClient()
+
+ # Initialize request argument(s)
+ request = aiplatform_v1.DeleteFeaturestoreRequest(
+ name="name_value",
+ )
+
+ # Make the request
+ operation = client.delete_featurestore(request=request)
+
+ print("Waiting for operation to complete...")
+
+ response = operation.result()
+
+ # Handle the response
+ print(response)
+
Args:
request (Union[google.cloud.aiplatform_v1.types.DeleteFeaturestoreRequest, dict]):
The request object. Request message for
@@ -953,6 +1068,31 @@ def create_entity_type(
) -> gac_operation.Operation:
r"""Creates a new EntityType in a given Featurestore.
+
+ .. code-block::
+
+ from google.cloud import aiplatform_v1
+
+ def sample_create_entity_type():
+ # Create a client
+ client = aiplatform_v1.FeaturestoreServiceClient()
+
+ # Initialize request argument(s)
+ request = aiplatform_v1.CreateEntityTypeRequest(
+ parent="parent_value",
+ entity_type_id="entity_type_id_value",
+ )
+
+ # Make the request
+ operation = client.create_entity_type(request=request)
+
+ print("Waiting for operation to complete...")
+
+ response = operation.result()
+
+ # Handle the response
+ print(response)
+
Args:
request (Union[google.cloud.aiplatform_v1.types.CreateEntityTypeRequest, dict]):
The request object. Request message for
@@ -1060,6 +1200,26 @@ def get_entity_type(
) -> entity_type.EntityType:
r"""Gets details of a single EntityType.
+
+ .. code-block::
+
+ from google.cloud import aiplatform_v1
+
+ def sample_get_entity_type():
+ # Create a client
+ client = aiplatform_v1.FeaturestoreServiceClient()
+
+ # Initialize request argument(s)
+ request = aiplatform_v1.GetEntityTypeRequest(
+ name="name_value",
+ )
+
+ # Make the request
+ response = client.get_entity_type(request=request)
+
+ # Handle the response
+ print(response)
+
Args:
request (Union[google.cloud.aiplatform_v1.types.GetEntityTypeRequest, dict]):
The request object. Request message for
@@ -1135,6 +1295,27 @@ def list_entity_types(
) -> pagers.ListEntityTypesPager:
r"""Lists EntityTypes in a given Featurestore.
+
+ .. code-block::
+
+ from google.cloud import aiplatform_v1
+
+ def sample_list_entity_types():
+ # Create a client
+ client = aiplatform_v1.FeaturestoreServiceClient()
+
+ # Initialize request argument(s)
+ request = aiplatform_v1.ListEntityTypesRequest(
+ parent="parent_value",
+ )
+
+ # Make the request
+ page_result = client.list_entity_types(request=request)
+
+ # Handle the response
+ for response in page_result:
+ print(response)
+
Args:
request (Union[google.cloud.aiplatform_v1.types.ListEntityTypesRequest, dict]):
The request object. Request message for
@@ -1217,6 +1398,25 @@ def update_entity_type(
) -> gca_entity_type.EntityType:
r"""Updates the parameters of a single EntityType.
+
+ .. code-block::
+
+ from google.cloud import aiplatform_v1
+
+ def sample_update_entity_type():
+ # Create a client
+ client = aiplatform_v1.FeaturestoreServiceClient()
+
+ # Initialize request argument(s)
+ request = aiplatform_v1.UpdateEntityTypeRequest(
+ )
+
+ # Make the request
+ response = client.update_entity_type(request=request)
+
+ # Handle the response
+ print(response)
+
Args:
request (Union[google.cloud.aiplatform_v1.types.UpdateEntityTypeRequest, dict]):
The request object. Request message for
@@ -1320,6 +1520,31 @@ def delete_entity_type(
Features or ``force`` must be set to true for the request to
succeed.
+
+
+ .. code-block::
+
+ from google.cloud import aiplatform_v1
+
+ def sample_delete_entity_type():
+ # Create a client
+ client = aiplatform_v1.FeaturestoreServiceClient()
+
+ # Initialize request argument(s)
+ request = aiplatform_v1.DeleteEntityTypeRequest(
+ name="name_value",
+ )
+
+ # Make the request
+ operation = client.delete_entity_type(request=request)
+
+ print("Waiting for operation to complete...")
+
+ response = operation.result()
+
+ # Handle the response
+ print(response)
+
Args:
request (Union[google.cloud.aiplatform_v1.types.DeleteEntityTypeRequest, dict]):
The request object. Request message for
@@ -1426,6 +1651,35 @@ def create_feature(
) -> gac_operation.Operation:
r"""Creates a new Feature in a given EntityType.
+
+ .. code-block::
+
+ from google.cloud import aiplatform_v1
+
+ def sample_create_feature():
+ # Create a client
+ client = aiplatform_v1.FeaturestoreServiceClient()
+
+ # Initialize request argument(s)
+ feature = aiplatform_v1.Feature()
+ feature.value_type = "BYTES"
+
+ request = aiplatform_v1.CreateFeatureRequest(
+ parent="parent_value",
+ feature=feature,
+ feature_id="feature_id_value",
+ )
+
+ # Make the request
+ operation = client.create_feature(request=request)
+
+ print("Waiting for operation to complete...")
+
+ response = operation.result()
+
+ # Handle the response
+ print(response)
+
Args:
request (Union[google.cloud.aiplatform_v1.types.CreateFeatureRequest, dict]):
The request object. Request message for
@@ -1533,6 +1787,36 @@ def batch_create_features(
) -> gac_operation.Operation:
r"""Creates a batch of Features in a given EntityType.
+
+ .. code-block::
+
+ from google.cloud import aiplatform_v1
+
+ def sample_batch_create_features():
+ # Create a client
+ client = aiplatform_v1.FeaturestoreServiceClient()
+
+ # Initialize request argument(s)
+ requests = aiplatform_v1.CreateFeatureRequest()
+ requests.parent = "parent_value"
+ requests.feature.value_type = "BYTES"
+ requests.feature_id = "feature_id_value"
+
+ request = aiplatform_v1.BatchCreateFeaturesRequest(
+ parent="parent_value",
+ requests=requests,
+ )
+
+ # Make the request
+ operation = client.batch_create_features(request=request)
+
+ print("Waiting for operation to complete...")
+
+ response = operation.result()
+
+ # Handle the response
+ print(response)
+
Args:
request (Union[google.cloud.aiplatform_v1.types.BatchCreateFeaturesRequest, dict]):
The request object. Request message for
@@ -1630,6 +1914,26 @@ def get_feature(
) -> feature.Feature:
r"""Gets details of a single Feature.
+
+ .. code-block::
+
+ from google.cloud import aiplatform_v1
+
+ def sample_get_feature():
+ # Create a client
+ client = aiplatform_v1.FeaturestoreServiceClient()
+
+ # Initialize request argument(s)
+ request = aiplatform_v1.GetFeatureRequest(
+ name="name_value",
+ )
+
+ # Make the request
+ response = client.get_feature(request=request)
+
+ # Handle the response
+ print(response)
+
Args:
request (Union[google.cloud.aiplatform_v1.types.GetFeatureRequest, dict]):
The request object. Request message for
@@ -1704,6 +2008,27 @@ def list_features(
) -> pagers.ListFeaturesPager:
r"""Lists Features in a given EntityType.
+
+ .. code-block::
+
+ from google.cloud import aiplatform_v1
+
+ def sample_list_features():
+ # Create a client
+ client = aiplatform_v1.FeaturestoreServiceClient()
+
+ # Initialize request argument(s)
+ request = aiplatform_v1.ListFeaturesRequest(
+ parent="parent_value",
+ )
+
+ # Make the request
+ page_result = client.list_features(request=request)
+
+ # Handle the response
+ for response in page_result:
+ print(response)
+
Args:
request (Union[google.cloud.aiplatform_v1.types.ListFeaturesRequest, dict]):
The request object. Request message for
@@ -1786,6 +2111,29 @@ def update_feature(
) -> gca_feature.Feature:
r"""Updates the parameters of a single Feature.
+
+ .. code-block::
+
+ from google.cloud import aiplatform_v1
+
+ def sample_update_feature():
+ # Create a client
+ client = aiplatform_v1.FeaturestoreServiceClient()
+
+ # Initialize request argument(s)
+ feature = aiplatform_v1.Feature()
+ feature.value_type = "BYTES"
+
+ request = aiplatform_v1.UpdateFeatureRequest(
+ feature=feature,
+ )
+
+ # Make the request
+ response = client.update_feature(request=request)
+
+ # Handle the response
+ print(response)
+
Args:
request (Union[google.cloud.aiplatform_v1.types.UpdateFeatureRequest, dict]):
The request object. Request message for
@@ -1885,6 +2233,30 @@ def delete_feature(
) -> gac_operation.Operation:
r"""Deletes a single Feature.
+
+ .. code-block::
+
+ from google.cloud import aiplatform_v1
+
+ def sample_delete_feature():
+ # Create a client
+ client = aiplatform_v1.FeaturestoreServiceClient()
+
+ # Initialize request argument(s)
+ request = aiplatform_v1.DeleteFeatureRequest(
+ name="name_value",
+ )
+
+ # Make the request
+ operation = client.delete_feature(request=request)
+
+ print("Waiting for operation to complete...")
+
+ response = operation.result()
+
+ # Handle the response
+ print(response)
+
Args:
request (Union[google.cloud.aiplatform_v1.types.DeleteFeatureRequest, dict]):
The request object. Request message for
@@ -1998,6 +2370,40 @@ def import_feature_values(
or retention policy.
- Online serving cluster is under-provisioned.
+
+
+ .. code-block::
+
+ from google.cloud import aiplatform_v1
+
+ def sample_import_feature_values():
+ # Create a client
+ client = aiplatform_v1.FeaturestoreServiceClient()
+
+ # Initialize request argument(s)
+ avro_source = aiplatform_v1.AvroSource()
+ avro_source.gcs_source.uris = ['uris_value_1', 'uris_value_2']
+
+ feature_specs = aiplatform_v1.FeatureSpec()
+ feature_specs.id = "id_value"
+
+ request = aiplatform_v1.ImportFeatureValuesRequest(
+ avro_source=avro_source,
+ feature_time_field="feature_time_field_value",
+ entity_type="entity_type_value",
+ feature_specs=feature_specs,
+ )
+
+ # Make the request
+ operation = client.import_feature_values(request=request)
+
+ print("Waiting for operation to complete...")
+
+ response = operation.result()
+
+ # Handle the response
+ print(response)
+
Args:
request (Union[google.cloud.aiplatform_v1.types.ImportFeatureValuesRequest, dict]):
The request object. Request message for
@@ -2090,6 +2496,44 @@ def batch_read_feature_values(
correctness is guaranteed for Feature values of each
read instance as of each instance's read timestamp.
+
+
+ .. code-block::
+
+ from google.cloud import aiplatform_v1
+
+ def sample_batch_read_feature_values():
+ # Create a client
+ client = aiplatform_v1.FeaturestoreServiceClient()
+
+ # Initialize request argument(s)
+ csv_read_instances = aiplatform_v1.CsvSource()
+ csv_read_instances.gcs_source.uris = ['uris_value_1', 'uris_value_2']
+
+ destination = aiplatform_v1.FeatureValueDestination()
+ destination.bigquery_destination.output_uri = "output_uri_value"
+
+ entity_type_specs = aiplatform_v1.EntityTypeSpec()
+ entity_type_specs.entity_type_id = "entity_type_id_value"
+ entity_type_specs.feature_selector.id_matcher.ids = ['ids_value_1', 'ids_value_2']
+
+ request = aiplatform_v1.BatchReadFeatureValuesRequest(
+ csv_read_instances=csv_read_instances,
+ featurestore="featurestore_value",
+ destination=destination,
+ entity_type_specs=entity_type_specs,
+ )
+
+ # Make the request
+ operation = client.batch_read_feature_values(request=request)
+
+ print("Waiting for operation to complete...")
+
+ response = operation.result()
+
+ # Handle the response
+ print(response)
+
Args:
request (Union[google.cloud.aiplatform_v1.types.BatchReadFeatureValuesRequest, dict]):
The request object. Request message for
@@ -2179,6 +2623,39 @@ def export_feature_values(
r"""Exports Feature values from all the entities of a
target EntityType.
+
+
+ .. code-block::
+
+ from google.cloud import aiplatform_v1
+
+ def sample_export_feature_values():
+ # Create a client
+ client = aiplatform_v1.FeaturestoreServiceClient()
+
+ # Initialize request argument(s)
+ destination = aiplatform_v1.FeatureValueDestination()
+ destination.bigquery_destination.output_uri = "output_uri_value"
+
+ feature_selector = aiplatform_v1.FeatureSelector()
+ feature_selector.id_matcher.ids = ['ids_value_1', 'ids_value_2']
+
+ request = aiplatform_v1.ExportFeatureValuesRequest(
+ entity_type="entity_type_value",
+ destination=destination,
+ feature_selector=feature_selector,
+ )
+
+ # Make the request
+ operation = client.export_feature_values(request=request)
+
+ print("Waiting for operation to complete...")
+
+ response = operation.result()
+
+ # Handle the response
+ print(response)
+
Args:
request (Union[google.cloud.aiplatform_v1.types.ExportFeatureValuesRequest, dict]):
The request object. Request message for
@@ -2267,6 +2744,28 @@ def search_features(
r"""Searches Features matching a query in a given
project.
+
+
+ .. code-block::
+
+ from google.cloud import aiplatform_v1
+
+ def sample_search_features():
+ # Create a client
+ client = aiplatform_v1.FeaturestoreServiceClient()
+
+ # Initialize request argument(s)
+ request = aiplatform_v1.SearchFeaturesRequest(
+ location="location_value",
+ )
+
+ # Make the request
+ page_result = client.search_features(request=request)
+
+ # Handle the response
+ for response in page_result:
+ print(response)
+
Args:
request (Union[google.cloud.aiplatform_v1.types.SearchFeaturesRequest, dict]):
The request object. Request message for
diff --git a/google/cloud/aiplatform_v1/services/index_endpoint_service/async_client.py b/google/cloud/aiplatform_v1/services/index_endpoint_service/async_client.py
index 61766a2fa3..433920b131 100644
--- a/google/cloud/aiplatform_v1/services/index_endpoint_service/async_client.py
+++ b/google/cloud/aiplatform_v1/services/index_endpoint_service/async_client.py
@@ -227,6 +227,33 @@ async def create_index_endpoint(
) -> operation_async.AsyncOperation:
r"""Creates an IndexEndpoint.
+ .. code-block::
+
+ from google.cloud import aiplatform_v1
+
+ def sample_create_index_endpoint():
+ # Create a client
+ client = aiplatform_v1.IndexEndpointServiceClient()
+
+ # Initialize request argument(s)
+ index_endpoint = aiplatform_v1.IndexEndpoint()
+ index_endpoint.display_name = "display_name_value"
+
+ request = aiplatform_v1.CreateIndexEndpointRequest(
+ parent="parent_value",
+ index_endpoint=index_endpoint,
+ )
+
+ # Make the request
+ operation = client.create_index_endpoint(request=request)
+
+ print("Waiting for operation to complete...")
+
+ response = operation.result()
+
+ # Handle the response
+ print(response)
+
Args:
request (Union[google.cloud.aiplatform_v1.types.CreateIndexEndpointRequest, dict]):
The request object. Request message for
@@ -318,6 +345,25 @@ async def get_index_endpoint(
) -> index_endpoint.IndexEndpoint:
r"""Gets an IndexEndpoint.
+ .. code-block::
+
+ from google.cloud import aiplatform_v1
+
+ def sample_get_index_endpoint():
+ # Create a client
+ client = aiplatform_v1.IndexEndpointServiceClient()
+
+ # Initialize request argument(s)
+ request = aiplatform_v1.GetIndexEndpointRequest(
+ name="name_value",
+ )
+
+ # Make the request
+ response = client.get_index_endpoint(request=request)
+
+ # Handle the response
+ print(response)
+
Args:
request (Union[google.cloud.aiplatform_v1.types.GetIndexEndpointRequest, dict]):
The request object. Request message for
@@ -391,6 +437,26 @@ async def list_index_endpoints(
) -> pagers.ListIndexEndpointsAsyncPager:
r"""Lists IndexEndpoints in a Location.
+ .. code-block::
+
+ from google.cloud import aiplatform_v1
+
+ def sample_list_index_endpoints():
+ # Create a client
+ client = aiplatform_v1.IndexEndpointServiceClient()
+
+ # Initialize request argument(s)
+ request = aiplatform_v1.ListIndexEndpointsRequest(
+ parent="parent_value",
+ )
+
+ # Make the request
+ page_result = client.list_index_endpoints(request=request)
+
+ # Handle the response
+ for response in page_result:
+ print(response)
+
Args:
request (Union[google.cloud.aiplatform_v1.types.ListIndexEndpointsRequest, dict]):
The request object. Request message for
@@ -473,6 +539,28 @@ async def update_index_endpoint(
) -> gca_index_endpoint.IndexEndpoint:
r"""Updates an IndexEndpoint.
+ .. code-block::
+
+ from google.cloud import aiplatform_v1
+
+ def sample_update_index_endpoint():
+ # Create a client
+ client = aiplatform_v1.IndexEndpointServiceClient()
+
+ # Initialize request argument(s)
+ index_endpoint = aiplatform_v1.IndexEndpoint()
+ index_endpoint.display_name = "display_name_value"
+
+ request = aiplatform_v1.UpdateIndexEndpointRequest(
+ index_endpoint=index_endpoint,
+ )
+
+ # Make the request
+ response = client.update_index_endpoint(request=request)
+
+ # Handle the response
+ print(response)
+
Args:
request (Union[google.cloud.aiplatform_v1.types.UpdateIndexEndpointRequest, dict]):
The request object. Request message for
@@ -556,6 +644,29 @@ async def delete_index_endpoint(
) -> operation_async.AsyncOperation:
r"""Deletes an IndexEndpoint.
+ .. code-block::
+
+ from google.cloud import aiplatform_v1
+
+ def sample_delete_index_endpoint():
+ # Create a client
+ client = aiplatform_v1.IndexEndpointServiceClient()
+
+ # Initialize request argument(s)
+ request = aiplatform_v1.DeleteIndexEndpointRequest(
+ name="name_value",
+ )
+
+ # Make the request
+ operation = client.delete_index_endpoint(request=request)
+
+ print("Waiting for operation to complete...")
+
+ response = operation.result()
+
+ # Handle the response
+ print(response)
+
Args:
request (Union[google.cloud.aiplatform_v1.types.DeleteIndexEndpointRequest, dict]):
The request object. Request message for
@@ -652,6 +763,35 @@ async def deploy_index(
DeployedIndex within it.
Only non-empty Indexes can be deployed.
+
+ .. code-block::
+
+ from google.cloud import aiplatform_v1
+
+ def sample_deploy_index():
+ # Create a client
+ client = aiplatform_v1.IndexEndpointServiceClient()
+
+ # Initialize request argument(s)
+ deployed_index = aiplatform_v1.DeployedIndex()
+ deployed_index.id = "id_value"
+ deployed_index.index = "index_value"
+
+ request = aiplatform_v1.DeployIndexRequest(
+ index_endpoint="index_endpoint_value",
+ deployed_index=deployed_index,
+ )
+
+ # Make the request
+ operation = client.deploy_index(request=request)
+
+ print("Waiting for operation to complete...")
+
+ response = operation.result()
+
+ # Handle the response
+ print(response)
+
Args:
request (Union[google.cloud.aiplatform_v1.types.DeployIndexRequest, dict]):
The request object. Request message for
@@ -750,6 +890,31 @@ async def undeploy_index(
DeployedIndex from it, and freeing all resources it's
using.
+
+ .. code-block::
+
+ from google.cloud import aiplatform_v1
+
+ def sample_undeploy_index():
+ # Create a client
+ client = aiplatform_v1.IndexEndpointServiceClient()
+
+ # Initialize request argument(s)
+ request = aiplatform_v1.UndeployIndexRequest(
+ index_endpoint="index_endpoint_value",
+ deployed_index_id="deployed_index_id_value",
+ )
+
+ # Make the request
+ operation = client.undeploy_index(request=request)
+
+ print("Waiting for operation to complete...")
+
+ response = operation.result()
+
+ # Handle the response
+ print(response)
+
Args:
request (Union[google.cloud.aiplatform_v1.types.UndeployIndexRequest, dict]):
The request object. Request message for
@@ -847,6 +1012,35 @@ async def mutate_deployed_index(
r"""Update an existing DeployedIndex under an
IndexEndpoint.
+
+ .. code-block::
+
+ from google.cloud import aiplatform_v1
+
+ def sample_mutate_deployed_index():
+ # Create a client
+ client = aiplatform_v1.IndexEndpointServiceClient()
+
+ # Initialize request argument(s)
+ deployed_index = aiplatform_v1.DeployedIndex()
+ deployed_index.id = "id_value"
+ deployed_index.index = "index_value"
+
+ request = aiplatform_v1.MutateDeployedIndexRequest(
+ index_endpoint="index_endpoint_value",
+ deployed_index=deployed_index,
+ )
+
+ # Make the request
+ operation = client.mutate_deployed_index(request=request)
+
+ print("Waiting for operation to complete...")
+
+ response = operation.result()
+
+ # Handle the response
+ print(response)
+
Args:
request (Union[google.cloud.aiplatform_v1.types.MutateDeployedIndexRequest, dict]):
The request object. Request message for
diff --git a/google/cloud/aiplatform_v1/services/index_endpoint_service/client.py b/google/cloud/aiplatform_v1/services/index_endpoint_service/client.py
index 55df2398e5..7d2362bad4 100644
--- a/google/cloud/aiplatform_v1/services/index_endpoint_service/client.py
+++ b/google/cloud/aiplatform_v1/services/index_endpoint_service/client.py
@@ -437,6 +437,34 @@ def create_index_endpoint(
) -> gac_operation.Operation:
r"""Creates an IndexEndpoint.
+
+ .. code-block::
+
+ from google.cloud import aiplatform_v1
+
+ def sample_create_index_endpoint():
+ # Create a client
+ client = aiplatform_v1.IndexEndpointServiceClient()
+
+ # Initialize request argument(s)
+ index_endpoint = aiplatform_v1.IndexEndpoint()
+ index_endpoint.display_name = "display_name_value"
+
+ request = aiplatform_v1.CreateIndexEndpointRequest(
+ parent="parent_value",
+ index_endpoint=index_endpoint,
+ )
+
+ # Make the request
+ operation = client.create_index_endpoint(request=request)
+
+ print("Waiting for operation to complete...")
+
+ response = operation.result()
+
+ # Handle the response
+ print(response)
+
Args:
request (Union[google.cloud.aiplatform_v1.types.CreateIndexEndpointRequest, dict]):
The request object. Request message for
@@ -528,6 +556,26 @@ def get_index_endpoint(
) -> index_endpoint.IndexEndpoint:
r"""Gets an IndexEndpoint.
+
+ .. code-block::
+
+ from google.cloud import aiplatform_v1
+
+ def sample_get_index_endpoint():
+ # Create a client
+ client = aiplatform_v1.IndexEndpointServiceClient()
+
+ # Initialize request argument(s)
+ request = aiplatform_v1.GetIndexEndpointRequest(
+ name="name_value",
+ )
+
+ # Make the request
+ response = client.get_index_endpoint(request=request)
+
+ # Handle the response
+ print(response)
+
Args:
request (Union[google.cloud.aiplatform_v1.types.GetIndexEndpointRequest, dict]):
The request object. Request message for
@@ -601,6 +649,27 @@ def list_index_endpoints(
) -> pagers.ListIndexEndpointsPager:
r"""Lists IndexEndpoints in a Location.
+
+ .. code-block::
+
+ from google.cloud import aiplatform_v1
+
+ def sample_list_index_endpoints():
+ # Create a client
+ client = aiplatform_v1.IndexEndpointServiceClient()
+
+ # Initialize request argument(s)
+ request = aiplatform_v1.ListIndexEndpointsRequest(
+ parent="parent_value",
+ )
+
+ # Make the request
+ page_result = client.list_index_endpoints(request=request)
+
+ # Handle the response
+ for response in page_result:
+ print(response)
+
Args:
request (Union[google.cloud.aiplatform_v1.types.ListIndexEndpointsRequest, dict]):
The request object. Request message for
@@ -683,6 +752,29 @@ def update_index_endpoint(
) -> gca_index_endpoint.IndexEndpoint:
r"""Updates an IndexEndpoint.
+
+ .. code-block::
+
+ from google.cloud import aiplatform_v1
+
+ def sample_update_index_endpoint():
+ # Create a client
+ client = aiplatform_v1.IndexEndpointServiceClient()
+
+ # Initialize request argument(s)
+ index_endpoint = aiplatform_v1.IndexEndpoint()
+ index_endpoint.display_name = "display_name_value"
+
+ request = aiplatform_v1.UpdateIndexEndpointRequest(
+ index_endpoint=index_endpoint,
+ )
+
+ # Make the request
+ response = client.update_index_endpoint(request=request)
+
+ # Handle the response
+ print(response)
+
Args:
request (Union[google.cloud.aiplatform_v1.types.UpdateIndexEndpointRequest, dict]):
The request object. Request message for
@@ -766,6 +858,30 @@ def delete_index_endpoint(
) -> gac_operation.Operation:
r"""Deletes an IndexEndpoint.
+
+ .. code-block::
+
+ from google.cloud import aiplatform_v1
+
+ def sample_delete_index_endpoint():
+ # Create a client
+ client = aiplatform_v1.IndexEndpointServiceClient()
+
+ # Initialize request argument(s)
+ request = aiplatform_v1.DeleteIndexEndpointRequest(
+ name="name_value",
+ )
+
+ # Make the request
+ operation = client.delete_index_endpoint(request=request)
+
+ print("Waiting for operation to complete...")
+
+ response = operation.result()
+
+ # Handle the response
+ print(response)
+
Args:
request (Union[google.cloud.aiplatform_v1.types.DeleteIndexEndpointRequest, dict]):
The request object. Request message for
@@ -862,6 +978,36 @@ def deploy_index(
DeployedIndex within it.
Only non-empty Indexes can be deployed.
+
+
+ .. code-block::
+
+ from google.cloud import aiplatform_v1
+
+ def sample_deploy_index():
+ # Create a client
+ client = aiplatform_v1.IndexEndpointServiceClient()
+
+ # Initialize request argument(s)
+ deployed_index = aiplatform_v1.DeployedIndex()
+ deployed_index.id = "id_value"
+ deployed_index.index = "index_value"
+
+ request = aiplatform_v1.DeployIndexRequest(
+ index_endpoint="index_endpoint_value",
+ deployed_index=deployed_index,
+ )
+
+ # Make the request
+ operation = client.deploy_index(request=request)
+
+ print("Waiting for operation to complete...")
+
+ response = operation.result()
+
+ # Handle the response
+ print(response)
+
Args:
request (Union[google.cloud.aiplatform_v1.types.DeployIndexRequest, dict]):
The request object. Request message for
@@ -960,6 +1106,32 @@ def undeploy_index(
DeployedIndex from it, and freeing all resources it's
using.
+
+
+ .. code-block::
+
+ from google.cloud import aiplatform_v1
+
+ def sample_undeploy_index():
+ # Create a client
+ client = aiplatform_v1.IndexEndpointServiceClient()
+
+ # Initialize request argument(s)
+ request = aiplatform_v1.UndeployIndexRequest(
+ index_endpoint="index_endpoint_value",
+ deployed_index_id="deployed_index_id_value",
+ )
+
+ # Make the request
+ operation = client.undeploy_index(request=request)
+
+ print("Waiting for operation to complete...")
+
+ response = operation.result()
+
+ # Handle the response
+ print(response)
+
Args:
request (Union[google.cloud.aiplatform_v1.types.UndeployIndexRequest, dict]):
The request object. Request message for
@@ -1057,6 +1229,36 @@ def mutate_deployed_index(
r"""Update an existing DeployedIndex under an
IndexEndpoint.
+
+
+ .. code-block::
+
+ from google.cloud import aiplatform_v1
+
+ def sample_mutate_deployed_index():
+ # Create a client
+ client = aiplatform_v1.IndexEndpointServiceClient()
+
+ # Initialize request argument(s)
+ deployed_index = aiplatform_v1.DeployedIndex()
+ deployed_index.id = "id_value"
+ deployed_index.index = "index_value"
+
+ request = aiplatform_v1.MutateDeployedIndexRequest(
+ index_endpoint="index_endpoint_value",
+ deployed_index=deployed_index,
+ )
+
+ # Make the request
+ operation = client.mutate_deployed_index(request=request)
+
+ print("Waiting for operation to complete...")
+
+ response = operation.result()
+
+ # Handle the response
+ print(response)
+
Args:
request (Union[google.cloud.aiplatform_v1.types.MutateDeployedIndexRequest, dict]):
The request object. Request message for
diff --git a/google/cloud/aiplatform_v1/services/index_service/async_client.py b/google/cloud/aiplatform_v1/services/index_service/async_client.py
index 6b15fe0b3f..5df1f644eb 100644
--- a/google/cloud/aiplatform_v1/services/index_service/async_client.py
+++ b/google/cloud/aiplatform_v1/services/index_service/async_client.py
@@ -226,6 +226,33 @@ async def create_index(
) -> operation_async.AsyncOperation:
r"""Creates an Index.
+ .. code-block::
+
+ from google.cloud import aiplatform_v1
+
+ def sample_create_index():
+ # Create a client
+ client = aiplatform_v1.IndexServiceClient()
+
+ # Initialize request argument(s)
+ index = aiplatform_v1.Index()
+ index.display_name = "display_name_value"
+
+ request = aiplatform_v1.CreateIndexRequest(
+ parent="parent_value",
+ index=index,
+ )
+
+ # Make the request
+ operation = client.create_index(request=request)
+
+ print("Waiting for operation to complete...")
+
+ response = operation.result()
+
+ # Handle the response
+ print(response)
+
Args:
request (Union[google.cloud.aiplatform_v1.types.CreateIndexRequest, dict]):
The request object. Request message for
@@ -316,6 +343,25 @@ async def get_index(
) -> index.Index:
r"""Gets an Index.
+ .. code-block::
+
+ from google.cloud import aiplatform_v1
+
+ def sample_get_index():
+ # Create a client
+ client = aiplatform_v1.IndexServiceClient()
+
+ # Initialize request argument(s)
+ request = aiplatform_v1.GetIndexRequest(
+ name="name_value",
+ )
+
+ # Make the request
+ response = client.get_index(request=request)
+
+ # Handle the response
+ print(response)
+
Args:
request (Union[google.cloud.aiplatform_v1.types.GetIndexRequest, dict]):
The request object. Request message for
@@ -389,6 +435,26 @@ async def list_indexes(
) -> pagers.ListIndexesAsyncPager:
r"""Lists Indexes in a Location.
+ .. code-block::
+
+ from google.cloud import aiplatform_v1
+
+ def sample_list_indexes():
+ # Create a client
+ client = aiplatform_v1.IndexServiceClient()
+
+ # Initialize request argument(s)
+ request = aiplatform_v1.ListIndexesRequest(
+ parent="parent_value",
+ )
+
+ # Make the request
+ page_result = client.list_indexes(request=request)
+
+ # Handle the response
+ for response in page_result:
+ print(response)
+
Args:
request (Union[google.cloud.aiplatform_v1.types.ListIndexesRequest, dict]):
The request object. Request message for
@@ -471,6 +537,32 @@ async def update_index(
) -> operation_async.AsyncOperation:
r"""Updates an Index.
+ .. code-block::
+
+ from google.cloud import aiplatform_v1
+
+ def sample_update_index():
+ # Create a client
+ client = aiplatform_v1.IndexServiceClient()
+
+ # Initialize request argument(s)
+ index = aiplatform_v1.Index()
+ index.display_name = "display_name_value"
+
+ request = aiplatform_v1.UpdateIndexRequest(
+ index=index,
+ )
+
+ # Make the request
+ operation = client.update_index(request=request)
+
+ print("Waiting for operation to complete...")
+
+ response = operation.result()
+
+ # Handle the response
+ print(response)
+
Args:
request (Union[google.cloud.aiplatform_v1.types.UpdateIndexRequest, dict]):
The request object. Request message for
@@ -567,6 +659,30 @@ async def delete_index(
[DeployedIndexes][google.cloud.aiplatform.v1.Index.deployed_indexes]
had been undeployed.
+
+ .. code-block::
+
+ from google.cloud import aiplatform_v1
+
+ def sample_delete_index():
+ # Create a client
+ client = aiplatform_v1.IndexServiceClient()
+
+ # Initialize request argument(s)
+ request = aiplatform_v1.DeleteIndexRequest(
+ name="name_value",
+ )
+
+ # Make the request
+ operation = client.delete_index(request=request)
+
+ print("Waiting for operation to complete...")
+
+ response = operation.result()
+
+ # Handle the response
+ print(response)
+
Args:
request (Union[google.cloud.aiplatform_v1.types.DeleteIndexRequest, dict]):
The request object. Request message for
diff --git a/google/cloud/aiplatform_v1/services/index_service/client.py b/google/cloud/aiplatform_v1/services/index_service/client.py
index ac5370dc4c..3b96814bab 100644
--- a/google/cloud/aiplatform_v1/services/index_service/client.py
+++ b/google/cloud/aiplatform_v1/services/index_service/client.py
@@ -437,6 +437,34 @@ def create_index(
) -> gac_operation.Operation:
r"""Creates an Index.
+
+ .. code-block::
+
+ from google.cloud import aiplatform_v1
+
+ def sample_create_index():
+ # Create a client
+ client = aiplatform_v1.IndexServiceClient()
+
+ # Initialize request argument(s)
+ index = aiplatform_v1.Index()
+ index.display_name = "display_name_value"
+
+ request = aiplatform_v1.CreateIndexRequest(
+ parent="parent_value",
+ index=index,
+ )
+
+ # Make the request
+ operation = client.create_index(request=request)
+
+ print("Waiting for operation to complete...")
+
+ response = operation.result()
+
+ # Handle the response
+ print(response)
+
Args:
request (Union[google.cloud.aiplatform_v1.types.CreateIndexRequest, dict]):
The request object. Request message for
@@ -527,6 +555,26 @@ def get_index(
) -> index.Index:
r"""Gets an Index.
+
+ .. code-block::
+
+ from google.cloud import aiplatform_v1
+
+ def sample_get_index():
+ # Create a client
+ client = aiplatform_v1.IndexServiceClient()
+
+ # Initialize request argument(s)
+ request = aiplatform_v1.GetIndexRequest(
+ name="name_value",
+ )
+
+ # Make the request
+ response = client.get_index(request=request)
+
+ # Handle the response
+ print(response)
+
Args:
request (Union[google.cloud.aiplatform_v1.types.GetIndexRequest, dict]):
The request object. Request message for
@@ -600,6 +648,27 @@ def list_indexes(
) -> pagers.ListIndexesPager:
r"""Lists Indexes in a Location.
+
+ .. code-block::
+
+ from google.cloud import aiplatform_v1
+
+ def sample_list_indexes():
+ # Create a client
+ client = aiplatform_v1.IndexServiceClient()
+
+ # Initialize request argument(s)
+ request = aiplatform_v1.ListIndexesRequest(
+ parent="parent_value",
+ )
+
+ # Make the request
+ page_result = client.list_indexes(request=request)
+
+ # Handle the response
+ for response in page_result:
+ print(response)
+
Args:
request (Union[google.cloud.aiplatform_v1.types.ListIndexesRequest, dict]):
The request object. Request message for
@@ -682,6 +751,33 @@ def update_index(
) -> gac_operation.Operation:
r"""Updates an Index.
+
+ .. code-block::
+
+ from google.cloud import aiplatform_v1
+
+ def sample_update_index():
+ # Create a client
+ client = aiplatform_v1.IndexServiceClient()
+
+ # Initialize request argument(s)
+ index = aiplatform_v1.Index()
+ index.display_name = "display_name_value"
+
+ request = aiplatform_v1.UpdateIndexRequest(
+ index=index,
+ )
+
+ # Make the request
+ operation = client.update_index(request=request)
+
+ print("Waiting for operation to complete...")
+
+ response = operation.result()
+
+ # Handle the response
+ print(response)
+
Args:
request (Union[google.cloud.aiplatform_v1.types.UpdateIndexRequest, dict]):
The request object. Request message for
@@ -778,6 +874,31 @@ def delete_index(
[DeployedIndexes][google.cloud.aiplatform.v1.Index.deployed_indexes]
had been undeployed.
+
+
+ .. code-block::
+
+ from google.cloud import aiplatform_v1
+
+ def sample_delete_index():
+ # Create a client
+ client = aiplatform_v1.IndexServiceClient()
+
+ # Initialize request argument(s)
+ request = aiplatform_v1.DeleteIndexRequest(
+ name="name_value",
+ )
+
+ # Make the request
+ operation = client.delete_index(request=request)
+
+ print("Waiting for operation to complete...")
+
+ response = operation.result()
+
+ # Handle the response
+ print(response)
+
Args:
request (Union[google.cloud.aiplatform_v1.types.DeleteIndexRequest, dict]):
The request object. Request message for
diff --git a/google/cloud/aiplatform_v1/services/job_service/async_client.py b/google/cloud/aiplatform_v1/services/job_service/async_client.py
index 96a4158a1b..629ec3410f 100644
--- a/google/cloud/aiplatform_v1/services/job_service/async_client.py
+++ b/google/cloud/aiplatform_v1/services/job_service/async_client.py
@@ -277,6 +277,31 @@ async def create_custom_job(
r"""Creates a CustomJob. A created CustomJob right away
will be attempted to be run.
+
+ .. code-block::
+
+ from google.cloud import aiplatform_v1
+
+ def sample_create_custom_job():
+ # Create a client
+ client = aiplatform_v1.JobServiceClient()
+
+ # Initialize request argument(s)
+ custom_job = aiplatform_v1.CustomJob()
+ custom_job.display_name = "display_name_value"
+ custom_job.job_spec.worker_pool_specs.container_spec.image_uri = "image_uri_value"
+
+ request = aiplatform_v1.CreateCustomJobRequest(
+ parent="parent_value",
+ custom_job=custom_job,
+ )
+
+ # Make the request
+ response = client.create_custom_job(request=request)
+
+ # Handle the response
+ print(response)
+
Args:
request (Union[google.cloud.aiplatform_v1.types.CreateCustomJobRequest, dict]):
The request object. Request message for
@@ -362,6 +387,25 @@ async def get_custom_job(
) -> custom_job.CustomJob:
r"""Gets a CustomJob.
+ .. code-block::
+
+ from google.cloud import aiplatform_v1
+
+ def sample_get_custom_job():
+ # Create a client
+ client = aiplatform_v1.JobServiceClient()
+
+ # Initialize request argument(s)
+ request = aiplatform_v1.GetCustomJobRequest(
+ name="name_value",
+ )
+
+ # Make the request
+ response = client.get_custom_job(request=request)
+
+ # Handle the response
+ print(response)
+
Args:
request (Union[google.cloud.aiplatform_v1.types.GetCustomJobRequest, dict]):
The request object. Request message for
@@ -439,6 +483,26 @@ async def list_custom_jobs(
) -> pagers.ListCustomJobsAsyncPager:
r"""Lists CustomJobs in a Location.
+ .. code-block::
+
+ from google.cloud import aiplatform_v1
+
+ def sample_list_custom_jobs():
+ # Create a client
+ client = aiplatform_v1.JobServiceClient()
+
+ # Initialize request argument(s)
+ request = aiplatform_v1.ListCustomJobsRequest(
+ parent="parent_value",
+ )
+
+ # Make the request
+ page_result = client.list_custom_jobs(request=request)
+
+ # Handle the response
+ for response in page_result:
+ print(response)
+
Args:
request (Union[google.cloud.aiplatform_v1.types.ListCustomJobsRequest, dict]):
The request object. Request message for
@@ -520,6 +584,29 @@ async def delete_custom_job(
) -> operation_async.AsyncOperation:
r"""Deletes a CustomJob.
+ .. code-block::
+
+ from google.cloud import aiplatform_v1
+
+ def sample_delete_custom_job():
+ # Create a client
+ client = aiplatform_v1.JobServiceClient()
+
+ # Initialize request argument(s)
+ request = aiplatform_v1.DeleteCustomJobRequest(
+ name="name_value",
+ )
+
+ # Make the request
+ operation = client.delete_custom_job(request=request)
+
+ print("Waiting for operation to complete...")
+
+ response = operation.result()
+
+ # Handle the response
+ print(response)
+
Args:
request (Union[google.cloud.aiplatform_v1.types.DeleteCustomJobRequest, dict]):
The request object. Request message for
@@ -625,6 +712,23 @@ async def cancel_custom_job(
[CustomJob.state][google.cloud.aiplatform.v1.CustomJob.state] is
set to ``CANCELLED``.
+
+ .. code-block::
+
+ from google.cloud import aiplatform_v1
+
+ def sample_cancel_custom_job():
+ # Create a client
+ client = aiplatform_v1.JobServiceClient()
+
+ # Initialize request argument(s)
+ request = aiplatform_v1.CancelCustomJobRequest(
+ name="name_value",
+ )
+
+ # Make the request
+ client.cancel_custom_job(request=request)
+
Args:
request (Union[google.cloud.aiplatform_v1.types.CancelCustomJobRequest, dict]):
The request object. Request message for
@@ -690,6 +794,34 @@ async def create_data_labeling_job(
) -> gca_data_labeling_job.DataLabelingJob:
r"""Creates a DataLabelingJob.
+ .. code-block::
+
+ from google.cloud import aiplatform_v1
+
+ def sample_create_data_labeling_job():
+ # Create a client
+ client = aiplatform_v1.JobServiceClient()
+
+ # Initialize request argument(s)
+ data_labeling_job = aiplatform_v1.DataLabelingJob()
+ data_labeling_job.display_name = "display_name_value"
+ data_labeling_job.datasets = ['datasets_value_1', 'datasets_value_2']
+ data_labeling_job.labeler_count = 1375
+ data_labeling_job.instruction_uri = "instruction_uri_value"
+ data_labeling_job.inputs_schema_uri = "inputs_schema_uri_value"
+ data_labeling_job.inputs.null_value = "NULL_VALUE"
+
+ request = aiplatform_v1.CreateDataLabelingJobRequest(
+ parent="parent_value",
+ data_labeling_job=data_labeling_job,
+ )
+
+ # Make the request
+ response = client.create_data_labeling_job(request=request)
+
+ # Handle the response
+ print(response)
+
Args:
request (Union[google.cloud.aiplatform_v1.types.CreateDataLabelingJobRequest, dict]):
The request object. Request message for
@@ -771,6 +903,25 @@ async def get_data_labeling_job(
) -> data_labeling_job.DataLabelingJob:
r"""Gets a DataLabelingJob.
+ .. code-block::
+
+ from google.cloud import aiplatform_v1
+
+ def sample_get_data_labeling_job():
+ # Create a client
+ client = aiplatform_v1.JobServiceClient()
+
+ # Initialize request argument(s)
+ request = aiplatform_v1.GetDataLabelingJobRequest(
+ name="name_value",
+ )
+
+ # Make the request
+ response = client.get_data_labeling_job(request=request)
+
+ # Handle the response
+ print(response)
+
Args:
request (Union[google.cloud.aiplatform_v1.types.GetDataLabelingJobRequest, dict]):
The request object. Request message for
@@ -843,6 +994,26 @@ async def list_data_labeling_jobs(
) -> pagers.ListDataLabelingJobsAsyncPager:
r"""Lists DataLabelingJobs in a Location.
+ .. code-block::
+
+ from google.cloud import aiplatform_v1
+
+ def sample_list_data_labeling_jobs():
+ # Create a client
+ client = aiplatform_v1.JobServiceClient()
+
+ # Initialize request argument(s)
+ request = aiplatform_v1.ListDataLabelingJobsRequest(
+ parent="parent_value",
+ )
+
+ # Make the request
+ page_result = client.list_data_labeling_jobs(request=request)
+
+ # Handle the response
+ for response in page_result:
+ print(response)
+
Args:
request (Union[google.cloud.aiplatform_v1.types.ListDataLabelingJobsRequest, dict]):
The request object. Request message for
@@ -923,6 +1094,29 @@ async def delete_data_labeling_job(
) -> operation_async.AsyncOperation:
r"""Deletes a DataLabelingJob.
+ .. code-block::
+
+ from google.cloud import aiplatform_v1
+
+ def sample_delete_data_labeling_job():
+ # Create a client
+ client = aiplatform_v1.JobServiceClient()
+
+ # Initialize request argument(s)
+ request = aiplatform_v1.DeleteDataLabelingJobRequest(
+ name="name_value",
+ )
+
+ # Make the request
+ operation = client.delete_data_labeling_job(request=request)
+
+ print("Waiting for operation to complete...")
+
+ response = operation.result()
+
+ # Handle the response
+ print(response)
+
Args:
request (Union[google.cloud.aiplatform_v1.types.DeleteDataLabelingJobRequest, dict]):
The request object. Request message for
@@ -1017,6 +1211,23 @@ async def cancel_data_labeling_job(
r"""Cancels a DataLabelingJob. Success of cancellation is
not guaranteed.
+
+ .. code-block::
+
+ from google.cloud import aiplatform_v1
+
+ def sample_cancel_data_labeling_job():
+ # Create a client
+ client = aiplatform_v1.JobServiceClient()
+
+ # Initialize request argument(s)
+ request = aiplatform_v1.CancelDataLabelingJobRequest(
+ name="name_value",
+ )
+
+ # Make the request
+ client.cancel_data_labeling_job(request=request)
+
Args:
request (Union[google.cloud.aiplatform_v1.types.CancelDataLabelingJobRequest, dict]):
The request object. Request message for
@@ -1082,6 +1293,37 @@ async def create_hyperparameter_tuning_job(
) -> gca_hyperparameter_tuning_job.HyperparameterTuningJob:
r"""Creates a HyperparameterTuningJob
+ .. code-block::
+
+ from google.cloud import aiplatform_v1
+
+ def sample_create_hyperparameter_tuning_job():
+ # Create a client
+ client = aiplatform_v1.JobServiceClient()
+
+ # Initialize request argument(s)
+ hyperparameter_tuning_job = aiplatform_v1.HyperparameterTuningJob()
+ hyperparameter_tuning_job.display_name = "display_name_value"
+ hyperparameter_tuning_job.study_spec.metrics.metric_id = "metric_id_value"
+ hyperparameter_tuning_job.study_spec.metrics.goal = "MINIMIZE"
+ hyperparameter_tuning_job.study_spec.parameters.double_value_spec.min_value = 0.96
+ hyperparameter_tuning_job.study_spec.parameters.double_value_spec.max_value = 0.962
+ hyperparameter_tuning_job.study_spec.parameters.parameter_id = "parameter_id_value"
+ hyperparameter_tuning_job.max_trial_count = 1609
+ hyperparameter_tuning_job.parallel_trial_count = 2128
+ hyperparameter_tuning_job.trial_job_spec.worker_pool_specs.container_spec.image_uri = "image_uri_value"
+
+ request = aiplatform_v1.CreateHyperparameterTuningJobRequest(
+ parent="parent_value",
+ hyperparameter_tuning_job=hyperparameter_tuning_job,
+ )
+
+ # Make the request
+ response = client.create_hyperparameter_tuning_job(request=request)
+
+ # Handle the response
+ print(response)
+
Args:
request (Union[google.cloud.aiplatform_v1.types.CreateHyperparameterTuningJobRequest, dict]):
The request object. Request message for
@@ -1165,6 +1407,25 @@ async def get_hyperparameter_tuning_job(
) -> hyperparameter_tuning_job.HyperparameterTuningJob:
r"""Gets a HyperparameterTuningJob
+ .. code-block::
+
+ from google.cloud import aiplatform_v1
+
+ def sample_get_hyperparameter_tuning_job():
+ # Create a client
+ client = aiplatform_v1.JobServiceClient()
+
+ # Initialize request argument(s)
+ request = aiplatform_v1.GetHyperparameterTuningJobRequest(
+ name="name_value",
+ )
+
+ # Make the request
+ response = client.get_hyperparameter_tuning_job(request=request)
+
+ # Handle the response
+ print(response)
+
Args:
request (Union[google.cloud.aiplatform_v1.types.GetHyperparameterTuningJobRequest, dict]):
The request object. Request message for
@@ -1239,6 +1500,26 @@ async def list_hyperparameter_tuning_jobs(
) -> pagers.ListHyperparameterTuningJobsAsyncPager:
r"""Lists HyperparameterTuningJobs in a Location.
+ .. code-block::
+
+ from google.cloud import aiplatform_v1
+
+ def sample_list_hyperparameter_tuning_jobs():
+ # Create a client
+ client = aiplatform_v1.JobServiceClient()
+
+ # Initialize request argument(s)
+ request = aiplatform_v1.ListHyperparameterTuningJobsRequest(
+ parent="parent_value",
+ )
+
+ # Make the request
+ page_result = client.list_hyperparameter_tuning_jobs(request=request)
+
+ # Handle the response
+ for response in page_result:
+ print(response)
+
Args:
request (Union[google.cloud.aiplatform_v1.types.ListHyperparameterTuningJobsRequest, dict]):
The request object. Request message for
@@ -1320,6 +1601,29 @@ async def delete_hyperparameter_tuning_job(
) -> operation_async.AsyncOperation:
r"""Deletes a HyperparameterTuningJob.
+ .. code-block::
+
+ from google.cloud import aiplatform_v1
+
+ def sample_delete_hyperparameter_tuning_job():
+ # Create a client
+ client = aiplatform_v1.JobServiceClient()
+
+ # Initialize request argument(s)
+ request = aiplatform_v1.DeleteHyperparameterTuningJobRequest(
+ name="name_value",
+ )
+
+ # Make the request
+ operation = client.delete_hyperparameter_tuning_job(request=request)
+
+ print("Waiting for operation to complete...")
+
+ response = operation.result()
+
+ # Handle the response
+ print(response)
+
Args:
request (Union[google.cloud.aiplatform_v1.types.DeleteHyperparameterTuningJobRequest, dict]):
The request object. Request message for
@@ -1426,6 +1730,23 @@ async def cancel_hyperparameter_tuning_job(
[HyperparameterTuningJob.state][google.cloud.aiplatform.v1.HyperparameterTuningJob.state]
is set to ``CANCELLED``.
+
+ .. code-block::
+
+ from google.cloud import aiplatform_v1
+
+ def sample_cancel_hyperparameter_tuning_job():
+ # Create a client
+ client = aiplatform_v1.JobServiceClient()
+
+ # Initialize request argument(s)
+ request = aiplatform_v1.CancelHyperparameterTuningJobRequest(
+ name="name_value",
+ )
+
+ # Make the request
+ client.cancel_hyperparameter_tuning_job(request=request)
+
Args:
request (Union[google.cloud.aiplatform_v1.types.CancelHyperparameterTuningJobRequest, dict]):
The request object. Request message for
@@ -1493,6 +1814,34 @@ async def create_batch_prediction_job(
r"""Creates a BatchPredictionJob. A BatchPredictionJob
once created will right away be attempted to start.
+
+ .. code-block::
+
+ from google.cloud import aiplatform_v1
+
+ def sample_create_batch_prediction_job():
+ # Create a client
+ client = aiplatform_v1.JobServiceClient()
+
+ # Initialize request argument(s)
+ batch_prediction_job = aiplatform_v1.BatchPredictionJob()
+ batch_prediction_job.display_name = "display_name_value"
+ batch_prediction_job.input_config.gcs_source.uris = ['uris_value_1', 'uris_value_2']
+ batch_prediction_job.input_config.instances_format = "instances_format_value"
+ batch_prediction_job.output_config.gcs_destination.output_uri_prefix = "output_uri_prefix_value"
+ batch_prediction_job.output_config.predictions_format = "predictions_format_value"
+
+ request = aiplatform_v1.CreateBatchPredictionJobRequest(
+ parent="parent_value",
+ batch_prediction_job=batch_prediction_job,
+ )
+
+ # Make the request
+ response = client.create_batch_prediction_job(request=request)
+
+ # Handle the response
+ print(response)
+
Args:
request (Union[google.cloud.aiplatform_v1.types.CreateBatchPredictionJobRequest, dict]):
The request object. Request message for
@@ -1578,6 +1927,25 @@ async def get_batch_prediction_job(
) -> batch_prediction_job.BatchPredictionJob:
r"""Gets a BatchPredictionJob
+ .. code-block::
+
+ from google.cloud import aiplatform_v1
+
+ def sample_get_batch_prediction_job():
+ # Create a client
+ client = aiplatform_v1.JobServiceClient()
+
+ # Initialize request argument(s)
+ request = aiplatform_v1.GetBatchPredictionJobRequest(
+ name="name_value",
+ )
+
+ # Make the request
+ response = client.get_batch_prediction_job(request=request)
+
+ # Handle the response
+ print(response)
+
Args:
request (Union[google.cloud.aiplatform_v1.types.GetBatchPredictionJobRequest, dict]):
The request object. Request message for
@@ -1654,6 +2022,26 @@ async def list_batch_prediction_jobs(
) -> pagers.ListBatchPredictionJobsAsyncPager:
r"""Lists BatchPredictionJobs in a Location.
+ .. code-block::
+
+ from google.cloud import aiplatform_v1
+
+ def sample_list_batch_prediction_jobs():
+ # Create a client
+ client = aiplatform_v1.JobServiceClient()
+
+ # Initialize request argument(s)
+ request = aiplatform_v1.ListBatchPredictionJobsRequest(
+ parent="parent_value",
+ )
+
+ # Make the request
+ page_result = client.list_batch_prediction_jobs(request=request)
+
+ # Handle the response
+ for response in page_result:
+ print(response)
+
Args:
request (Union[google.cloud.aiplatform_v1.types.ListBatchPredictionJobsRequest, dict]):
The request object. Request message for
@@ -1736,6 +2124,30 @@ async def delete_batch_prediction_job(
r"""Deletes a BatchPredictionJob. Can only be called on
jobs that already finished.
+
+ .. code-block::
+
+ from google.cloud import aiplatform_v1
+
+ def sample_delete_batch_prediction_job():
+ # Create a client
+ client = aiplatform_v1.JobServiceClient()
+
+ # Initialize request argument(s)
+ request = aiplatform_v1.DeleteBatchPredictionJobRequest(
+ name="name_value",
+ )
+
+ # Make the request
+ operation = client.delete_batch_prediction_job(request=request)
+
+ print("Waiting for operation to complete...")
+
+ response = operation.result()
+
+ # Handle the response
+ print(response)
+
Args:
request (Union[google.cloud.aiplatform_v1.types.DeleteBatchPredictionJobRequest, dict]):
The request object. Request message for
@@ -1840,6 +2252,23 @@ async def cancel_batch_prediction_job(
is set to ``CANCELLED``. Any files already outputted by the job
are not deleted.
+
+ .. code-block::
+
+ from google.cloud import aiplatform_v1
+
+ def sample_cancel_batch_prediction_job():
+ # Create a client
+ client = aiplatform_v1.JobServiceClient()
+
+ # Initialize request argument(s)
+ request = aiplatform_v1.CancelBatchPredictionJobRequest(
+ name="name_value",
+ )
+
+ # Make the request
+ client.cancel_batch_prediction_job(request=request)
+
Args:
request (Union[google.cloud.aiplatform_v1.types.CancelBatchPredictionJobRequest, dict]):
The request object. Request message for
@@ -1909,6 +2338,31 @@ async def create_model_deployment_monitoring_job(
r"""Creates a ModelDeploymentMonitoringJob. It will run
periodically on a configured interval.
+
+ .. code-block::
+
+ from google.cloud import aiplatform_v1
+
+ def sample_create_model_deployment_monitoring_job():
+ # Create a client
+ client = aiplatform_v1.JobServiceClient()
+
+ # Initialize request argument(s)
+ model_deployment_monitoring_job = aiplatform_v1.ModelDeploymentMonitoringJob()
+ model_deployment_monitoring_job.display_name = "display_name_value"
+ model_deployment_monitoring_job.endpoint = "endpoint_value"
+
+ request = aiplatform_v1.CreateModelDeploymentMonitoringJobRequest(
+ parent="parent_value",
+ model_deployment_monitoring_job=model_deployment_monitoring_job,
+ )
+
+ # Make the request
+ response = client.create_model_deployment_monitoring_job(request=request)
+
+ # Handle the response
+ print(response)
+
Args:
request (Union[google.cloud.aiplatform_v1.types.CreateModelDeploymentMonitoringJobRequest, dict]):
The request object. Request message for
@@ -1997,6 +2451,28 @@ async def search_model_deployment_monitoring_stats_anomalies(
r"""Searches Model Monitoring Statistics generated within
a given time window.
+
+ .. code-block::
+
+ from google.cloud import aiplatform_v1
+
+ def sample_search_model_deployment_monitoring_stats_anomalies():
+ # Create a client
+ client = aiplatform_v1.JobServiceClient()
+
+ # Initialize request argument(s)
+ request = aiplatform_v1.SearchModelDeploymentMonitoringStatsAnomaliesRequest(
+ model_deployment_monitoring_job="model_deployment_monitoring_job_value",
+ deployed_model_id="deployed_model_id_value",
+ )
+
+ # Make the request
+ page_result = client.search_model_deployment_monitoring_stats_anomalies(request=request)
+
+ # Handle the response
+ for response in page_result:
+ print(response)
+
Args:
request (Union[google.cloud.aiplatform_v1.types.SearchModelDeploymentMonitoringStatsAnomaliesRequest, dict]):
The request object. Request message for
@@ -2004,7 +2480,7 @@ async def search_model_deployment_monitoring_stats_anomalies(
model_deployment_monitoring_job (:class:`str`):
Required. ModelDeploymentMonitoring Job resource name.
Format:
- \`projects/{project}/locations/{location}/modelDeploymentMonitoringJobs/{model_deployment_monitoring_job}
+ ``projects/{project}/locations/{location}/modelDeploymentMonitoringJobs/{model_deployment_monitoring_job}``
This corresponds to the ``model_deployment_monitoring_job`` field
on the ``request`` instance; if ``request`` is provided, this
@@ -2096,6 +2572,25 @@ async def get_model_deployment_monitoring_job(
) -> model_deployment_monitoring_job.ModelDeploymentMonitoringJob:
r"""Gets a ModelDeploymentMonitoringJob.
+ .. code-block::
+
+ from google.cloud import aiplatform_v1
+
+ def sample_get_model_deployment_monitoring_job():
+ # Create a client
+ client = aiplatform_v1.JobServiceClient()
+
+ # Initialize request argument(s)
+ request = aiplatform_v1.GetModelDeploymentMonitoringJobRequest(
+ name="name_value",
+ )
+
+ # Make the request
+ response = client.get_model_deployment_monitoring_job(request=request)
+
+ # Handle the response
+ print(response)
+
Args:
request (Union[google.cloud.aiplatform_v1.types.GetModelDeploymentMonitoringJobRequest, dict]):
The request object. Request message for
@@ -2173,6 +2668,26 @@ async def list_model_deployment_monitoring_jobs(
) -> pagers.ListModelDeploymentMonitoringJobsAsyncPager:
r"""Lists ModelDeploymentMonitoringJobs in a Location.
+ .. code-block::
+
+ from google.cloud import aiplatform_v1
+
+ def sample_list_model_deployment_monitoring_jobs():
+ # Create a client
+ client = aiplatform_v1.JobServiceClient()
+
+ # Initialize request argument(s)
+ request = aiplatform_v1.ListModelDeploymentMonitoringJobsRequest(
+ parent="parent_value",
+ )
+
+ # Make the request
+ page_result = client.list_model_deployment_monitoring_jobs(request=request)
+
+ # Handle the response
+ for response in page_result:
+ print(response)
+
Args:
request (Union[google.cloud.aiplatform_v1.types.ListModelDeploymentMonitoringJobsRequest, dict]):
The request object. Request message for
@@ -2257,6 +2772,33 @@ async def update_model_deployment_monitoring_job(
) -> operation_async.AsyncOperation:
r"""Updates a ModelDeploymentMonitoringJob.
+ .. code-block::
+
+ from google.cloud import aiplatform_v1
+
+ def sample_update_model_deployment_monitoring_job():
+ # Create a client
+ client = aiplatform_v1.JobServiceClient()
+
+ # Initialize request argument(s)
+ model_deployment_monitoring_job = aiplatform_v1.ModelDeploymentMonitoringJob()
+ model_deployment_monitoring_job.display_name = "display_name_value"
+ model_deployment_monitoring_job.endpoint = "endpoint_value"
+
+ request = aiplatform_v1.UpdateModelDeploymentMonitoringJobRequest(
+ model_deployment_monitoring_job=model_deployment_monitoring_job,
+ )
+
+ # Make the request
+ operation = client.update_model_deployment_monitoring_job(request=request)
+
+ print("Waiting for operation to complete...")
+
+ response = operation.result()
+
+ # Handle the response
+ print(response)
+
Args:
request (Union[google.cloud.aiplatform_v1.types.UpdateModelDeploymentMonitoringJobRequest, dict]):
The request object. Request message for
@@ -2384,6 +2926,29 @@ async def delete_model_deployment_monitoring_job(
) -> operation_async.AsyncOperation:
r"""Deletes a ModelDeploymentMonitoringJob.
+ .. code-block::
+
+ from google.cloud import aiplatform_v1
+
+ def sample_delete_model_deployment_monitoring_job():
+ # Create a client
+ client = aiplatform_v1.JobServiceClient()
+
+ # Initialize request argument(s)
+ request = aiplatform_v1.DeleteModelDeploymentMonitoringJobRequest(
+ name="name_value",
+ )
+
+ # Make the request
+ operation = client.delete_model_deployment_monitoring_job(request=request)
+
+ print("Waiting for operation to complete...")
+
+ response = operation.result()
+
+ # Handle the response
+ print(response)
+
Args:
request (Union[google.cloud.aiplatform_v1.types.DeleteModelDeploymentMonitoringJobRequest, dict]):
The request object. Request message for
@@ -2482,6 +3047,23 @@ async def pause_model_deployment_monitoring_job(
[ModelDeploymentMonitoringJob.state][google.cloud.aiplatform.v1.ModelDeploymentMonitoringJob.state]
to 'PAUSED'.
+
+ .. code-block::
+
+ from google.cloud import aiplatform_v1
+
+ def sample_pause_model_deployment_monitoring_job():
+ # Create a client
+ client = aiplatform_v1.JobServiceClient()
+
+ # Initialize request argument(s)
+ request = aiplatform_v1.PauseModelDeploymentMonitoringJobRequest(
+ name="name_value",
+ )
+
+ # Make the request
+ client.pause_model_deployment_monitoring_job(request=request)
+
Args:
request (Union[google.cloud.aiplatform_v1.types.PauseModelDeploymentMonitoringJobRequest, dict]):
The request object. Request message for
@@ -2551,6 +3133,23 @@ async def resume_model_deployment_monitoring_job(
will start to run from next scheduled time. A deleted
ModelDeploymentMonitoringJob can't be resumed.
+
+ .. code-block::
+
+ from google.cloud import aiplatform_v1
+
+ def sample_resume_model_deployment_monitoring_job():
+ # Create a client
+ client = aiplatform_v1.JobServiceClient()
+
+ # Initialize request argument(s)
+ request = aiplatform_v1.ResumeModelDeploymentMonitoringJobRequest(
+ name="name_value",
+ )
+
+ # Make the request
+ client.resume_model_deployment_monitoring_job(request=request)
+
Args:
request (Union[google.cloud.aiplatform_v1.types.ResumeModelDeploymentMonitoringJobRequest, dict]):
The request object. Request message for
diff --git a/google/cloud/aiplatform_v1/services/job_service/client.py b/google/cloud/aiplatform_v1/services/job_service/client.py
index 65571cda77..cd8d4dfd8b 100644
--- a/google/cloud/aiplatform_v1/services/job_service/client.py
+++ b/google/cloud/aiplatform_v1/services/job_service/client.py
@@ -619,6 +619,32 @@ def create_custom_job(
r"""Creates a CustomJob. A created CustomJob right away
will be attempted to be run.
+
+
+ .. code-block::
+
+ from google.cloud import aiplatform_v1
+
+ def sample_create_custom_job():
+ # Create a client
+ client = aiplatform_v1.JobServiceClient()
+
+ # Initialize request argument(s)
+ custom_job = aiplatform_v1.CustomJob()
+ custom_job.display_name = "display_name_value"
+ custom_job.job_spec.worker_pool_specs.container_spec.image_uri = "image_uri_value"
+
+ request = aiplatform_v1.CreateCustomJobRequest(
+ parent="parent_value",
+ custom_job=custom_job,
+ )
+
+ # Make the request
+ response = client.create_custom_job(request=request)
+
+ # Handle the response
+ print(response)
+
Args:
request (Union[google.cloud.aiplatform_v1.types.CreateCustomJobRequest, dict]):
The request object. Request message for
@@ -704,6 +730,26 @@ def get_custom_job(
) -> custom_job.CustomJob:
r"""Gets a CustomJob.
+
+ .. code-block::
+
+ from google.cloud import aiplatform_v1
+
+ def sample_get_custom_job():
+ # Create a client
+ client = aiplatform_v1.JobServiceClient()
+
+ # Initialize request argument(s)
+ request = aiplatform_v1.GetCustomJobRequest(
+ name="name_value",
+ )
+
+ # Make the request
+ response = client.get_custom_job(request=request)
+
+ # Handle the response
+ print(response)
+
Args:
request (Union[google.cloud.aiplatform_v1.types.GetCustomJobRequest, dict]):
The request object. Request message for
@@ -781,6 +827,27 @@ def list_custom_jobs(
) -> pagers.ListCustomJobsPager:
r"""Lists CustomJobs in a Location.
+
+ .. code-block::
+
+ from google.cloud import aiplatform_v1
+
+ def sample_list_custom_jobs():
+ # Create a client
+ client = aiplatform_v1.JobServiceClient()
+
+ # Initialize request argument(s)
+ request = aiplatform_v1.ListCustomJobsRequest(
+ parent="parent_value",
+ )
+
+ # Make the request
+ page_result = client.list_custom_jobs(request=request)
+
+ # Handle the response
+ for response in page_result:
+ print(response)
+
Args:
request (Union[google.cloud.aiplatform_v1.types.ListCustomJobsRequest, dict]):
The request object. Request message for
@@ -862,6 +929,30 @@ def delete_custom_job(
) -> gac_operation.Operation:
r"""Deletes a CustomJob.
+
+ .. code-block::
+
+ from google.cloud import aiplatform_v1
+
+ def sample_delete_custom_job():
+ # Create a client
+ client = aiplatform_v1.JobServiceClient()
+
+ # Initialize request argument(s)
+ request = aiplatform_v1.DeleteCustomJobRequest(
+ name="name_value",
+ )
+
+ # Make the request
+ operation = client.delete_custom_job(request=request)
+
+ print("Waiting for operation to complete...")
+
+ response = operation.result()
+
+ # Handle the response
+ print(response)
+
Args:
request (Union[google.cloud.aiplatform_v1.types.DeleteCustomJobRequest, dict]):
The request object. Request message for
@@ -967,6 +1058,24 @@ def cancel_custom_job(
[CustomJob.state][google.cloud.aiplatform.v1.CustomJob.state] is
set to ``CANCELLED``.
+
+
+ .. code-block::
+
+ from google.cloud import aiplatform_v1
+
+ def sample_cancel_custom_job():
+ # Create a client
+ client = aiplatform_v1.JobServiceClient()
+
+ # Initialize request argument(s)
+ request = aiplatform_v1.CancelCustomJobRequest(
+ name="name_value",
+ )
+
+ # Make the request
+ client.cancel_custom_job(request=request)
+
Args:
request (Union[google.cloud.aiplatform_v1.types.CancelCustomJobRequest, dict]):
The request object. Request message for
@@ -1032,6 +1141,35 @@ def create_data_labeling_job(
) -> gca_data_labeling_job.DataLabelingJob:
r"""Creates a DataLabelingJob.
+
+ .. code-block::
+
+ from google.cloud import aiplatform_v1
+
+ def sample_create_data_labeling_job():
+ # Create a client
+ client = aiplatform_v1.JobServiceClient()
+
+ # Initialize request argument(s)
+ data_labeling_job = aiplatform_v1.DataLabelingJob()
+ data_labeling_job.display_name = "display_name_value"
+ data_labeling_job.datasets = ['datasets_value_1', 'datasets_value_2']
+ data_labeling_job.labeler_count = 1375
+ data_labeling_job.instruction_uri = "instruction_uri_value"
+ data_labeling_job.inputs_schema_uri = "inputs_schema_uri_value"
+ data_labeling_job.inputs.null_value = "NULL_VALUE"
+
+ request = aiplatform_v1.CreateDataLabelingJobRequest(
+ parent="parent_value",
+ data_labeling_job=data_labeling_job,
+ )
+
+ # Make the request
+ response = client.create_data_labeling_job(request=request)
+
+ # Handle the response
+ print(response)
+
Args:
request (Union[google.cloud.aiplatform_v1.types.CreateDataLabelingJobRequest, dict]):
The request object. Request message for
@@ -1113,6 +1251,26 @@ def get_data_labeling_job(
) -> data_labeling_job.DataLabelingJob:
r"""Gets a DataLabelingJob.
+
+ .. code-block::
+
+ from google.cloud import aiplatform_v1
+
+ def sample_get_data_labeling_job():
+ # Create a client
+ client = aiplatform_v1.JobServiceClient()
+
+ # Initialize request argument(s)
+ request = aiplatform_v1.GetDataLabelingJobRequest(
+ name="name_value",
+ )
+
+ # Make the request
+ response = client.get_data_labeling_job(request=request)
+
+ # Handle the response
+ print(response)
+
Args:
request (Union[google.cloud.aiplatform_v1.types.GetDataLabelingJobRequest, dict]):
The request object. Request message for
@@ -1185,6 +1343,27 @@ def list_data_labeling_jobs(
) -> pagers.ListDataLabelingJobsPager:
r"""Lists DataLabelingJobs in a Location.
+
+ .. code-block::
+
+ from google.cloud import aiplatform_v1
+
+ def sample_list_data_labeling_jobs():
+ # Create a client
+ client = aiplatform_v1.JobServiceClient()
+
+ # Initialize request argument(s)
+ request = aiplatform_v1.ListDataLabelingJobsRequest(
+ parent="parent_value",
+ )
+
+ # Make the request
+ page_result = client.list_data_labeling_jobs(request=request)
+
+ # Handle the response
+ for response in page_result:
+ print(response)
+
Args:
request (Union[google.cloud.aiplatform_v1.types.ListDataLabelingJobsRequest, dict]):
The request object. Request message for
@@ -1265,6 +1444,30 @@ def delete_data_labeling_job(
) -> gac_operation.Operation:
r"""Deletes a DataLabelingJob.
+
+ .. code-block::
+
+ from google.cloud import aiplatform_v1
+
+ def sample_delete_data_labeling_job():
+ # Create a client
+ client = aiplatform_v1.JobServiceClient()
+
+ # Initialize request argument(s)
+ request = aiplatform_v1.DeleteDataLabelingJobRequest(
+ name="name_value",
+ )
+
+ # Make the request
+ operation = client.delete_data_labeling_job(request=request)
+
+ print("Waiting for operation to complete...")
+
+ response = operation.result()
+
+ # Handle the response
+ print(response)
+
Args:
request (Union[google.cloud.aiplatform_v1.types.DeleteDataLabelingJobRequest, dict]):
The request object. Request message for
@@ -1359,6 +1562,24 @@ def cancel_data_labeling_job(
r"""Cancels a DataLabelingJob. Success of cancellation is
not guaranteed.
+
+
+ .. code-block::
+
+ from google.cloud import aiplatform_v1
+
+ def sample_cancel_data_labeling_job():
+ # Create a client
+ client = aiplatform_v1.JobServiceClient()
+
+ # Initialize request argument(s)
+ request = aiplatform_v1.CancelDataLabelingJobRequest(
+ name="name_value",
+ )
+
+ # Make the request
+ client.cancel_data_labeling_job(request=request)
+
Args:
request (Union[google.cloud.aiplatform_v1.types.CancelDataLabelingJobRequest, dict]):
The request object. Request message for
@@ -1424,6 +1645,38 @@ def create_hyperparameter_tuning_job(
) -> gca_hyperparameter_tuning_job.HyperparameterTuningJob:
r"""Creates a HyperparameterTuningJob
+
+ .. code-block::
+
+ from google.cloud import aiplatform_v1
+
+ def sample_create_hyperparameter_tuning_job():
+ # Create a client
+ client = aiplatform_v1.JobServiceClient()
+
+ # Initialize request argument(s)
+ hyperparameter_tuning_job = aiplatform_v1.HyperparameterTuningJob()
+ hyperparameter_tuning_job.display_name = "display_name_value"
+ hyperparameter_tuning_job.study_spec.metrics.metric_id = "metric_id_value"
+ hyperparameter_tuning_job.study_spec.metrics.goal = "MINIMIZE"
+ hyperparameter_tuning_job.study_spec.parameters.double_value_spec.min_value = 0.96
+ hyperparameter_tuning_job.study_spec.parameters.double_value_spec.max_value = 0.962
+ hyperparameter_tuning_job.study_spec.parameters.parameter_id = "parameter_id_value"
+ hyperparameter_tuning_job.max_trial_count = 1609
+ hyperparameter_tuning_job.parallel_trial_count = 2128
+ hyperparameter_tuning_job.trial_job_spec.worker_pool_specs.container_spec.image_uri = "image_uri_value"
+
+ request = aiplatform_v1.CreateHyperparameterTuningJobRequest(
+ parent="parent_value",
+ hyperparameter_tuning_job=hyperparameter_tuning_job,
+ )
+
+ # Make the request
+ response = client.create_hyperparameter_tuning_job(request=request)
+
+ # Handle the response
+ print(response)
+
Args:
request (Union[google.cloud.aiplatform_v1.types.CreateHyperparameterTuningJobRequest, dict]):
The request object. Request message for
@@ -1509,6 +1762,26 @@ def get_hyperparameter_tuning_job(
) -> hyperparameter_tuning_job.HyperparameterTuningJob:
r"""Gets a HyperparameterTuningJob
+
+ .. code-block::
+
+ from google.cloud import aiplatform_v1
+
+ def sample_get_hyperparameter_tuning_job():
+ # Create a client
+ client = aiplatform_v1.JobServiceClient()
+
+ # Initialize request argument(s)
+ request = aiplatform_v1.GetHyperparameterTuningJobRequest(
+ name="name_value",
+ )
+
+ # Make the request
+ response = client.get_hyperparameter_tuning_job(request=request)
+
+ # Handle the response
+ print(response)
+
Args:
request (Union[google.cloud.aiplatform_v1.types.GetHyperparameterTuningJobRequest, dict]):
The request object. Request message for
@@ -1585,6 +1858,27 @@ def list_hyperparameter_tuning_jobs(
) -> pagers.ListHyperparameterTuningJobsPager:
r"""Lists HyperparameterTuningJobs in a Location.
+
+ .. code-block::
+
+ from google.cloud import aiplatform_v1
+
+ def sample_list_hyperparameter_tuning_jobs():
+ # Create a client
+ client = aiplatform_v1.JobServiceClient()
+
+ # Initialize request argument(s)
+ request = aiplatform_v1.ListHyperparameterTuningJobsRequest(
+ parent="parent_value",
+ )
+
+ # Make the request
+ page_result = client.list_hyperparameter_tuning_jobs(request=request)
+
+ # Handle the response
+ for response in page_result:
+ print(response)
+
Args:
request (Union[google.cloud.aiplatform_v1.types.ListHyperparameterTuningJobsRequest, dict]):
The request object. Request message for
@@ -1668,6 +1962,30 @@ def delete_hyperparameter_tuning_job(
) -> gac_operation.Operation:
r"""Deletes a HyperparameterTuningJob.
+
+ .. code-block::
+
+ from google.cloud import aiplatform_v1
+
+ def sample_delete_hyperparameter_tuning_job():
+ # Create a client
+ client = aiplatform_v1.JobServiceClient()
+
+ # Initialize request argument(s)
+ request = aiplatform_v1.DeleteHyperparameterTuningJobRequest(
+ name="name_value",
+ )
+
+ # Make the request
+ operation = client.delete_hyperparameter_tuning_job(request=request)
+
+ print("Waiting for operation to complete...")
+
+ response = operation.result()
+
+ # Handle the response
+ print(response)
+
Args:
request (Union[google.cloud.aiplatform_v1.types.DeleteHyperparameterTuningJobRequest, dict]):
The request object. Request message for
@@ -1776,6 +2094,24 @@ def cancel_hyperparameter_tuning_job(
[HyperparameterTuningJob.state][google.cloud.aiplatform.v1.HyperparameterTuningJob.state]
is set to ``CANCELLED``.
+
+
+ .. code-block::
+
+ from google.cloud import aiplatform_v1
+
+ def sample_cancel_hyperparameter_tuning_job():
+ # Create a client
+ client = aiplatform_v1.JobServiceClient()
+
+ # Initialize request argument(s)
+ request = aiplatform_v1.CancelHyperparameterTuningJobRequest(
+ name="name_value",
+ )
+
+ # Make the request
+ client.cancel_hyperparameter_tuning_job(request=request)
+
Args:
request (Union[google.cloud.aiplatform_v1.types.CancelHyperparameterTuningJobRequest, dict]):
The request object. Request message for
@@ -1845,6 +2181,35 @@ def create_batch_prediction_job(
r"""Creates a BatchPredictionJob. A BatchPredictionJob
once created will right away be attempted to start.
+
+
+ .. code-block::
+
+ from google.cloud import aiplatform_v1
+
+ def sample_create_batch_prediction_job():
+ # Create a client
+ client = aiplatform_v1.JobServiceClient()
+
+ # Initialize request argument(s)
+ batch_prediction_job = aiplatform_v1.BatchPredictionJob()
+ batch_prediction_job.display_name = "display_name_value"
+ batch_prediction_job.input_config.gcs_source.uris = ['uris_value_1', 'uris_value_2']
+ batch_prediction_job.input_config.instances_format = "instances_format_value"
+ batch_prediction_job.output_config.gcs_destination.output_uri_prefix = "output_uri_prefix_value"
+ batch_prediction_job.output_config.predictions_format = "predictions_format_value"
+
+ request = aiplatform_v1.CreateBatchPredictionJobRequest(
+ parent="parent_value",
+ batch_prediction_job=batch_prediction_job,
+ )
+
+ # Make the request
+ response = client.create_batch_prediction_job(request=request)
+
+ # Handle the response
+ print(response)
+
Args:
request (Union[google.cloud.aiplatform_v1.types.CreateBatchPredictionJobRequest, dict]):
The request object. Request message for
@@ -1932,6 +2297,26 @@ def get_batch_prediction_job(
) -> batch_prediction_job.BatchPredictionJob:
r"""Gets a BatchPredictionJob
+
+ .. code-block::
+
+ from google.cloud import aiplatform_v1
+
+ def sample_get_batch_prediction_job():
+ # Create a client
+ client = aiplatform_v1.JobServiceClient()
+
+ # Initialize request argument(s)
+ request = aiplatform_v1.GetBatchPredictionJobRequest(
+ name="name_value",
+ )
+
+ # Make the request
+ response = client.get_batch_prediction_job(request=request)
+
+ # Handle the response
+ print(response)
+
Args:
request (Union[google.cloud.aiplatform_v1.types.GetBatchPredictionJobRequest, dict]):
The request object. Request message for
@@ -2008,6 +2393,27 @@ def list_batch_prediction_jobs(
) -> pagers.ListBatchPredictionJobsPager:
r"""Lists BatchPredictionJobs in a Location.
+
+ .. code-block::
+
+ from google.cloud import aiplatform_v1
+
+ def sample_list_batch_prediction_jobs():
+ # Create a client
+ client = aiplatform_v1.JobServiceClient()
+
+ # Initialize request argument(s)
+ request = aiplatform_v1.ListBatchPredictionJobsRequest(
+ parent="parent_value",
+ )
+
+ # Make the request
+ page_result = client.list_batch_prediction_jobs(request=request)
+
+ # Handle the response
+ for response in page_result:
+ print(response)
+
Args:
request (Union[google.cloud.aiplatform_v1.types.ListBatchPredictionJobsRequest, dict]):
The request object. Request message for
@@ -2092,6 +2498,31 @@ def delete_batch_prediction_job(
r"""Deletes a BatchPredictionJob. Can only be called on
jobs that already finished.
+
+
+ .. code-block::
+
+ from google.cloud import aiplatform_v1
+
+ def sample_delete_batch_prediction_job():
+ # Create a client
+ client = aiplatform_v1.JobServiceClient()
+
+ # Initialize request argument(s)
+ request = aiplatform_v1.DeleteBatchPredictionJobRequest(
+ name="name_value",
+ )
+
+ # Make the request
+ operation = client.delete_batch_prediction_job(request=request)
+
+ print("Waiting for operation to complete...")
+
+ response = operation.result()
+
+ # Handle the response
+ print(response)
+
Args:
request (Union[google.cloud.aiplatform_v1.types.DeleteBatchPredictionJobRequest, dict]):
The request object. Request message for
@@ -2198,6 +2629,24 @@ def cancel_batch_prediction_job(
is set to ``CANCELLED``. Any files already outputted by the job
are not deleted.
+
+
+ .. code-block::
+
+ from google.cloud import aiplatform_v1
+
+ def sample_cancel_batch_prediction_job():
+ # Create a client
+ client = aiplatform_v1.JobServiceClient()
+
+ # Initialize request argument(s)
+ request = aiplatform_v1.CancelBatchPredictionJobRequest(
+ name="name_value",
+ )
+
+ # Make the request
+ client.cancel_batch_prediction_job(request=request)
+
Args:
request (Union[google.cloud.aiplatform_v1.types.CancelBatchPredictionJobRequest, dict]):
The request object. Request message for
@@ -2269,6 +2718,32 @@ def create_model_deployment_monitoring_job(
r"""Creates a ModelDeploymentMonitoringJob. It will run
periodically on a configured interval.
+
+
+ .. code-block::
+
+ from google.cloud import aiplatform_v1
+
+ def sample_create_model_deployment_monitoring_job():
+ # Create a client
+ client = aiplatform_v1.JobServiceClient()
+
+ # Initialize request argument(s)
+ model_deployment_monitoring_job = aiplatform_v1.ModelDeploymentMonitoringJob()
+ model_deployment_monitoring_job.display_name = "display_name_value"
+ model_deployment_monitoring_job.endpoint = "endpoint_value"
+
+ request = aiplatform_v1.CreateModelDeploymentMonitoringJobRequest(
+ parent="parent_value",
+ model_deployment_monitoring_job=model_deployment_monitoring_job,
+ )
+
+ # Make the request
+ response = client.create_model_deployment_monitoring_job(request=request)
+
+ # Handle the response
+ print(response)
+
Args:
request (Union[google.cloud.aiplatform_v1.types.CreateModelDeploymentMonitoringJobRequest, dict]):
The request object. Request message for
@@ -2363,6 +2838,29 @@ def search_model_deployment_monitoring_stats_anomalies(
r"""Searches Model Monitoring Statistics generated within
a given time window.
+
+
+ .. code-block::
+
+ from google.cloud import aiplatform_v1
+
+ def sample_search_model_deployment_monitoring_stats_anomalies():
+ # Create a client
+ client = aiplatform_v1.JobServiceClient()
+
+ # Initialize request argument(s)
+ request = aiplatform_v1.SearchModelDeploymentMonitoringStatsAnomaliesRequest(
+ model_deployment_monitoring_job="model_deployment_monitoring_job_value",
+ deployed_model_id="deployed_model_id_value",
+ )
+
+ # Make the request
+ page_result = client.search_model_deployment_monitoring_stats_anomalies(request=request)
+
+ # Handle the response
+ for response in page_result:
+ print(response)
+
Args:
request (Union[google.cloud.aiplatform_v1.types.SearchModelDeploymentMonitoringStatsAnomaliesRequest, dict]):
The request object. Request message for
@@ -2370,7 +2868,7 @@ def search_model_deployment_monitoring_stats_anomalies(
model_deployment_monitoring_job (str):
Required. ModelDeploymentMonitoring Job resource name.
Format:
- \`projects/{project}/locations/{location}/modelDeploymentMonitoringJobs/{model_deployment_monitoring_job}
+ ``projects/{project}/locations/{location}/modelDeploymentMonitoringJobs/{model_deployment_monitoring_job}``
This corresponds to the ``model_deployment_monitoring_job`` field
on the ``request`` instance; if ``request`` is provided, this
@@ -2468,6 +2966,26 @@ def get_model_deployment_monitoring_job(
) -> model_deployment_monitoring_job.ModelDeploymentMonitoringJob:
r"""Gets a ModelDeploymentMonitoringJob.
+
+ .. code-block::
+
+ from google.cloud import aiplatform_v1
+
+ def sample_get_model_deployment_monitoring_job():
+ # Create a client
+ client = aiplatform_v1.JobServiceClient()
+
+ # Initialize request argument(s)
+ request = aiplatform_v1.GetModelDeploymentMonitoringJobRequest(
+ name="name_value",
+ )
+
+ # Make the request
+ response = client.get_model_deployment_monitoring_job(request=request)
+
+ # Handle the response
+ print(response)
+
Args:
request (Union[google.cloud.aiplatform_v1.types.GetModelDeploymentMonitoringJobRequest, dict]):
The request object. Request message for
@@ -2547,6 +3065,27 @@ def list_model_deployment_monitoring_jobs(
) -> pagers.ListModelDeploymentMonitoringJobsPager:
r"""Lists ModelDeploymentMonitoringJobs in a Location.
+
+ .. code-block::
+
+ from google.cloud import aiplatform_v1
+
+ def sample_list_model_deployment_monitoring_jobs():
+ # Create a client
+ client = aiplatform_v1.JobServiceClient()
+
+ # Initialize request argument(s)
+ request = aiplatform_v1.ListModelDeploymentMonitoringJobsRequest(
+ parent="parent_value",
+ )
+
+ # Make the request
+ page_result = client.list_model_deployment_monitoring_jobs(request=request)
+
+ # Handle the response
+ for response in page_result:
+ print(response)
+
Args:
request (Union[google.cloud.aiplatform_v1.types.ListModelDeploymentMonitoringJobsRequest, dict]):
The request object. Request message for
@@ -2635,6 +3174,34 @@ def update_model_deployment_monitoring_job(
) -> gac_operation.Operation:
r"""Updates a ModelDeploymentMonitoringJob.
+
+ .. code-block::
+
+ from google.cloud import aiplatform_v1
+
+ def sample_update_model_deployment_monitoring_job():
+ # Create a client
+ client = aiplatform_v1.JobServiceClient()
+
+ # Initialize request argument(s)
+ model_deployment_monitoring_job = aiplatform_v1.ModelDeploymentMonitoringJob()
+ model_deployment_monitoring_job.display_name = "display_name_value"
+ model_deployment_monitoring_job.endpoint = "endpoint_value"
+
+ request = aiplatform_v1.UpdateModelDeploymentMonitoringJobRequest(
+ model_deployment_monitoring_job=model_deployment_monitoring_job,
+ )
+
+ # Make the request
+ operation = client.update_model_deployment_monitoring_job(request=request)
+
+ print("Waiting for operation to complete...")
+
+ response = operation.result()
+
+ # Handle the response
+ print(response)
+
Args:
request (Union[google.cloud.aiplatform_v1.types.UpdateModelDeploymentMonitoringJobRequest, dict]):
The request object. Request message for
@@ -2768,6 +3335,30 @@ def delete_model_deployment_monitoring_job(
) -> gac_operation.Operation:
r"""Deletes a ModelDeploymentMonitoringJob.
+
+ .. code-block::
+
+ from google.cloud import aiplatform_v1
+
+ def sample_delete_model_deployment_monitoring_job():
+ # Create a client
+ client = aiplatform_v1.JobServiceClient()
+
+ # Initialize request argument(s)
+ request = aiplatform_v1.DeleteModelDeploymentMonitoringJobRequest(
+ name="name_value",
+ )
+
+ # Make the request
+ operation = client.delete_model_deployment_monitoring_job(request=request)
+
+ print("Waiting for operation to complete...")
+
+ response = operation.result()
+
+ # Handle the response
+ print(response)
+
Args:
request (Union[google.cloud.aiplatform_v1.types.DeleteModelDeploymentMonitoringJobRequest, dict]):
The request object. Request message for
@@ -2870,6 +3461,24 @@ def pause_model_deployment_monitoring_job(
[ModelDeploymentMonitoringJob.state][google.cloud.aiplatform.v1.ModelDeploymentMonitoringJob.state]
to 'PAUSED'.
+
+
+ .. code-block::
+
+ from google.cloud import aiplatform_v1
+
+ def sample_pause_model_deployment_monitoring_job():
+ # Create a client
+ client = aiplatform_v1.JobServiceClient()
+
+ # Initialize request argument(s)
+ request = aiplatform_v1.PauseModelDeploymentMonitoringJobRequest(
+ name="name_value",
+ )
+
+ # Make the request
+ client.pause_model_deployment_monitoring_job(request=request)
+
Args:
request (Union[google.cloud.aiplatform_v1.types.PauseModelDeploymentMonitoringJobRequest, dict]):
The request object. Request message for
@@ -2943,6 +3552,24 @@ def resume_model_deployment_monitoring_job(
will start to run from next scheduled time. A deleted
ModelDeploymentMonitoringJob can't be resumed.
+
+
+ .. code-block::
+
+ from google.cloud import aiplatform_v1
+
+ def sample_resume_model_deployment_monitoring_job():
+ # Create a client
+ client = aiplatform_v1.JobServiceClient()
+
+ # Initialize request argument(s)
+ request = aiplatform_v1.ResumeModelDeploymentMonitoringJobRequest(
+ name="name_value",
+ )
+
+ # Make the request
+ client.resume_model_deployment_monitoring_job(request=request)
+
Args:
request (Union[google.cloud.aiplatform_v1.types.ResumeModelDeploymentMonitoringJobRequest, dict]):
The request object. Request message for
diff --git a/google/cloud/aiplatform_v1/services/metadata_service/async_client.py b/google/cloud/aiplatform_v1/services/metadata_service/async_client.py
index a7ce7c5617..64effd4ac6 100644
--- a/google/cloud/aiplatform_v1/services/metadata_service/async_client.py
+++ b/google/cloud/aiplatform_v1/services/metadata_service/async_client.py
@@ -248,6 +248,30 @@ async def create_metadata_store(
r"""Initializes a MetadataStore, including allocation of
resources.
+
+ .. code-block::
+
+ from google.cloud import aiplatform_v1
+
+ def sample_create_metadata_store():
+ # Create a client
+ client = aiplatform_v1.MetadataServiceClient()
+
+ # Initialize request argument(s)
+ request = aiplatform_v1.CreateMetadataStoreRequest(
+ parent="parent_value",
+ )
+
+ # Make the request
+ operation = client.create_metadata_store(request=request)
+
+ print("Waiting for operation to complete...")
+
+ response = operation.result()
+
+ # Handle the response
+ print(response)
+
Args:
request (Union[google.cloud.aiplatform_v1.types.CreateMetadataStoreRequest, dict]):
The request object. Request message for
@@ -356,6 +380,25 @@ async def get_metadata_store(
) -> metadata_store.MetadataStore:
r"""Retrieves a specific MetadataStore.
+ .. code-block::
+
+ from google.cloud import aiplatform_v1
+
+ def sample_get_metadata_store():
+ # Create a client
+ client = aiplatform_v1.MetadataServiceClient()
+
+ # Initialize request argument(s)
+ request = aiplatform_v1.GetMetadataStoreRequest(
+ name="name_value",
+ )
+
+ # Make the request
+ response = client.get_metadata_store(request=request)
+
+ # Handle the response
+ print(response)
+
Args:
request (Union[google.cloud.aiplatform_v1.types.GetMetadataStoreRequest, dict]):
The request object. Request message for
@@ -429,6 +472,26 @@ async def list_metadata_stores(
) -> pagers.ListMetadataStoresAsyncPager:
r"""Lists MetadataStores for a Location.
+ .. code-block::
+
+ from google.cloud import aiplatform_v1
+
+ def sample_list_metadata_stores():
+ # Create a client
+ client = aiplatform_v1.MetadataServiceClient()
+
+ # Initialize request argument(s)
+ request = aiplatform_v1.ListMetadataStoresRequest(
+ parent="parent_value",
+ )
+
+ # Make the request
+ page_result = client.list_metadata_stores(request=request)
+
+ # Handle the response
+ for response in page_result:
+ print(response)
+
Args:
request (Union[google.cloud.aiplatform_v1.types.ListMetadataStoresRequest, dict]):
The request object. Request message for
@@ -511,6 +574,30 @@ async def delete_metadata_store(
r"""Deletes a single MetadataStore and all its child
resources (Artifacts, Executions, and Contexts).
+
+ .. code-block::
+
+ from google.cloud import aiplatform_v1
+
+ def sample_delete_metadata_store():
+ # Create a client
+ client = aiplatform_v1.MetadataServiceClient()
+
+ # Initialize request argument(s)
+ request = aiplatform_v1.DeleteMetadataStoreRequest(
+ name="name_value",
+ )
+
+ # Make the request
+ operation = client.delete_metadata_store(request=request)
+
+ print("Waiting for operation to complete...")
+
+ response = operation.result()
+
+ # Handle the response
+ print(response)
+
Args:
request (Union[google.cloud.aiplatform_v1.types.DeleteMetadataStoreRequest, dict]):
The request object. Request message for
@@ -606,6 +693,25 @@ async def create_artifact(
) -> gca_artifact.Artifact:
r"""Creates an Artifact associated with a MetadataStore.
+ .. code-block::
+
+ from google.cloud import aiplatform_v1
+
+ def sample_create_artifact():
+ # Create a client
+ client = aiplatform_v1.MetadataServiceClient()
+
+ # Initialize request argument(s)
+ request = aiplatform_v1.CreateArtifactRequest(
+ parent="parent_value",
+ )
+
+ # Make the request
+ response = client.create_artifact(request=request)
+
+ # Handle the response
+ print(response)
+
Args:
request (Union[google.cloud.aiplatform_v1.types.CreateArtifactRequest, dict]):
The request object. Request message for
@@ -700,6 +806,25 @@ async def get_artifact(
) -> artifact.Artifact:
r"""Retrieves a specific Artifact.
+ .. code-block::
+
+ from google.cloud import aiplatform_v1
+
+ def sample_get_artifact():
+ # Create a client
+ client = aiplatform_v1.MetadataServiceClient()
+
+ # Initialize request argument(s)
+ request = aiplatform_v1.GetArtifactRequest(
+ name="name_value",
+ )
+
+ # Make the request
+ response = client.get_artifact(request=request)
+
+ # Handle the response
+ print(response)
+
Args:
request (Union[google.cloud.aiplatform_v1.types.GetArtifactRequest, dict]):
The request object. Request message for
@@ -770,6 +895,26 @@ async def list_artifacts(
) -> pagers.ListArtifactsAsyncPager:
r"""Lists Artifacts in the MetadataStore.
+ .. code-block::
+
+ from google.cloud import aiplatform_v1
+
+ def sample_list_artifacts():
+ # Create a client
+ client = aiplatform_v1.MetadataServiceClient()
+
+ # Initialize request argument(s)
+ request = aiplatform_v1.ListArtifactsRequest(
+ parent="parent_value",
+ )
+
+ # Make the request
+ page_result = client.list_artifacts(request=request)
+
+ # Handle the response
+ for response in page_result:
+ print(response)
+
Args:
request (Union[google.cloud.aiplatform_v1.types.ListArtifactsRequest, dict]):
The request object. Request message for
@@ -852,6 +997,24 @@ async def update_artifact(
) -> gca_artifact.Artifact:
r"""Updates a stored Artifact.
+ .. code-block::
+
+ from google.cloud import aiplatform_v1
+
+ def sample_update_artifact():
+ # Create a client
+ client = aiplatform_v1.MetadataServiceClient()
+
+ # Initialize request argument(s)
+ request = aiplatform_v1.UpdateArtifactRequest(
+ )
+
+ # Make the request
+ response = client.update_artifact(request=request)
+
+ # Handle the response
+ print(response)
+
Args:
request (Union[google.cloud.aiplatform_v1.types.UpdateArtifactRequest, dict]):
The request object. Request message for
@@ -938,6 +1101,29 @@ async def delete_artifact(
) -> operation_async.AsyncOperation:
r"""Deletes an Artifact.
+ .. code-block::
+
+ from google.cloud import aiplatform_v1
+
+ def sample_delete_artifact():
+ # Create a client
+ client = aiplatform_v1.MetadataServiceClient()
+
+ # Initialize request argument(s)
+ request = aiplatform_v1.DeleteArtifactRequest(
+ name="name_value",
+ )
+
+ # Make the request
+ operation = client.delete_artifact(request=request)
+
+ print("Waiting for operation to complete...")
+
+ response = operation.result()
+
+ # Handle the response
+ print(response)
+
Args:
request (Union[google.cloud.aiplatform_v1.types.DeleteArtifactRequest, dict]):
The request object. Request message for
@@ -1031,6 +1217,30 @@ async def purge_artifacts(
) -> operation_async.AsyncOperation:
r"""Purges Artifacts.
+ .. code-block::
+
+ from google.cloud import aiplatform_v1
+
+ def sample_purge_artifacts():
+ # Create a client
+ client = aiplatform_v1.MetadataServiceClient()
+
+ # Initialize request argument(s)
+ request = aiplatform_v1.PurgeArtifactsRequest(
+ parent="parent_value",
+ filter="filter_value",
+ )
+
+ # Make the request
+ operation = client.purge_artifacts(request=request)
+
+ print("Waiting for operation to complete...")
+
+ response = operation.result()
+
+ # Handle the response
+ print(response)
+
Args:
request (Union[google.cloud.aiplatform_v1.types.PurgeArtifactsRequest, dict]):
The request object. Request message for
@@ -1117,6 +1327,25 @@ async def create_context(
) -> gca_context.Context:
r"""Creates a Context associated with a MetadataStore.
+ .. code-block::
+
+ from google.cloud import aiplatform_v1
+
+ def sample_create_context():
+ # Create a client
+ client = aiplatform_v1.MetadataServiceClient()
+
+ # Initialize request argument(s)
+ request = aiplatform_v1.CreateContextRequest(
+ parent="parent_value",
+ )
+
+ # Make the request
+ response = client.create_context(request=request)
+
+ # Handle the response
+ print(response)
+
Args:
request (Union[google.cloud.aiplatform_v1.types.CreateContextRequest, dict]):
The request object. Request message for
@@ -1211,6 +1440,25 @@ async def get_context(
) -> context.Context:
r"""Retrieves a specific Context.
+ .. code-block::
+
+ from google.cloud import aiplatform_v1
+
+ def sample_get_context():
+ # Create a client
+ client = aiplatform_v1.MetadataServiceClient()
+
+ # Initialize request argument(s)
+ request = aiplatform_v1.GetContextRequest(
+ name="name_value",
+ )
+
+ # Make the request
+ response = client.get_context(request=request)
+
+ # Handle the response
+ print(response)
+
Args:
request (Union[google.cloud.aiplatform_v1.types.GetContextRequest, dict]):
The request object. Request message for
@@ -1281,6 +1529,26 @@ async def list_contexts(
) -> pagers.ListContextsAsyncPager:
r"""Lists Contexts on the MetadataStore.
+ .. code-block::
+
+ from google.cloud import aiplatform_v1
+
+ def sample_list_contexts():
+ # Create a client
+ client = aiplatform_v1.MetadataServiceClient()
+
+ # Initialize request argument(s)
+ request = aiplatform_v1.ListContextsRequest(
+ parent="parent_value",
+ )
+
+ # Make the request
+ page_result = client.list_contexts(request=request)
+
+ # Handle the response
+ for response in page_result:
+ print(response)
+
Args:
request (Union[google.cloud.aiplatform_v1.types.ListContextsRequest, dict]):
The request object. Request message for
@@ -1363,6 +1631,24 @@ async def update_context(
) -> gca_context.Context:
r"""Updates a stored Context.
+ .. code-block::
+
+ from google.cloud import aiplatform_v1
+
+ def sample_update_context():
+ # Create a client
+ client = aiplatform_v1.MetadataServiceClient()
+
+ # Initialize request argument(s)
+ request = aiplatform_v1.UpdateContextRequest(
+ )
+
+ # Make the request
+ response = client.update_context(request=request)
+
+ # Handle the response
+ print(response)
+
Args:
request (Union[google.cloud.aiplatform_v1.types.UpdateContextRequest, dict]):
The request object. Request message for
@@ -1448,6 +1734,29 @@ async def delete_context(
) -> operation_async.AsyncOperation:
r"""Deletes a stored Context.
+ .. code-block::
+
+ from google.cloud import aiplatform_v1
+
+ def sample_delete_context():
+ # Create a client
+ client = aiplatform_v1.MetadataServiceClient()
+
+ # Initialize request argument(s)
+ request = aiplatform_v1.DeleteContextRequest(
+ name="name_value",
+ )
+
+ # Make the request
+ operation = client.delete_context(request=request)
+
+ print("Waiting for operation to complete...")
+
+ response = operation.result()
+
+ # Handle the response
+ print(response)
+
Args:
request (Union[google.cloud.aiplatform_v1.types.DeleteContextRequest, dict]):
The request object. Request message for
@@ -1541,6 +1850,30 @@ async def purge_contexts(
) -> operation_async.AsyncOperation:
r"""Purges Contexts.
+ .. code-block::
+
+ from google.cloud import aiplatform_v1
+
+ def sample_purge_contexts():
+ # Create a client
+ client = aiplatform_v1.MetadataServiceClient()
+
+ # Initialize request argument(s)
+ request = aiplatform_v1.PurgeContextsRequest(
+ parent="parent_value",
+ filter="filter_value",
+ )
+
+ # Make the request
+ operation = client.purge_contexts(request=request)
+
+ print("Waiting for operation to complete...")
+
+ response = operation.result()
+
+ # Handle the response
+ print(response)
+
Args:
request (Union[google.cloud.aiplatform_v1.types.PurgeContextsRequest, dict]):
The request object. Request message for
@@ -1631,6 +1964,26 @@ async def add_context_artifacts_and_executions(
If any of the Artifacts or Executions have already been
added to a Context, they are simply skipped.
+
+ .. code-block::
+
+ from google.cloud import aiplatform_v1
+
+ def sample_add_context_artifacts_and_executions():
+ # Create a client
+ client = aiplatform_v1.MetadataServiceClient()
+
+ # Initialize request argument(s)
+ request = aiplatform_v1.AddContextArtifactsAndExecutionsRequest(
+ context="context_value",
+ )
+
+ # Make the request
+ response = client.add_context_artifacts_and_executions(request=request)
+
+ # Handle the response
+ print(response)
+
Args:
request (Union[google.cloud.aiplatform_v1.types.AddContextArtifactsAndExecutionsRequest, dict]):
The request object. Request message for
@@ -1732,6 +2085,26 @@ async def add_context_children(
cycle or cause any Context to have more than 10 parents, the
request will fail with an INVALID_ARGUMENT error.
+
+ .. code-block::
+
+ from google.cloud import aiplatform_v1
+
+ def sample_add_context_children():
+ # Create a client
+ client = aiplatform_v1.MetadataServiceClient()
+
+ # Initialize request argument(s)
+ request = aiplatform_v1.AddContextChildrenRequest(
+ context="context_value",
+ )
+
+ # Make the request
+ response = client.add_context_children(request=request)
+
+ # Handle the response
+ print(response)
+
Args:
request (Union[google.cloud.aiplatform_v1.types.AddContextChildrenRequest, dict]):
The request object. Request message for
@@ -1818,6 +2191,26 @@ async def query_context_lineage_subgraph(
specified Context, connected by Event edges and returned
as a LineageSubgraph.
+
+ .. code-block::
+
+ from google.cloud import aiplatform_v1
+
+ def sample_query_context_lineage_subgraph():
+ # Create a client
+ client = aiplatform_v1.MetadataServiceClient()
+
+ # Initialize request argument(s)
+ request = aiplatform_v1.QueryContextLineageSubgraphRequest(
+ context="context_value",
+ )
+
+ # Make the request
+ response = client.query_context_lineage_subgraph(request=request)
+
+ # Handle the response
+ print(response)
+
Args:
request (Union[google.cloud.aiplatform_v1.types.QueryContextLineageSubgraphRequest, dict]):
The request object. Request message for
@@ -1899,6 +2292,25 @@ async def create_execution(
) -> gca_execution.Execution:
r"""Creates an Execution associated with a MetadataStore.
+ .. code-block::
+
+ from google.cloud import aiplatform_v1
+
+ def sample_create_execution():
+ # Create a client
+ client = aiplatform_v1.MetadataServiceClient()
+
+ # Initialize request argument(s)
+ request = aiplatform_v1.CreateExecutionRequest(
+ parent="parent_value",
+ )
+
+ # Make the request
+ response = client.create_execution(request=request)
+
+ # Handle the response
+ print(response)
+
Args:
request (Union[google.cloud.aiplatform_v1.types.CreateExecutionRequest, dict]):
The request object. Request message for
@@ -1993,6 +2405,25 @@ async def get_execution(
) -> execution.Execution:
r"""Retrieves a specific Execution.
+ .. code-block::
+
+ from google.cloud import aiplatform_v1
+
+ def sample_get_execution():
+ # Create a client
+ client = aiplatform_v1.MetadataServiceClient()
+
+ # Initialize request argument(s)
+ request = aiplatform_v1.GetExecutionRequest(
+ name="name_value",
+ )
+
+ # Make the request
+ response = client.get_execution(request=request)
+
+ # Handle the response
+ print(response)
+
Args:
request (Union[google.cloud.aiplatform_v1.types.GetExecutionRequest, dict]):
The request object. Request message for
@@ -2063,6 +2494,26 @@ async def list_executions(
) -> pagers.ListExecutionsAsyncPager:
r"""Lists Executions in the MetadataStore.
+ .. code-block::
+
+ from google.cloud import aiplatform_v1
+
+ def sample_list_executions():
+ # Create a client
+ client = aiplatform_v1.MetadataServiceClient()
+
+ # Initialize request argument(s)
+ request = aiplatform_v1.ListExecutionsRequest(
+ parent="parent_value",
+ )
+
+ # Make the request
+ page_result = client.list_executions(request=request)
+
+ # Handle the response
+ for response in page_result:
+ print(response)
+
Args:
request (Union[google.cloud.aiplatform_v1.types.ListExecutionsRequest, dict]):
The request object. Request message for
@@ -2145,6 +2596,24 @@ async def update_execution(
) -> gca_execution.Execution:
r"""Updates a stored Execution.
+ .. code-block::
+
+ from google.cloud import aiplatform_v1
+
+ def sample_update_execution():
+ # Create a client
+ client = aiplatform_v1.MetadataServiceClient()
+
+ # Initialize request argument(s)
+ request = aiplatform_v1.UpdateExecutionRequest(
+ )
+
+ # Make the request
+ response = client.update_execution(request=request)
+
+ # Handle the response
+ print(response)
+
Args:
request (Union[google.cloud.aiplatform_v1.types.UpdateExecutionRequest, dict]):
The request object. Request message for
@@ -2231,6 +2700,29 @@ async def delete_execution(
) -> operation_async.AsyncOperation:
r"""Deletes an Execution.
+ .. code-block::
+
+ from google.cloud import aiplatform_v1
+
+ def sample_delete_execution():
+ # Create a client
+ client = aiplatform_v1.MetadataServiceClient()
+
+ # Initialize request argument(s)
+ request = aiplatform_v1.DeleteExecutionRequest(
+ name="name_value",
+ )
+
+ # Make the request
+ operation = client.delete_execution(request=request)
+
+ print("Waiting for operation to complete...")
+
+ response = operation.result()
+
+ # Handle the response
+ print(response)
+
Args:
request (Union[google.cloud.aiplatform_v1.types.DeleteExecutionRequest, dict]):
The request object. Request message for
@@ -2324,6 +2816,30 @@ async def purge_executions(
) -> operation_async.AsyncOperation:
r"""Purges Executions.
+ .. code-block::
+
+ from google.cloud import aiplatform_v1
+
+ def sample_purge_executions():
+ # Create a client
+ client = aiplatform_v1.MetadataServiceClient()
+
+ # Initialize request argument(s)
+ request = aiplatform_v1.PurgeExecutionsRequest(
+ parent="parent_value",
+ filter="filter_value",
+ )
+
+ # Make the request
+ operation = client.purge_executions(request=request)
+
+ print("Waiting for operation to complete...")
+
+ response = operation.result()
+
+ # Handle the response
+ print(response)
+
Args:
request (Union[google.cloud.aiplatform_v1.types.PurgeExecutionsRequest, dict]):
The request object. Request message for
@@ -2413,6 +2929,26 @@ async def add_execution_events(
between the Execution and the Artifact, the Event is
skipped.
+
+ .. code-block::
+
+ from google.cloud import aiplatform_v1
+
+ def sample_add_execution_events():
+ # Create a client
+ client = aiplatform_v1.MetadataServiceClient()
+
+ # Initialize request argument(s)
+ request = aiplatform_v1.AddExecutionEventsRequest(
+ execution="execution_value",
+ )
+
+ # Make the request
+ response = client.add_execution_events(request=request)
+
+ # Handle the response
+ print(response)
+
Args:
request (Union[google.cloud.aiplatform_v1.types.AddExecutionEventsRequest, dict]):
The request object. Request message for
@@ -2498,6 +3034,26 @@ async def query_execution_inputs_and_outputs(
this Execution, in the form of LineageSubgraph that also
contains the Execution and connecting Events.
+
+ .. code-block::
+
+ from google.cloud import aiplatform_v1
+
+ def sample_query_execution_inputs_and_outputs():
+ # Create a client
+ client = aiplatform_v1.MetadataServiceClient()
+
+ # Initialize request argument(s)
+ request = aiplatform_v1.QueryExecutionInputsAndOutputsRequest(
+ execution="execution_value",
+ )
+
+ # Make the request
+ response = client.query_execution_inputs_and_outputs(request=request)
+
+ # Handle the response
+ print(response)
+
Args:
request (Union[google.cloud.aiplatform_v1.types.QueryExecutionInputsAndOutputsRequest, dict]):
The request object. Request message for
@@ -2576,6 +3132,29 @@ async def create_metadata_schema(
) -> gca_metadata_schema.MetadataSchema:
r"""Creates a MetadataSchema.
+ .. code-block::
+
+ from google.cloud import aiplatform_v1
+
+ def sample_create_metadata_schema():
+ # Create a client
+ client = aiplatform_v1.MetadataServiceClient()
+
+ # Initialize request argument(s)
+ metadata_schema = aiplatform_v1.MetadataSchema()
+ metadata_schema.schema = "schema_value"
+
+ request = aiplatform_v1.CreateMetadataSchemaRequest(
+ parent="parent_value",
+ metadata_schema=metadata_schema,
+ )
+
+ # Make the request
+ response = client.create_metadata_schema(request=request)
+
+ # Handle the response
+ print(response)
+
Args:
request (Union[google.cloud.aiplatform_v1.types.CreateMetadataSchemaRequest, dict]):
The request object. Request message for
@@ -2672,6 +3251,25 @@ async def get_metadata_schema(
) -> metadata_schema.MetadataSchema:
r"""Retrieves a specific MetadataSchema.
+ .. code-block::
+
+ from google.cloud import aiplatform_v1
+
+ def sample_get_metadata_schema():
+ # Create a client
+ client = aiplatform_v1.MetadataServiceClient()
+
+ # Initialize request argument(s)
+ request = aiplatform_v1.GetMetadataSchemaRequest(
+ name="name_value",
+ )
+
+ # Make the request
+ response = client.get_metadata_schema(request=request)
+
+ # Handle the response
+ print(response)
+
Args:
request (Union[google.cloud.aiplatform_v1.types.GetMetadataSchemaRequest, dict]):
The request object. Request message for
@@ -2742,6 +3340,26 @@ async def list_metadata_schemas(
) -> pagers.ListMetadataSchemasAsyncPager:
r"""Lists MetadataSchemas.
+ .. code-block::
+
+ from google.cloud import aiplatform_v1
+
+ def sample_list_metadata_schemas():
+ # Create a client
+ client = aiplatform_v1.MetadataServiceClient()
+
+ # Initialize request argument(s)
+ request = aiplatform_v1.ListMetadataSchemasRequest(
+ parent="parent_value",
+ )
+
+ # Make the request
+ page_result = client.list_metadata_schemas(request=request)
+
+ # Handle the response
+ for response in page_result:
+ print(response)
+
Args:
request (Union[google.cloud.aiplatform_v1.types.ListMetadataSchemasRequest, dict]):
The request object. Request message for
@@ -2827,6 +3445,26 @@ async def query_artifact_lineage_subgraph(
Artifacts and Executions connected by Event edges and
returned as a LineageSubgraph.
+
+ .. code-block::
+
+ from google.cloud import aiplatform_v1
+
+ def sample_query_artifact_lineage_subgraph():
+ # Create a client
+ client = aiplatform_v1.MetadataServiceClient()
+
+ # Initialize request argument(s)
+ request = aiplatform_v1.QueryArtifactLineageSubgraphRequest(
+ artifact="artifact_value",
+ )
+
+ # Make the request
+ response = client.query_artifact_lineage_subgraph(request=request)
+
+ # Handle the response
+ print(response)
+
Args:
request (Union[google.cloud.aiplatform_v1.types.QueryArtifactLineageSubgraphRequest, dict]):
The request object. Request message for
diff --git a/google/cloud/aiplatform_v1/services/metadata_service/client.py b/google/cloud/aiplatform_v1/services/metadata_service/client.py
index 4b2f2ee956..871c8e8506 100644
--- a/google/cloud/aiplatform_v1/services/metadata_service/client.py
+++ b/google/cloud/aiplatform_v1/services/metadata_service/client.py
@@ -517,6 +517,31 @@ def create_metadata_store(
r"""Initializes a MetadataStore, including allocation of
resources.
+
+
+ .. code-block::
+
+ from google.cloud import aiplatform_v1
+
+ def sample_create_metadata_store():
+ # Create a client
+ client = aiplatform_v1.MetadataServiceClient()
+
+ # Initialize request argument(s)
+ request = aiplatform_v1.CreateMetadataStoreRequest(
+ parent="parent_value",
+ )
+
+ # Make the request
+ operation = client.create_metadata_store(request=request)
+
+ print("Waiting for operation to complete...")
+
+ response = operation.result()
+
+ # Handle the response
+ print(response)
+
Args:
request (Union[google.cloud.aiplatform_v1.types.CreateMetadataStoreRequest, dict]):
The request object. Request message for
@@ -625,6 +650,26 @@ def get_metadata_store(
) -> metadata_store.MetadataStore:
r"""Retrieves a specific MetadataStore.
+
+ .. code-block::
+
+ from google.cloud import aiplatform_v1
+
+ def sample_get_metadata_store():
+ # Create a client
+ client = aiplatform_v1.MetadataServiceClient()
+
+ # Initialize request argument(s)
+ request = aiplatform_v1.GetMetadataStoreRequest(
+ name="name_value",
+ )
+
+ # Make the request
+ response = client.get_metadata_store(request=request)
+
+ # Handle the response
+ print(response)
+
Args:
request (Union[google.cloud.aiplatform_v1.types.GetMetadataStoreRequest, dict]):
The request object. Request message for
@@ -698,6 +743,27 @@ def list_metadata_stores(
) -> pagers.ListMetadataStoresPager:
r"""Lists MetadataStores for a Location.
+
+ .. code-block::
+
+ from google.cloud import aiplatform_v1
+
+ def sample_list_metadata_stores():
+ # Create a client
+ client = aiplatform_v1.MetadataServiceClient()
+
+ # Initialize request argument(s)
+ request = aiplatform_v1.ListMetadataStoresRequest(
+ parent="parent_value",
+ )
+
+ # Make the request
+ page_result = client.list_metadata_stores(request=request)
+
+ # Handle the response
+ for response in page_result:
+ print(response)
+
Args:
request (Union[google.cloud.aiplatform_v1.types.ListMetadataStoresRequest, dict]):
The request object. Request message for
@@ -780,6 +846,31 @@ def delete_metadata_store(
r"""Deletes a single MetadataStore and all its child
resources (Artifacts, Executions, and Contexts).
+
+
+ .. code-block::
+
+ from google.cloud import aiplatform_v1
+
+ def sample_delete_metadata_store():
+ # Create a client
+ client = aiplatform_v1.MetadataServiceClient()
+
+ # Initialize request argument(s)
+ request = aiplatform_v1.DeleteMetadataStoreRequest(
+ name="name_value",
+ )
+
+ # Make the request
+ operation = client.delete_metadata_store(request=request)
+
+ print("Waiting for operation to complete...")
+
+ response = operation.result()
+
+ # Handle the response
+ print(response)
+
Args:
request (Union[google.cloud.aiplatform_v1.types.DeleteMetadataStoreRequest, dict]):
The request object. Request message for
@@ -875,6 +966,26 @@ def create_artifact(
) -> gca_artifact.Artifact:
r"""Creates an Artifact associated with a MetadataStore.
+
+ .. code-block::
+
+ from google.cloud import aiplatform_v1
+
+ def sample_create_artifact():
+ # Create a client
+ client = aiplatform_v1.MetadataServiceClient()
+
+ # Initialize request argument(s)
+ request = aiplatform_v1.CreateArtifactRequest(
+ parent="parent_value",
+ )
+
+ # Make the request
+ response = client.create_artifact(request=request)
+
+ # Handle the response
+ print(response)
+
Args:
request (Union[google.cloud.aiplatform_v1.types.CreateArtifactRequest, dict]):
The request object. Request message for
@@ -969,6 +1080,26 @@ def get_artifact(
) -> artifact.Artifact:
r"""Retrieves a specific Artifact.
+
+ .. code-block::
+
+ from google.cloud import aiplatform_v1
+
+ def sample_get_artifact():
+ # Create a client
+ client = aiplatform_v1.MetadataServiceClient()
+
+ # Initialize request argument(s)
+ request = aiplatform_v1.GetArtifactRequest(
+ name="name_value",
+ )
+
+ # Make the request
+ response = client.get_artifact(request=request)
+
+ # Handle the response
+ print(response)
+
Args:
request (Union[google.cloud.aiplatform_v1.types.GetArtifactRequest, dict]):
The request object. Request message for
@@ -1039,6 +1170,27 @@ def list_artifacts(
) -> pagers.ListArtifactsPager:
r"""Lists Artifacts in the MetadataStore.
+
+ .. code-block::
+
+ from google.cloud import aiplatform_v1
+
+ def sample_list_artifacts():
+ # Create a client
+ client = aiplatform_v1.MetadataServiceClient()
+
+ # Initialize request argument(s)
+ request = aiplatform_v1.ListArtifactsRequest(
+ parent="parent_value",
+ )
+
+ # Make the request
+ page_result = client.list_artifacts(request=request)
+
+ # Handle the response
+ for response in page_result:
+ print(response)
+
Args:
request (Union[google.cloud.aiplatform_v1.types.ListArtifactsRequest, dict]):
The request object. Request message for
@@ -1121,6 +1273,25 @@ def update_artifact(
) -> gca_artifact.Artifact:
r"""Updates a stored Artifact.
+
+ .. code-block::
+
+ from google.cloud import aiplatform_v1
+
+ def sample_update_artifact():
+ # Create a client
+ client = aiplatform_v1.MetadataServiceClient()
+
+ # Initialize request argument(s)
+ request = aiplatform_v1.UpdateArtifactRequest(
+ )
+
+ # Make the request
+ response = client.update_artifact(request=request)
+
+ # Handle the response
+ print(response)
+
Args:
request (Union[google.cloud.aiplatform_v1.types.UpdateArtifactRequest, dict]):
The request object. Request message for
@@ -1207,6 +1378,30 @@ def delete_artifact(
) -> gac_operation.Operation:
r"""Deletes an Artifact.
+
+ .. code-block::
+
+ from google.cloud import aiplatform_v1
+
+ def sample_delete_artifact():
+ # Create a client
+ client = aiplatform_v1.MetadataServiceClient()
+
+ # Initialize request argument(s)
+ request = aiplatform_v1.DeleteArtifactRequest(
+ name="name_value",
+ )
+
+ # Make the request
+ operation = client.delete_artifact(request=request)
+
+ print("Waiting for operation to complete...")
+
+ response = operation.result()
+
+ # Handle the response
+ print(response)
+
Args:
request (Union[google.cloud.aiplatform_v1.types.DeleteArtifactRequest, dict]):
The request object. Request message for
@@ -1300,6 +1495,31 @@ def purge_artifacts(
) -> gac_operation.Operation:
r"""Purges Artifacts.
+
+ .. code-block::
+
+ from google.cloud import aiplatform_v1
+
+ def sample_purge_artifacts():
+ # Create a client
+ client = aiplatform_v1.MetadataServiceClient()
+
+ # Initialize request argument(s)
+ request = aiplatform_v1.PurgeArtifactsRequest(
+ parent="parent_value",
+ filter="filter_value",
+ )
+
+ # Make the request
+ operation = client.purge_artifacts(request=request)
+
+ print("Waiting for operation to complete...")
+
+ response = operation.result()
+
+ # Handle the response
+ print(response)
+
Args:
request (Union[google.cloud.aiplatform_v1.types.PurgeArtifactsRequest, dict]):
The request object. Request message for
@@ -1386,6 +1606,26 @@ def create_context(
) -> gca_context.Context:
r"""Creates a Context associated with a MetadataStore.
+
+ .. code-block::
+
+ from google.cloud import aiplatform_v1
+
+ def sample_create_context():
+ # Create a client
+ client = aiplatform_v1.MetadataServiceClient()
+
+ # Initialize request argument(s)
+ request = aiplatform_v1.CreateContextRequest(
+ parent="parent_value",
+ )
+
+ # Make the request
+ response = client.create_context(request=request)
+
+ # Handle the response
+ print(response)
+
Args:
request (Union[google.cloud.aiplatform_v1.types.CreateContextRequest, dict]):
The request object. Request message for
@@ -1480,6 +1720,26 @@ def get_context(
) -> context.Context:
r"""Retrieves a specific Context.
+
+ .. code-block::
+
+ from google.cloud import aiplatform_v1
+
+ def sample_get_context():
+ # Create a client
+ client = aiplatform_v1.MetadataServiceClient()
+
+ # Initialize request argument(s)
+ request = aiplatform_v1.GetContextRequest(
+ name="name_value",
+ )
+
+ # Make the request
+ response = client.get_context(request=request)
+
+ # Handle the response
+ print(response)
+
Args:
request (Union[google.cloud.aiplatform_v1.types.GetContextRequest, dict]):
The request object. Request message for
@@ -1550,6 +1810,27 @@ def list_contexts(
) -> pagers.ListContextsPager:
r"""Lists Contexts on the MetadataStore.
+
+ .. code-block::
+
+ from google.cloud import aiplatform_v1
+
+ def sample_list_contexts():
+ # Create a client
+ client = aiplatform_v1.MetadataServiceClient()
+
+ # Initialize request argument(s)
+ request = aiplatform_v1.ListContextsRequest(
+ parent="parent_value",
+ )
+
+ # Make the request
+ page_result = client.list_contexts(request=request)
+
+ # Handle the response
+ for response in page_result:
+ print(response)
+
Args:
request (Union[google.cloud.aiplatform_v1.types.ListContextsRequest, dict]):
The request object. Request message for
@@ -1632,6 +1913,25 @@ def update_context(
) -> gca_context.Context:
r"""Updates a stored Context.
+
+ .. code-block::
+
+ from google.cloud import aiplatform_v1
+
+ def sample_update_context():
+ # Create a client
+ client = aiplatform_v1.MetadataServiceClient()
+
+ # Initialize request argument(s)
+ request = aiplatform_v1.UpdateContextRequest(
+ )
+
+ # Make the request
+ response = client.update_context(request=request)
+
+ # Handle the response
+ print(response)
+
Args:
request (Union[google.cloud.aiplatform_v1.types.UpdateContextRequest, dict]):
The request object. Request message for
@@ -1717,6 +2017,30 @@ def delete_context(
) -> gac_operation.Operation:
r"""Deletes a stored Context.
+
+ .. code-block::
+
+ from google.cloud import aiplatform_v1
+
+ def sample_delete_context():
+ # Create a client
+ client = aiplatform_v1.MetadataServiceClient()
+
+ # Initialize request argument(s)
+ request = aiplatform_v1.DeleteContextRequest(
+ name="name_value",
+ )
+
+ # Make the request
+ operation = client.delete_context(request=request)
+
+ print("Waiting for operation to complete...")
+
+ response = operation.result()
+
+ # Handle the response
+ print(response)
+
Args:
request (Union[google.cloud.aiplatform_v1.types.DeleteContextRequest, dict]):
The request object. Request message for
@@ -1810,6 +2134,31 @@ def purge_contexts(
) -> gac_operation.Operation:
r"""Purges Contexts.
+
+ .. code-block::
+
+ from google.cloud import aiplatform_v1
+
+ def sample_purge_contexts():
+ # Create a client
+ client = aiplatform_v1.MetadataServiceClient()
+
+ # Initialize request argument(s)
+ request = aiplatform_v1.PurgeContextsRequest(
+ parent="parent_value",
+ filter="filter_value",
+ )
+
+ # Make the request
+ operation = client.purge_contexts(request=request)
+
+ print("Waiting for operation to complete...")
+
+ response = operation.result()
+
+ # Handle the response
+ print(response)
+
Args:
request (Union[google.cloud.aiplatform_v1.types.PurgeContextsRequest, dict]):
The request object. Request message for
@@ -1900,6 +2249,27 @@ def add_context_artifacts_and_executions(
If any of the Artifacts or Executions have already been
added to a Context, they are simply skipped.
+
+
+ .. code-block::
+
+ from google.cloud import aiplatform_v1
+
+ def sample_add_context_artifacts_and_executions():
+ # Create a client
+ client = aiplatform_v1.MetadataServiceClient()
+
+ # Initialize request argument(s)
+ request = aiplatform_v1.AddContextArtifactsAndExecutionsRequest(
+ context="context_value",
+ )
+
+ # Make the request
+ response = client.add_context_artifacts_and_executions(request=request)
+
+ # Handle the response
+ print(response)
+
Args:
request (Union[google.cloud.aiplatform_v1.types.AddContextArtifactsAndExecutionsRequest, dict]):
The request object. Request message for
@@ -2005,6 +2375,27 @@ def add_context_children(
cycle or cause any Context to have more than 10 parents, the
request will fail with an INVALID_ARGUMENT error.
+
+
+ .. code-block::
+
+ from google.cloud import aiplatform_v1
+
+ def sample_add_context_children():
+ # Create a client
+ client = aiplatform_v1.MetadataServiceClient()
+
+ # Initialize request argument(s)
+ request = aiplatform_v1.AddContextChildrenRequest(
+ context="context_value",
+ )
+
+ # Make the request
+ response = client.add_context_children(request=request)
+
+ # Handle the response
+ print(response)
+
Args:
request (Union[google.cloud.aiplatform_v1.types.AddContextChildrenRequest, dict]):
The request object. Request message for
@@ -2091,6 +2482,27 @@ def query_context_lineage_subgraph(
specified Context, connected by Event edges and returned
as a LineageSubgraph.
+
+
+ .. code-block::
+
+ from google.cloud import aiplatform_v1
+
+ def sample_query_context_lineage_subgraph():
+ # Create a client
+ client = aiplatform_v1.MetadataServiceClient()
+
+ # Initialize request argument(s)
+ request = aiplatform_v1.QueryContextLineageSubgraphRequest(
+ context="context_value",
+ )
+
+ # Make the request
+ response = client.query_context_lineage_subgraph(request=request)
+
+ # Handle the response
+ print(response)
+
Args:
request (Union[google.cloud.aiplatform_v1.types.QueryContextLineageSubgraphRequest, dict]):
The request object. Request message for
@@ -2174,6 +2586,26 @@ def create_execution(
) -> gca_execution.Execution:
r"""Creates an Execution associated with a MetadataStore.
+
+ .. code-block::
+
+ from google.cloud import aiplatform_v1
+
+ def sample_create_execution():
+ # Create a client
+ client = aiplatform_v1.MetadataServiceClient()
+
+ # Initialize request argument(s)
+ request = aiplatform_v1.CreateExecutionRequest(
+ parent="parent_value",
+ )
+
+ # Make the request
+ response = client.create_execution(request=request)
+
+ # Handle the response
+ print(response)
+
Args:
request (Union[google.cloud.aiplatform_v1.types.CreateExecutionRequest, dict]):
The request object. Request message for
@@ -2268,6 +2700,26 @@ def get_execution(
) -> execution.Execution:
r"""Retrieves a specific Execution.
+
+ .. code-block::
+
+ from google.cloud import aiplatform_v1
+
+ def sample_get_execution():
+ # Create a client
+ client = aiplatform_v1.MetadataServiceClient()
+
+ # Initialize request argument(s)
+ request = aiplatform_v1.GetExecutionRequest(
+ name="name_value",
+ )
+
+ # Make the request
+ response = client.get_execution(request=request)
+
+ # Handle the response
+ print(response)
+
Args:
request (Union[google.cloud.aiplatform_v1.types.GetExecutionRequest, dict]):
The request object. Request message for
@@ -2338,6 +2790,27 @@ def list_executions(
) -> pagers.ListExecutionsPager:
r"""Lists Executions in the MetadataStore.
+
+ .. code-block::
+
+ from google.cloud import aiplatform_v1
+
+ def sample_list_executions():
+ # Create a client
+ client = aiplatform_v1.MetadataServiceClient()
+
+ # Initialize request argument(s)
+ request = aiplatform_v1.ListExecutionsRequest(
+ parent="parent_value",
+ )
+
+ # Make the request
+ page_result = client.list_executions(request=request)
+
+ # Handle the response
+ for response in page_result:
+ print(response)
+
Args:
request (Union[google.cloud.aiplatform_v1.types.ListExecutionsRequest, dict]):
The request object. Request message for
@@ -2420,6 +2893,25 @@ def update_execution(
) -> gca_execution.Execution:
r"""Updates a stored Execution.
+
+ .. code-block::
+
+ from google.cloud import aiplatform_v1
+
+ def sample_update_execution():
+ # Create a client
+ client = aiplatform_v1.MetadataServiceClient()
+
+ # Initialize request argument(s)
+ request = aiplatform_v1.UpdateExecutionRequest(
+ )
+
+ # Make the request
+ response = client.update_execution(request=request)
+
+ # Handle the response
+ print(response)
+
Args:
request (Union[google.cloud.aiplatform_v1.types.UpdateExecutionRequest, dict]):
The request object. Request message for
@@ -2506,6 +2998,30 @@ def delete_execution(
) -> gac_operation.Operation:
r"""Deletes an Execution.
+
+ .. code-block::
+
+ from google.cloud import aiplatform_v1
+
+ def sample_delete_execution():
+ # Create a client
+ client = aiplatform_v1.MetadataServiceClient()
+
+ # Initialize request argument(s)
+ request = aiplatform_v1.DeleteExecutionRequest(
+ name="name_value",
+ )
+
+ # Make the request
+ operation = client.delete_execution(request=request)
+
+ print("Waiting for operation to complete...")
+
+ response = operation.result()
+
+ # Handle the response
+ print(response)
+
Args:
request (Union[google.cloud.aiplatform_v1.types.DeleteExecutionRequest, dict]):
The request object. Request message for
@@ -2599,6 +3115,31 @@ def purge_executions(
) -> gac_operation.Operation:
r"""Purges Executions.
+
+ .. code-block::
+
+ from google.cloud import aiplatform_v1
+
+ def sample_purge_executions():
+ # Create a client
+ client = aiplatform_v1.MetadataServiceClient()
+
+ # Initialize request argument(s)
+ request = aiplatform_v1.PurgeExecutionsRequest(
+ parent="parent_value",
+ filter="filter_value",
+ )
+
+ # Make the request
+ operation = client.purge_executions(request=request)
+
+ print("Waiting for operation to complete...")
+
+ response = operation.result()
+
+ # Handle the response
+ print(response)
+
Args:
request (Union[google.cloud.aiplatform_v1.types.PurgeExecutionsRequest, dict]):
The request object. Request message for
@@ -2688,6 +3229,27 @@ def add_execution_events(
between the Execution and the Artifact, the Event is
skipped.
+
+
+ .. code-block::
+
+ from google.cloud import aiplatform_v1
+
+ def sample_add_execution_events():
+ # Create a client
+ client = aiplatform_v1.MetadataServiceClient()
+
+ # Initialize request argument(s)
+ request = aiplatform_v1.AddExecutionEventsRequest(
+ execution="execution_value",
+ )
+
+ # Make the request
+ response = client.add_execution_events(request=request)
+
+ # Handle the response
+ print(response)
+
Args:
request (Union[google.cloud.aiplatform_v1.types.AddExecutionEventsRequest, dict]):
The request object. Request message for
@@ -2773,6 +3335,27 @@ def query_execution_inputs_and_outputs(
this Execution, in the form of LineageSubgraph that also
contains the Execution and connecting Events.
+
+
+ .. code-block::
+
+ from google.cloud import aiplatform_v1
+
+ def sample_query_execution_inputs_and_outputs():
+ # Create a client
+ client = aiplatform_v1.MetadataServiceClient()
+
+ # Initialize request argument(s)
+ request = aiplatform_v1.QueryExecutionInputsAndOutputsRequest(
+ execution="execution_value",
+ )
+
+ # Make the request
+ response = client.query_execution_inputs_and_outputs(request=request)
+
+ # Handle the response
+ print(response)
+
Args:
request (Union[google.cloud.aiplatform_v1.types.QueryExecutionInputsAndOutputsRequest, dict]):
The request object. Request message for
@@ -2855,6 +3438,30 @@ def create_metadata_schema(
) -> gca_metadata_schema.MetadataSchema:
r"""Creates a MetadataSchema.
+
+ .. code-block::
+
+ from google.cloud import aiplatform_v1
+
+ def sample_create_metadata_schema():
+ # Create a client
+ client = aiplatform_v1.MetadataServiceClient()
+
+ # Initialize request argument(s)
+ metadata_schema = aiplatform_v1.MetadataSchema()
+ metadata_schema.schema = "schema_value"
+
+ request = aiplatform_v1.CreateMetadataSchemaRequest(
+ parent="parent_value",
+ metadata_schema=metadata_schema,
+ )
+
+ # Make the request
+ response = client.create_metadata_schema(request=request)
+
+ # Handle the response
+ print(response)
+
Args:
request (Union[google.cloud.aiplatform_v1.types.CreateMetadataSchemaRequest, dict]):
The request object. Request message for
@@ -2951,6 +3558,26 @@ def get_metadata_schema(
) -> metadata_schema.MetadataSchema:
r"""Retrieves a specific MetadataSchema.
+
+ .. code-block::
+
+ from google.cloud import aiplatform_v1
+
+ def sample_get_metadata_schema():
+ # Create a client
+ client = aiplatform_v1.MetadataServiceClient()
+
+ # Initialize request argument(s)
+ request = aiplatform_v1.GetMetadataSchemaRequest(
+ name="name_value",
+ )
+
+ # Make the request
+ response = client.get_metadata_schema(request=request)
+
+ # Handle the response
+ print(response)
+
Args:
request (Union[google.cloud.aiplatform_v1.types.GetMetadataSchemaRequest, dict]):
The request object. Request message for
@@ -3021,6 +3648,27 @@ def list_metadata_schemas(
) -> pagers.ListMetadataSchemasPager:
r"""Lists MetadataSchemas.
+
+ .. code-block::
+
+ from google.cloud import aiplatform_v1
+
+ def sample_list_metadata_schemas():
+ # Create a client
+ client = aiplatform_v1.MetadataServiceClient()
+
+ # Initialize request argument(s)
+ request = aiplatform_v1.ListMetadataSchemasRequest(
+ parent="parent_value",
+ )
+
+ # Make the request
+ page_result = client.list_metadata_schemas(request=request)
+
+ # Handle the response
+ for response in page_result:
+ print(response)
+
Args:
request (Union[google.cloud.aiplatform_v1.types.ListMetadataSchemasRequest, dict]):
The request object. Request message for
@@ -3106,6 +3754,27 @@ def query_artifact_lineage_subgraph(
Artifacts and Executions connected by Event edges and
returned as a LineageSubgraph.
+
+
+ .. code-block::
+
+ from google.cloud import aiplatform_v1
+
+ def sample_query_artifact_lineage_subgraph():
+ # Create a client
+ client = aiplatform_v1.MetadataServiceClient()
+
+ # Initialize request argument(s)
+ request = aiplatform_v1.QueryArtifactLineageSubgraphRequest(
+ artifact="artifact_value",
+ )
+
+ # Make the request
+ response = client.query_artifact_lineage_subgraph(request=request)
+
+ # Handle the response
+ print(response)
+
Args:
request (Union[google.cloud.aiplatform_v1.types.QueryArtifactLineageSubgraphRequest, dict]):
The request object. Request message for
diff --git a/google/cloud/aiplatform_v1/services/migration_service/async_client.py b/google/cloud/aiplatform_v1/services/migration_service/async_client.py
index 05fcd67854..de0fc560c2 100644
--- a/google/cloud/aiplatform_v1/services/migration_service/async_client.py
+++ b/google/cloud/aiplatform_v1/services/migration_service/async_client.py
@@ -235,6 +235,27 @@ async def search_migratable_resources(
ml.googleapis.com that can be migrated to Vertex AI's
given location.
+
+ .. code-block::
+
+ from google.cloud import aiplatform_v1
+
+ def sample_search_migratable_resources():
+ # Create a client
+ client = aiplatform_v1.MigrationServiceClient()
+
+ # Initialize request argument(s)
+ request = aiplatform_v1.SearchMigratableResourcesRequest(
+ parent="parent_value",
+ )
+
+ # Make the request
+ page_result = client.search_migratable_resources(request=request)
+
+ # Handle the response
+ for response in page_result:
+ print(response)
+
Args:
request (Union[google.cloud.aiplatform_v1.types.SearchMigratableResourcesRequest, dict]):
The request object. Request message for
@@ -323,6 +344,36 @@ async def batch_migrate_resources(
automl.googleapis.com, and datalabeling.googleapis.com
to Vertex AI.
+
+ .. code-block::
+
+ from google.cloud import aiplatform_v1
+
+ def sample_batch_migrate_resources():
+ # Create a client
+ client = aiplatform_v1.MigrationServiceClient()
+
+ # Initialize request argument(s)
+ migrate_resource_requests = aiplatform_v1.MigrateResourceRequest()
+ migrate_resource_requests.migrate_ml_engine_model_version_config.endpoint = "endpoint_value"
+ migrate_resource_requests.migrate_ml_engine_model_version_config.model_version = "model_version_value"
+ migrate_resource_requests.migrate_ml_engine_model_version_config.model_display_name = "model_display_name_value"
+
+ request = aiplatform_v1.BatchMigrateResourcesRequest(
+ parent="parent_value",
+ migrate_resource_requests=migrate_resource_requests,
+ )
+
+ # Make the request
+ operation = client.batch_migrate_resources(request=request)
+
+ print("Waiting for operation to complete...")
+
+ response = operation.result()
+
+ # Handle the response
+ print(response)
+
Args:
request (Union[google.cloud.aiplatform_v1.types.BatchMigrateResourcesRequest, dict]):
The request object. Request message for
diff --git a/google/cloud/aiplatform_v1/services/migration_service/client.py b/google/cloud/aiplatform_v1/services/migration_service/client.py
index f50dbfb2f3..0e63a91c8e 100644
--- a/google/cloud/aiplatform_v1/services/migration_service/client.py
+++ b/google/cloud/aiplatform_v1/services/migration_service/client.py
@@ -199,32 +199,32 @@ def parse_dataset_path(path: str) -> Dict[str, str]:
return m.groupdict() if m else {}
@staticmethod
- def dataset_path(project: str, location: str, dataset: str,) -> str:
+ def dataset_path(project: str, dataset: str,) -> str:
"""Returns a fully-qualified dataset string."""
- return "projects/{project}/locations/{location}/datasets/{dataset}".format(
- project=project, location=location, dataset=dataset,
+ return "projects/{project}/datasets/{dataset}".format(
+ project=project, dataset=dataset,
)
@staticmethod
def parse_dataset_path(path: str) -> Dict[str, str]:
"""Parses a dataset path into its component segments."""
- m = re.match(
- r"^projects/(?P<project>.+?)/locations/(?P<location>.+?)/datasets/(?P<dataset>.+?)$",
- path,
- )
+ m = re.match(r"^projects/(?P<project>.+?)/datasets/(?P<dataset>.+?)$", path)
return m.groupdict() if m else {}
@staticmethod
- def dataset_path(project: str, dataset: str,) -> str:
+ def dataset_path(project: str, location: str, dataset: str,) -> str:
"""Returns a fully-qualified dataset string."""
- return "projects/{project}/datasets/{dataset}".format(
- project=project, dataset=dataset,
+ return "projects/{project}/locations/{location}/datasets/{dataset}".format(
+ project=project, location=location, dataset=dataset,
)
@staticmethod
def parse_dataset_path(path: str) -> Dict[str, str]:
"""Parses a dataset path into its component segments."""
- m = re.match(r"^projects/(?P<project>.+?)/datasets/(?P<dataset>.+?)$", path)
+ m = re.match(
+ r"^projects/(?P<project>.+?)/locations/(?P<location>.+?)/datasets/(?P<dataset>.+?)$",
+ path,
+ )
return m.groupdict() if m else {}
@staticmethod
@@ -513,6 +513,28 @@ def search_migratable_resources(
ml.googleapis.com that can be migrated to Vertex AI's
given location.
+
+
+ .. code-block::
+
+ from google.cloud import aiplatform_v1
+
+ def sample_search_migratable_resources():
+ # Create a client
+ client = aiplatform_v1.MigrationServiceClient()
+
+ # Initialize request argument(s)
+ request = aiplatform_v1.SearchMigratableResourcesRequest(
+ parent="parent_value",
+ )
+
+ # Make the request
+ page_result = client.search_migratable_resources(request=request)
+
+ # Handle the response
+ for response in page_result:
+ print(response)
+
Args:
request (Union[google.cloud.aiplatform_v1.types.SearchMigratableResourcesRequest, dict]):
The request object. Request message for
@@ -603,6 +625,37 @@ def batch_migrate_resources(
automl.googleapis.com, and datalabeling.googleapis.com
to Vertex AI.
+
+
+ .. code-block::
+
+ from google.cloud import aiplatform_v1
+
+ def sample_batch_migrate_resources():
+ # Create a client
+ client = aiplatform_v1.MigrationServiceClient()
+
+ # Initialize request argument(s)
+ migrate_resource_requests = aiplatform_v1.MigrateResourceRequest()
+ migrate_resource_requests.migrate_ml_engine_model_version_config.endpoint = "endpoint_value"
+ migrate_resource_requests.migrate_ml_engine_model_version_config.model_version = "model_version_value"
+ migrate_resource_requests.migrate_ml_engine_model_version_config.model_display_name = "model_display_name_value"
+
+ request = aiplatform_v1.BatchMigrateResourcesRequest(
+ parent="parent_value",
+ migrate_resource_requests=migrate_resource_requests,
+ )
+
+ # Make the request
+ operation = client.batch_migrate_resources(request=request)
+
+ print("Waiting for operation to complete...")
+
+ response = operation.result()
+
+ # Handle the response
+ print(response)
+
Args:
request (Union[google.cloud.aiplatform_v1.types.BatchMigrateResourcesRequest, dict]):
The request object. Request message for
diff --git a/google/cloud/aiplatform_v1/services/model_service/async_client.py b/google/cloud/aiplatform_v1/services/model_service/async_client.py
index ef8a6ea146..f91b95d09c 100644
--- a/google/cloud/aiplatform_v1/services/model_service/async_client.py
+++ b/google/cloud/aiplatform_v1/services/model_service/async_client.py
@@ -240,6 +240,33 @@ async def upload_model(
) -> operation_async.AsyncOperation:
r"""Uploads a Model artifact into Vertex AI.
+ .. code-block::
+
+ from google.cloud import aiplatform_v1
+
+ def sample_upload_model():
+ # Create a client
+ client = aiplatform_v1.ModelServiceClient()
+
+ # Initialize request argument(s)
+ model = aiplatform_v1.Model()
+ model.display_name = "display_name_value"
+
+ request = aiplatform_v1.UploadModelRequest(
+ parent="parent_value",
+ model=model,
+ )
+
+ # Make the request
+ operation = client.upload_model(request=request)
+
+ print("Waiting for operation to complete...")
+
+ response = operation.result()
+
+ # Handle the response
+ print(response)
+
Args:
request (Union[google.cloud.aiplatform_v1.types.UploadModelRequest, dict]):
The request object. Request message for
@@ -332,6 +359,25 @@ async def get_model(
) -> model.Model:
r"""Gets a Model.
+ .. code-block::
+
+ from google.cloud import aiplatform_v1
+
+ def sample_get_model():
+ # Create a client
+ client = aiplatform_v1.ModelServiceClient()
+
+ # Initialize request argument(s)
+ request = aiplatform_v1.GetModelRequest(
+ name="name_value",
+ )
+
+ # Make the request
+ response = client.get_model(request=request)
+
+ # Handle the response
+ print(response)
+
Args:
request (Union[google.cloud.aiplatform_v1.types.GetModelRequest, dict]):
The request object. Request message for
@@ -401,6 +447,26 @@ async def list_models(
) -> pagers.ListModelsAsyncPager:
r"""Lists Models in a Location.
+ .. code-block::
+
+ from google.cloud import aiplatform_v1
+
+ def sample_list_models():
+ # Create a client
+ client = aiplatform_v1.ModelServiceClient()
+
+ # Initialize request argument(s)
+ request = aiplatform_v1.ListModelsRequest(
+ parent="parent_value",
+ )
+
+ # Make the request
+ page_result = client.list_models(request=request)
+
+ # Handle the response
+ for response in page_result:
+ print(response)
+
Args:
request (Union[google.cloud.aiplatform_v1.types.ListModelsRequest, dict]):
The request object. Request message for
@@ -483,6 +549,28 @@ async def update_model(
) -> gca_model.Model:
r"""Updates a Model.
+ .. code-block::
+
+ from google.cloud import aiplatform_v1
+
+ def sample_update_model():
+ # Create a client
+ client = aiplatform_v1.ModelServiceClient()
+
+ # Initialize request argument(s)
+ model = aiplatform_v1.Model()
+ model.display_name = "display_name_value"
+
+ request = aiplatform_v1.UpdateModelRequest(
+ model=model,
+ )
+
+ # Make the request
+ response = client.update_model(request=request)
+
+ # Handle the response
+ print(response)
+
Args:
request (Union[google.cloud.aiplatform_v1.types.UpdateModelRequest, dict]):
The request object. Request message for
@@ -571,6 +659,30 @@ async def delete_model(
[deployed_models][google.cloud.aiplatform.v1.Endpoint.deployed_models]
field.
+
+ .. code-block::
+
+ from google.cloud import aiplatform_v1
+
+ def sample_delete_model():
+ # Create a client
+ client = aiplatform_v1.ModelServiceClient()
+
+ # Initialize request argument(s)
+ request = aiplatform_v1.DeleteModelRequest(
+ name="name_value",
+ )
+
+ # Make the request
+ operation = client.delete_model(request=request)
+
+ print("Waiting for operation to complete...")
+
+ response = operation.result()
+
+ # Handle the response
+ print(response)
+
Args:
request (Union[google.cloud.aiplatform_v1.types.DeleteModelRequest, dict]):
The request object. Request message for
@@ -668,6 +780,30 @@ async def export_model(
least one [supported export
format][google.cloud.aiplatform.v1.Model.supported_export_formats].
+
+ .. code-block::
+
+ from google.cloud import aiplatform_v1
+
+ def sample_export_model():
+ # Create a client
+ client = aiplatform_v1.ModelServiceClient()
+
+ # Initialize request argument(s)
+ request = aiplatform_v1.ExportModelRequest(
+ name="name_value",
+ )
+
+ # Make the request
+ operation = client.export_model(request=request)
+
+ print("Waiting for operation to complete...")
+
+ response = operation.result()
+
+ # Handle the response
+ print(response)
+
Args:
request (Union[google.cloud.aiplatform_v1.types.ExportModelRequest, dict]):
The request object. Request message for
@@ -761,6 +897,25 @@ async def get_model_evaluation(
) -> model_evaluation.ModelEvaluation:
r"""Gets a ModelEvaluation.
+ .. code-block::
+
+ from google.cloud import aiplatform_v1
+
+ def sample_get_model_evaluation():
+ # Create a client
+ client = aiplatform_v1.ModelServiceClient()
+
+ # Initialize request argument(s)
+ request = aiplatform_v1.GetModelEvaluationRequest(
+ name="name_value",
+ )
+
+ # Make the request
+ response = client.get_model_evaluation(request=request)
+
+ # Handle the response
+ print(response)
+
Args:
request (Union[google.cloud.aiplatform_v1.types.GetModelEvaluationRequest, dict]):
The request object. Request message for
@@ -835,6 +990,26 @@ async def list_model_evaluations(
) -> pagers.ListModelEvaluationsAsyncPager:
r"""Lists ModelEvaluations in a Model.
+ .. code-block::
+
+ from google.cloud import aiplatform_v1
+
+ def sample_list_model_evaluations():
+ # Create a client
+ client = aiplatform_v1.ModelServiceClient()
+
+ # Initialize request argument(s)
+ request = aiplatform_v1.ListModelEvaluationsRequest(
+ parent="parent_value",
+ )
+
+ # Make the request
+ page_result = client.list_model_evaluations(request=request)
+
+ # Handle the response
+ for response in page_result:
+ print(response)
+
Args:
request (Union[google.cloud.aiplatform_v1.types.ListModelEvaluationsRequest, dict]):
The request object. Request message for
@@ -916,6 +1091,25 @@ async def get_model_evaluation_slice(
) -> model_evaluation_slice.ModelEvaluationSlice:
r"""Gets a ModelEvaluationSlice.
+ .. code-block::
+
+ from google.cloud import aiplatform_v1
+
+ def sample_get_model_evaluation_slice():
+ # Create a client
+ client = aiplatform_v1.ModelServiceClient()
+
+ # Initialize request argument(s)
+ request = aiplatform_v1.GetModelEvaluationSliceRequest(
+ name="name_value",
+ )
+
+ # Make the request
+ response = client.get_model_evaluation_slice(request=request)
+
+ # Handle the response
+ print(response)
+
Args:
request (Union[google.cloud.aiplatform_v1.types.GetModelEvaluationSliceRequest, dict]):
The request object. Request message for
@@ -990,6 +1184,26 @@ async def list_model_evaluation_slices(
) -> pagers.ListModelEvaluationSlicesAsyncPager:
r"""Lists ModelEvaluationSlices in a ModelEvaluation.
+ .. code-block::
+
+ from google.cloud import aiplatform_v1
+
+ def sample_list_model_evaluation_slices():
+ # Create a client
+ client = aiplatform_v1.ModelServiceClient()
+
+ # Initialize request argument(s)
+ request = aiplatform_v1.ListModelEvaluationSlicesRequest(
+ parent="parent_value",
+ )
+
+ # Make the request
+ page_result = client.list_model_evaluation_slices(request=request)
+
+ # Handle the response
+ for response in page_result:
+ print(response)
+
Args:
request (Union[google.cloud.aiplatform_v1.types.ListModelEvaluationSlicesRequest, dict]):
The request object. Request message for
diff --git a/google/cloud/aiplatform_v1/services/model_service/client.py b/google/cloud/aiplatform_v1/services/model_service/client.py
index c2ba20b31f..576bdce2bb 100644
--- a/google/cloud/aiplatform_v1/services/model_service/client.py
+++ b/google/cloud/aiplatform_v1/services/model_service/client.py
@@ -497,6 +497,34 @@ def upload_model(
) -> gac_operation.Operation:
r"""Uploads a Model artifact into Vertex AI.
+
+ .. code-block::
+
+ from google.cloud import aiplatform_v1
+
+ def sample_upload_model():
+ # Create a client
+ client = aiplatform_v1.ModelServiceClient()
+
+ # Initialize request argument(s)
+ model = aiplatform_v1.Model()
+ model.display_name = "display_name_value"
+
+ request = aiplatform_v1.UploadModelRequest(
+ parent="parent_value",
+ model=model,
+ )
+
+ # Make the request
+ operation = client.upload_model(request=request)
+
+ print("Waiting for operation to complete...")
+
+ response = operation.result()
+
+ # Handle the response
+ print(response)
+
Args:
request (Union[google.cloud.aiplatform_v1.types.UploadModelRequest, dict]):
The request object. Request message for
@@ -589,6 +617,26 @@ def get_model(
) -> model.Model:
r"""Gets a Model.
+
+ .. code-block::
+
+ from google.cloud import aiplatform_v1
+
+ def sample_get_model():
+ # Create a client
+ client = aiplatform_v1.ModelServiceClient()
+
+ # Initialize request argument(s)
+ request = aiplatform_v1.GetModelRequest(
+ name="name_value",
+ )
+
+ # Make the request
+ response = client.get_model(request=request)
+
+ # Handle the response
+ print(response)
+
Args:
request (Union[google.cloud.aiplatform_v1.types.GetModelRequest, dict]):
The request object. Request message for
@@ -658,6 +706,27 @@ def list_models(
) -> pagers.ListModelsPager:
r"""Lists Models in a Location.
+
+ .. code-block::
+
+ from google.cloud import aiplatform_v1
+
+ def sample_list_models():
+ # Create a client
+ client = aiplatform_v1.ModelServiceClient()
+
+ # Initialize request argument(s)
+ request = aiplatform_v1.ListModelsRequest(
+ parent="parent_value",
+ )
+
+ # Make the request
+ page_result = client.list_models(request=request)
+
+ # Handle the response
+ for response in page_result:
+ print(response)
+
Args:
request (Union[google.cloud.aiplatform_v1.types.ListModelsRequest, dict]):
The request object. Request message for
@@ -740,6 +809,29 @@ def update_model(
) -> gca_model.Model:
r"""Updates a Model.
+
+ .. code-block::
+
+ from google.cloud import aiplatform_v1
+
+ def sample_update_model():
+ # Create a client
+ client = aiplatform_v1.ModelServiceClient()
+
+ # Initialize request argument(s)
+ model = aiplatform_v1.Model()
+ model.display_name = "display_name_value"
+
+ request = aiplatform_v1.UpdateModelRequest(
+ model=model,
+ )
+
+ # Make the request
+ response = client.update_model(request=request)
+
+ # Handle the response
+ print(response)
+
Args:
request (Union[google.cloud.aiplatform_v1.types.UpdateModelRequest, dict]):
The request object. Request message for
@@ -828,6 +920,31 @@ def delete_model(
[deployed_models][google.cloud.aiplatform.v1.Endpoint.deployed_models]
field.
+
+
+ .. code-block::
+
+ from google.cloud import aiplatform_v1
+
+ def sample_delete_model():
+ # Create a client
+ client = aiplatform_v1.ModelServiceClient()
+
+ # Initialize request argument(s)
+ request = aiplatform_v1.DeleteModelRequest(
+ name="name_value",
+ )
+
+ # Make the request
+ operation = client.delete_model(request=request)
+
+ print("Waiting for operation to complete...")
+
+ response = operation.result()
+
+ # Handle the response
+ print(response)
+
Args:
request (Union[google.cloud.aiplatform_v1.types.DeleteModelRequest, dict]):
The request object. Request message for
@@ -925,6 +1042,31 @@ def export_model(
least one [supported export
format][google.cloud.aiplatform.v1.Model.supported_export_formats].
+
+
+ .. code-block::
+
+ from google.cloud import aiplatform_v1
+
+ def sample_export_model():
+ # Create a client
+ client = aiplatform_v1.ModelServiceClient()
+
+ # Initialize request argument(s)
+ request = aiplatform_v1.ExportModelRequest(
+ name="name_value",
+ )
+
+ # Make the request
+ operation = client.export_model(request=request)
+
+ print("Waiting for operation to complete...")
+
+ response = operation.result()
+
+ # Handle the response
+ print(response)
+
Args:
request (Union[google.cloud.aiplatform_v1.types.ExportModelRequest, dict]):
The request object. Request message for
@@ -1018,6 +1160,26 @@ def get_model_evaluation(
) -> model_evaluation.ModelEvaluation:
r"""Gets a ModelEvaluation.
+
+ .. code-block::
+
+ from google.cloud import aiplatform_v1
+
+ def sample_get_model_evaluation():
+ # Create a client
+ client = aiplatform_v1.ModelServiceClient()
+
+ # Initialize request argument(s)
+ request = aiplatform_v1.GetModelEvaluationRequest(
+ name="name_value",
+ )
+
+ # Make the request
+ response = client.get_model_evaluation(request=request)
+
+ # Handle the response
+ print(response)
+
Args:
request (Union[google.cloud.aiplatform_v1.types.GetModelEvaluationRequest, dict]):
The request object. Request message for
@@ -1092,6 +1254,27 @@ def list_model_evaluations(
) -> pagers.ListModelEvaluationsPager:
r"""Lists ModelEvaluations in a Model.
+
+ .. code-block::
+
+ from google.cloud import aiplatform_v1
+
+ def sample_list_model_evaluations():
+ # Create a client
+ client = aiplatform_v1.ModelServiceClient()
+
+ # Initialize request argument(s)
+ request = aiplatform_v1.ListModelEvaluationsRequest(
+ parent="parent_value",
+ )
+
+ # Make the request
+ page_result = client.list_model_evaluations(request=request)
+
+ # Handle the response
+ for response in page_result:
+ print(response)
+
Args:
request (Union[google.cloud.aiplatform_v1.types.ListModelEvaluationsRequest, dict]):
The request object. Request message for
@@ -1173,6 +1356,26 @@ def get_model_evaluation_slice(
) -> model_evaluation_slice.ModelEvaluationSlice:
r"""Gets a ModelEvaluationSlice.
+
+ .. code-block::
+
+ from google.cloud import aiplatform_v1
+
+ def sample_get_model_evaluation_slice():
+ # Create a client
+ client = aiplatform_v1.ModelServiceClient()
+
+ # Initialize request argument(s)
+ request = aiplatform_v1.GetModelEvaluationSliceRequest(
+ name="name_value",
+ )
+
+ # Make the request
+ response = client.get_model_evaluation_slice(request=request)
+
+ # Handle the response
+ print(response)
+
Args:
request (Union[google.cloud.aiplatform_v1.types.GetModelEvaluationSliceRequest, dict]):
The request object. Request message for
@@ -1249,6 +1452,27 @@ def list_model_evaluation_slices(
) -> pagers.ListModelEvaluationSlicesPager:
r"""Lists ModelEvaluationSlices in a ModelEvaluation.
+
+ .. code-block::
+
+ from google.cloud import aiplatform_v1
+
+ def sample_list_model_evaluation_slices():
+ # Create a client
+ client = aiplatform_v1.ModelServiceClient()
+
+ # Initialize request argument(s)
+ request = aiplatform_v1.ListModelEvaluationSlicesRequest(
+ parent="parent_value",
+ )
+
+ # Make the request
+ page_result = client.list_model_evaluation_slices(request=request)
+
+ # Handle the response
+ for response in page_result:
+ print(response)
+
Args:
request (Union[google.cloud.aiplatform_v1.types.ListModelEvaluationSlicesRequest, dict]):
The request object. Request message for
diff --git a/google/cloud/aiplatform_v1/services/pipeline_service/async_client.py b/google/cloud/aiplatform_v1/services/pipeline_service/async_client.py
index b48636f058..87bd5850bd 100644
--- a/google/cloud/aiplatform_v1/services/pipeline_service/async_client.py
+++ b/google/cloud/aiplatform_v1/services/pipeline_service/async_client.py
@@ -253,6 +253,32 @@ async def create_training_pipeline(
r"""Creates a TrainingPipeline. A created
TrainingPipeline right away will be attempted to be run.
+
+ .. code-block::
+
+ from google.cloud import aiplatform_v1
+
+ def sample_create_training_pipeline():
+ # Create a client
+ client = aiplatform_v1.PipelineServiceClient()
+
+ # Initialize request argument(s)
+ training_pipeline = aiplatform_v1.TrainingPipeline()
+ training_pipeline.display_name = "display_name_value"
+ training_pipeline.training_task_definition = "training_task_definition_value"
+ training_pipeline.training_task_inputs.null_value = "NULL_VALUE"
+
+ request = aiplatform_v1.CreateTrainingPipelineRequest(
+ parent="parent_value",
+ training_pipeline=training_pipeline,
+ )
+
+ # Make the request
+ response = client.create_training_pipeline(request=request)
+
+ # Handle the response
+ print(response)
+
Args:
request (Union[google.cloud.aiplatform_v1.types.CreateTrainingPipelineRequest, dict]):
The request object. Request message for
@@ -338,6 +364,25 @@ async def get_training_pipeline(
) -> training_pipeline.TrainingPipeline:
r"""Gets a TrainingPipeline.
+ .. code-block::
+
+ from google.cloud import aiplatform_v1
+
+ def sample_get_training_pipeline():
+ # Create a client
+ client = aiplatform_v1.PipelineServiceClient()
+
+ # Initialize request argument(s)
+ request = aiplatform_v1.GetTrainingPipelineRequest(
+ name="name_value",
+ )
+
+ # Make the request
+ response = client.get_training_pipeline(request=request)
+
+ # Handle the response
+ print(response)
+
Args:
request (Union[google.cloud.aiplatform_v1.types.GetTrainingPipelineRequest, dict]):
The request object. Request message for
@@ -414,6 +459,26 @@ async def list_training_pipelines(
) -> pagers.ListTrainingPipelinesAsyncPager:
r"""Lists TrainingPipelines in a Location.
+ .. code-block::
+
+ from google.cloud import aiplatform_v1
+
+ def sample_list_training_pipelines():
+ # Create a client
+ client = aiplatform_v1.PipelineServiceClient()
+
+ # Initialize request argument(s)
+ request = aiplatform_v1.ListTrainingPipelinesRequest(
+ parent="parent_value",
+ )
+
+ # Make the request
+ page_result = client.list_training_pipelines(request=request)
+
+ # Handle the response
+ for response in page_result:
+ print(response)
+
Args:
request (Union[google.cloud.aiplatform_v1.types.ListTrainingPipelinesRequest, dict]):
The request object. Request message for
@@ -495,6 +560,29 @@ async def delete_training_pipeline(
) -> operation_async.AsyncOperation:
r"""Deletes a TrainingPipeline.
+ .. code-block::
+
+ from google.cloud import aiplatform_v1
+
+ def sample_delete_training_pipeline():
+ # Create a client
+ client = aiplatform_v1.PipelineServiceClient()
+
+ # Initialize request argument(s)
+ request = aiplatform_v1.DeleteTrainingPipelineRequest(
+ name="name_value",
+ )
+
+ # Make the request
+ operation = client.delete_training_pipeline(request=request)
+
+ print("Waiting for operation to complete...")
+
+ response = operation.result()
+
+ # Handle the response
+ print(response)
+
Args:
request (Union[google.cloud.aiplatform_v1.types.DeleteTrainingPipelineRequest, dict]):
The request object. Request message for
@@ -600,6 +688,23 @@ async def cancel_training_pipeline(
[TrainingPipeline.state][google.cloud.aiplatform.v1.TrainingPipeline.state]
is set to ``CANCELLED``.
+
+ .. code-block::
+
+ from google.cloud import aiplatform_v1
+
+ def sample_cancel_training_pipeline():
+ # Create a client
+ client = aiplatform_v1.PipelineServiceClient()
+
+ # Initialize request argument(s)
+ request = aiplatform_v1.CancelTrainingPipelineRequest(
+ name="name_value",
+ )
+
+ # Make the request
+ client.cancel_training_pipeline(request=request)
+
Args:
request (Union[google.cloud.aiplatform_v1.types.CancelTrainingPipelineRequest, dict]):
The request object. Request message for
@@ -668,6 +773,26 @@ async def create_pipeline_job(
r"""Creates a PipelineJob. A PipelineJob will run
immediately when created.
+
+ .. code-block::
+
+ from google.cloud import aiplatform_v1
+
+ def sample_create_pipeline_job():
+ # Create a client
+ client = aiplatform_v1.PipelineServiceClient()
+
+ # Initialize request argument(s)
+ request = aiplatform_v1.CreatePipelineJobRequest(
+ parent="parent_value",
+ )
+
+ # Make the request
+ response = client.create_pipeline_job(request=request)
+
+ # Handle the response
+ print(response)
+
Args:
request (Union[google.cloud.aiplatform_v1.types.CreatePipelineJobRequest, dict]):
The request object. Request message for
@@ -760,6 +885,25 @@ async def get_pipeline_job(
) -> pipeline_job.PipelineJob:
r"""Gets a PipelineJob.
+ .. code-block::
+
+ from google.cloud import aiplatform_v1
+
+ def sample_get_pipeline_job():
+ # Create a client
+ client = aiplatform_v1.PipelineServiceClient()
+
+ # Initialize request argument(s)
+ request = aiplatform_v1.GetPipelineJobRequest(
+ name="name_value",
+ )
+
+ # Make the request
+ response = client.get_pipeline_job(request=request)
+
+ # Handle the response
+ print(response)
+
Args:
request (Union[google.cloud.aiplatform_v1.types.GetPipelineJobRequest, dict]):
The request object. Request message for
@@ -831,6 +975,26 @@ async def list_pipeline_jobs(
) -> pagers.ListPipelineJobsAsyncPager:
r"""Lists PipelineJobs in a Location.
+ .. code-block::
+
+ from google.cloud import aiplatform_v1
+
+ def sample_list_pipeline_jobs():
+ # Create a client
+ client = aiplatform_v1.PipelineServiceClient()
+
+ # Initialize request argument(s)
+ request = aiplatform_v1.ListPipelineJobsRequest(
+ parent="parent_value",
+ )
+
+ # Make the request
+ page_result = client.list_pipeline_jobs(request=request)
+
+ # Handle the response
+ for response in page_result:
+ print(response)
+
Args:
request (Union[google.cloud.aiplatform_v1.types.ListPipelineJobsRequest, dict]):
The request object. Request message for
@@ -912,6 +1076,29 @@ async def delete_pipeline_job(
) -> operation_async.AsyncOperation:
r"""Deletes a PipelineJob.
+ .. code-block::
+
+ from google.cloud import aiplatform_v1
+
+ def sample_delete_pipeline_job():
+ # Create a client
+ client = aiplatform_v1.PipelineServiceClient()
+
+ # Initialize request argument(s)
+ request = aiplatform_v1.DeletePipelineJobRequest(
+ name="name_value",
+ )
+
+ # Make the request
+ operation = client.delete_pipeline_job(request=request)
+
+ print("Waiting for operation to complete...")
+
+ response = operation.result()
+
+ # Handle the response
+ print(response)
+
Args:
request (Union[google.cloud.aiplatform_v1.types.DeletePipelineJobRequest, dict]):
The request object. Request message for
@@ -1017,6 +1204,23 @@ async def cancel_pipeline_job(
[PipelineJob.state][google.cloud.aiplatform.v1.PipelineJob.state]
is set to ``CANCELLED``.
+
+ .. code-block::
+
+ from google.cloud import aiplatform_v1
+
+ def sample_cancel_pipeline_job():
+ # Create a client
+ client = aiplatform_v1.PipelineServiceClient()
+
+ # Initialize request argument(s)
+ request = aiplatform_v1.CancelPipelineJobRequest(
+ name="name_value",
+ )
+
+ # Make the request
+ client.cancel_pipeline_job(request=request)
+
Args:
request (Union[google.cloud.aiplatform_v1.types.CancelPipelineJobRequest, dict]):
The request object. Request message for
diff --git a/google/cloud/aiplatform_v1/services/pipeline_service/client.py b/google/cloud/aiplatform_v1/services/pipeline_service/client.py
index 9781ff020a..829847157c 100644
--- a/google/cloud/aiplatform_v1/services/pipeline_service/client.py
+++ b/google/cloud/aiplatform_v1/services/pipeline_service/client.py
@@ -574,6 +574,33 @@ def create_training_pipeline(
r"""Creates a TrainingPipeline. A created
TrainingPipeline right away will be attempted to be run.
+
+
+ .. code-block::
+
+ from google.cloud import aiplatform_v1
+
+ def sample_create_training_pipeline():
+ # Create a client
+ client = aiplatform_v1.PipelineServiceClient()
+
+ # Initialize request argument(s)
+ training_pipeline = aiplatform_v1.TrainingPipeline()
+ training_pipeline.display_name = "display_name_value"
+ training_pipeline.training_task_definition = "training_task_definition_value"
+ training_pipeline.training_task_inputs.null_value = "NULL_VALUE"
+
+ request = aiplatform_v1.CreateTrainingPipelineRequest(
+ parent="parent_value",
+ training_pipeline=training_pipeline,
+ )
+
+ # Make the request
+ response = client.create_training_pipeline(request=request)
+
+ # Handle the response
+ print(response)
+
Args:
request (Union[google.cloud.aiplatform_v1.types.CreateTrainingPipelineRequest, dict]):
The request object. Request message for
@@ -659,6 +686,26 @@ def get_training_pipeline(
) -> training_pipeline.TrainingPipeline:
r"""Gets a TrainingPipeline.
+
+ .. code-block::
+
+ from google.cloud import aiplatform_v1
+
+ def sample_get_training_pipeline():
+ # Create a client
+ client = aiplatform_v1.PipelineServiceClient()
+
+ # Initialize request argument(s)
+ request = aiplatform_v1.GetTrainingPipelineRequest(
+ name="name_value",
+ )
+
+ # Make the request
+ response = client.get_training_pipeline(request=request)
+
+ # Handle the response
+ print(response)
+
Args:
request (Union[google.cloud.aiplatform_v1.types.GetTrainingPipelineRequest, dict]):
The request object. Request message for
@@ -735,6 +782,27 @@ def list_training_pipelines(
) -> pagers.ListTrainingPipelinesPager:
r"""Lists TrainingPipelines in a Location.
+
+ .. code-block::
+
+ from google.cloud import aiplatform_v1
+
+ def sample_list_training_pipelines():
+ # Create a client
+ client = aiplatform_v1.PipelineServiceClient()
+
+ # Initialize request argument(s)
+ request = aiplatform_v1.ListTrainingPipelinesRequest(
+ parent="parent_value",
+ )
+
+ # Make the request
+ page_result = client.list_training_pipelines(request=request)
+
+ # Handle the response
+ for response in page_result:
+ print(response)
+
Args:
request (Union[google.cloud.aiplatform_v1.types.ListTrainingPipelinesRequest, dict]):
The request object. Request message for
@@ -816,6 +884,30 @@ def delete_training_pipeline(
) -> gac_operation.Operation:
r"""Deletes a TrainingPipeline.
+
+ .. code-block::
+
+ from google.cloud import aiplatform_v1
+
+ def sample_delete_training_pipeline():
+ # Create a client
+ client = aiplatform_v1.PipelineServiceClient()
+
+ # Initialize request argument(s)
+ request = aiplatform_v1.DeleteTrainingPipelineRequest(
+ name="name_value",
+ )
+
+ # Make the request
+ operation = client.delete_training_pipeline(request=request)
+
+ print("Waiting for operation to complete...")
+
+ response = operation.result()
+
+ # Handle the response
+ print(response)
+
Args:
request (Union[google.cloud.aiplatform_v1.types.DeleteTrainingPipelineRequest, dict]):
The request object. Request message for
@@ -921,6 +1013,24 @@ def cancel_training_pipeline(
[TrainingPipeline.state][google.cloud.aiplatform.v1.TrainingPipeline.state]
is set to ``CANCELLED``.
+
+
+ .. code-block::
+
+ from google.cloud import aiplatform_v1
+
+ def sample_cancel_training_pipeline():
+ # Create a client
+ client = aiplatform_v1.PipelineServiceClient()
+
+ # Initialize request argument(s)
+ request = aiplatform_v1.CancelTrainingPipelineRequest(
+ name="name_value",
+ )
+
+ # Make the request
+ client.cancel_training_pipeline(request=request)
+
Args:
request (Union[google.cloud.aiplatform_v1.types.CancelTrainingPipelineRequest, dict]):
The request object. Request message for
@@ -989,6 +1099,27 @@ def create_pipeline_job(
r"""Creates a PipelineJob. A PipelineJob will run
immediately when created.
+
+
+ .. code-block::
+
+ from google.cloud import aiplatform_v1
+
+ def sample_create_pipeline_job():
+ # Create a client
+ client = aiplatform_v1.PipelineServiceClient()
+
+ # Initialize request argument(s)
+ request = aiplatform_v1.CreatePipelineJobRequest(
+ parent="parent_value",
+ )
+
+ # Make the request
+ response = client.create_pipeline_job(request=request)
+
+ # Handle the response
+ print(response)
+
Args:
request (Union[google.cloud.aiplatform_v1.types.CreatePipelineJobRequest, dict]):
The request object. Request message for
@@ -1081,6 +1212,26 @@ def get_pipeline_job(
) -> pipeline_job.PipelineJob:
r"""Gets a PipelineJob.
+
+ .. code-block::
+
+ from google.cloud import aiplatform_v1
+
+ def sample_get_pipeline_job():
+ # Create a client
+ client = aiplatform_v1.PipelineServiceClient()
+
+ # Initialize request argument(s)
+ request = aiplatform_v1.GetPipelineJobRequest(
+ name="name_value",
+ )
+
+ # Make the request
+ response = client.get_pipeline_job(request=request)
+
+ # Handle the response
+ print(response)
+
Args:
request (Union[google.cloud.aiplatform_v1.types.GetPipelineJobRequest, dict]):
The request object. Request message for
@@ -1152,6 +1303,27 @@ def list_pipeline_jobs(
) -> pagers.ListPipelineJobsPager:
r"""Lists PipelineJobs in a Location.
+
+ .. code-block::
+
+ from google.cloud import aiplatform_v1
+
+ def sample_list_pipeline_jobs():
+ # Create a client
+ client = aiplatform_v1.PipelineServiceClient()
+
+ # Initialize request argument(s)
+ request = aiplatform_v1.ListPipelineJobsRequest(
+ parent="parent_value",
+ )
+
+ # Make the request
+ page_result = client.list_pipeline_jobs(request=request)
+
+ # Handle the response
+ for response in page_result:
+ print(response)
+
Args:
request (Union[google.cloud.aiplatform_v1.types.ListPipelineJobsRequest, dict]):
The request object. Request message for
@@ -1233,6 +1405,30 @@ def delete_pipeline_job(
) -> gac_operation.Operation:
r"""Deletes a PipelineJob.
+
+ .. code-block::
+
+ from google.cloud import aiplatform_v1
+
+ def sample_delete_pipeline_job():
+ # Create a client
+ client = aiplatform_v1.PipelineServiceClient()
+
+ # Initialize request argument(s)
+ request = aiplatform_v1.DeletePipelineJobRequest(
+ name="name_value",
+ )
+
+ # Make the request
+ operation = client.delete_pipeline_job(request=request)
+
+ print("Waiting for operation to complete...")
+
+ response = operation.result()
+
+ # Handle the response
+ print(response)
+
Args:
request (Union[google.cloud.aiplatform_v1.types.DeletePipelineJobRequest, dict]):
The request object. Request message for
@@ -1338,6 +1534,24 @@ def cancel_pipeline_job(
[PipelineJob.state][google.cloud.aiplatform.v1.PipelineJob.state]
is set to ``CANCELLED``.
+
+
+ .. code-block::
+
+ from google.cloud import aiplatform_v1
+
+ def sample_cancel_pipeline_job():
+ # Create a client
+ client = aiplatform_v1.PipelineServiceClient()
+
+ # Initialize request argument(s)
+ request = aiplatform_v1.CancelPipelineJobRequest(
+ name="name_value",
+ )
+
+ # Make the request
+ client.cancel_pipeline_job(request=request)
+
Args:
request (Union[google.cloud.aiplatform_v1.types.CancelPipelineJobRequest, dict]):
The request object. Request message for
diff --git a/google/cloud/aiplatform_v1/services/prediction_service/async_client.py b/google/cloud/aiplatform_v1/services/prediction_service/async_client.py
index e6126cd653..9d8bc5a040 100644
--- a/google/cloud/aiplatform_v1/services/prediction_service/async_client.py
+++ b/google/cloud/aiplatform_v1/services/prediction_service/async_client.py
@@ -220,6 +220,29 @@ async def predict(
) -> prediction_service.PredictResponse:
r"""Perform an online prediction.
+ .. code-block::
+
+ from google.cloud import aiplatform_v1
+
+ def sample_predict():
+ # Create a client
+ client = aiplatform_v1.PredictionServiceClient()
+
+ # Initialize request argument(s)
+ instances = aiplatform_v1.Value()
+ instances.null_value = "NULL_VALUE"
+
+ request = aiplatform_v1.PredictRequest(
+ endpoint="endpoint_value",
+ instances=instances,
+ )
+
+ # Make the request
+ response = client.predict(request=request)
+
+ # Handle the response
+ print(response)
+
Args:
request (Union[google.cloud.aiplatform_v1.types.PredictRequest, dict]):
The request object. Request message for
@@ -334,6 +357,26 @@ async def raw_predict(
[DeployedModel][google.cloud.aiplatform.v1.DeployedModel]
that served this prediction.
+
+ .. code-block::
+
+ from google.cloud import aiplatform_v1
+
+ def sample_raw_predict():
+ # Create a client
+ client = aiplatform_v1.PredictionServiceClient()
+
+ # Initialize request argument(s)
+ request = aiplatform_v1.RawPredictRequest(
+ endpoint="endpoint_value",
+ )
+
+ # Make the request
+ response = client.raw_predict(request=request)
+
+ # Handle the response
+ print(response)
+
Args:
request (Union[google.cloud.aiplatform_v1.types.RawPredictRequest, dict]):
The request object. Request message for
@@ -492,6 +535,30 @@ async def explain(
populated. Only deployed AutoML tabular Models have
explanation_spec.
+
+ .. code-block::
+
+ from google.cloud import aiplatform_v1
+
+ def sample_explain():
+ # Create a client
+ client = aiplatform_v1.PredictionServiceClient()
+
+ # Initialize request argument(s)
+ instances = aiplatform_v1.Value()
+ instances.null_value = "NULL_VALUE"
+
+ request = aiplatform_v1.ExplainRequest(
+ endpoint="endpoint_value",
+ instances=instances,
+ )
+
+ # Make the request
+ response = client.explain(request=request)
+
+ # Handle the response
+ print(response)
+
Args:
request (Union[google.cloud.aiplatform_v1.types.ExplainRequest, dict]):
The request object. Request message for
diff --git a/google/cloud/aiplatform_v1/services/prediction_service/client.py b/google/cloud/aiplatform_v1/services/prediction_service/client.py
index b145cc7c32..9eab5431c8 100644
--- a/google/cloud/aiplatform_v1/services/prediction_service/client.py
+++ b/google/cloud/aiplatform_v1/services/prediction_service/client.py
@@ -433,6 +433,30 @@ def predict(
) -> prediction_service.PredictResponse:
r"""Perform an online prediction.
+
+ .. code-block::
+
+ from google.cloud import aiplatform_v1
+
+ def sample_predict():
+ # Create a client
+ client = aiplatform_v1.PredictionServiceClient()
+
+ # Initialize request argument(s)
+ instances = aiplatform_v1.Value()
+ instances.null_value = "NULL_VALUE"
+
+ request = aiplatform_v1.PredictRequest(
+ endpoint="endpoint_value",
+ instances=instances,
+ )
+
+ # Make the request
+ response = client.predict(request=request)
+
+ # Handle the response
+ print(response)
+
Args:
request (Union[google.cloud.aiplatform_v1.types.PredictRequest, dict]):
The request object. Request message for
@@ -547,6 +571,27 @@ def raw_predict(
[DeployedModel][google.cloud.aiplatform.v1.DeployedModel]
that served this prediction.
+
+
+ .. code-block::
+
+ from google.cloud import aiplatform_v1
+
+ def sample_raw_predict():
+ # Create a client
+ client = aiplatform_v1.PredictionServiceClient()
+
+ # Initialize request argument(s)
+ request = aiplatform_v1.RawPredictRequest(
+ endpoint="endpoint_value",
+ )
+
+ # Make the request
+ response = client.raw_predict(request=request)
+
+ # Handle the response
+ print(response)
+
Args:
request (Union[google.cloud.aiplatform_v1.types.RawPredictRequest, dict]):
The request object. Request message for
@@ -705,6 +750,31 @@ def explain(
populated. Only deployed AutoML tabular Models have
explanation_spec.
+
+
+ .. code-block::
+
+ from google.cloud import aiplatform_v1
+
+ def sample_explain():
+ # Create a client
+ client = aiplatform_v1.PredictionServiceClient()
+
+ # Initialize request argument(s)
+ instances = aiplatform_v1.Value()
+ instances.null_value = "NULL_VALUE"
+
+ request = aiplatform_v1.ExplainRequest(
+ endpoint="endpoint_value",
+ instances=instances,
+ )
+
+ # Make the request
+ response = client.explain(request=request)
+
+ # Handle the response
+ print(response)
+
Args:
request (Union[google.cloud.aiplatform_v1.types.ExplainRequest, dict]):
The request object. Request message for
diff --git a/google/cloud/aiplatform_v1/services/specialist_pool_service/async_client.py b/google/cloud/aiplatform_v1/services/specialist_pool_service/async_client.py
index a1142f9b2a..7c166e938d 100644
--- a/google/cloud/aiplatform_v1/services/specialist_pool_service/async_client.py
+++ b/google/cloud/aiplatform_v1/services/specialist_pool_service/async_client.py
@@ -236,6 +236,34 @@ async def create_specialist_pool(
) -> operation_async.AsyncOperation:
r"""Creates a SpecialistPool.
+ .. code-block::
+
+ from google.cloud import aiplatform_v1
+
+ def sample_create_specialist_pool():
+ # Create a client
+ client = aiplatform_v1.SpecialistPoolServiceClient()
+
+ # Initialize request argument(s)
+ specialist_pool = aiplatform_v1.SpecialistPool()
+ specialist_pool.name = "name_value"
+ specialist_pool.display_name = "display_name_value"
+
+ request = aiplatform_v1.CreateSpecialistPoolRequest(
+ parent="parent_value",
+ specialist_pool=specialist_pool,
+ )
+
+ # Make the request
+ operation = client.create_specialist_pool(request=request)
+
+ print("Waiting for operation to complete...")
+
+ response = operation.result()
+
+ # Handle the response
+ print(response)
+
Args:
request (Union[google.cloud.aiplatform_v1.types.CreateSpecialistPoolRequest, dict]):
The request object. Request message for
@@ -333,6 +361,25 @@ async def get_specialist_pool(
) -> specialist_pool.SpecialistPool:
r"""Gets a SpecialistPool.
+ .. code-block::
+
+ from google.cloud import aiplatform_v1
+
+ def sample_get_specialist_pool():
+ # Create a client
+ client = aiplatform_v1.SpecialistPoolServiceClient()
+
+ # Initialize request argument(s)
+ request = aiplatform_v1.GetSpecialistPoolRequest(
+ name="name_value",
+ )
+
+ # Make the request
+ response = client.get_specialist_pool(request=request)
+
+ # Handle the response
+ print(response)
+
Args:
request (Union[google.cloud.aiplatform_v1.types.GetSpecialistPoolRequest, dict]):
The request object. Request message for
@@ -415,6 +462,26 @@ async def list_specialist_pools(
) -> pagers.ListSpecialistPoolsAsyncPager:
r"""Lists SpecialistPools in a Location.
+ .. code-block::
+
+ from google.cloud import aiplatform_v1
+
+ def sample_list_specialist_pools():
+ # Create a client
+ client = aiplatform_v1.SpecialistPoolServiceClient()
+
+ # Initialize request argument(s)
+ request = aiplatform_v1.ListSpecialistPoolsRequest(
+ parent="parent_value",
+ )
+
+ # Make the request
+ page_result = client.list_specialist_pools(request=request)
+
+ # Handle the response
+ for response in page_result:
+ print(response)
+
Args:
request (Union[google.cloud.aiplatform_v1.types.ListSpecialistPoolsRequest, dict]):
The request object. Request message for
@@ -499,6 +566,30 @@ async def delete_specialist_pool(
r"""Deletes a SpecialistPool as well as all Specialists
in the pool.
+
+ .. code-block::
+
+ from google.cloud import aiplatform_v1
+
+ def sample_delete_specialist_pool():
+ # Create a client
+ client = aiplatform_v1.SpecialistPoolServiceClient()
+
+ # Initialize request argument(s)
+ request = aiplatform_v1.DeleteSpecialistPoolRequest(
+ name="name_value",
+ )
+
+ # Make the request
+ operation = client.delete_specialist_pool(request=request)
+
+ print("Waiting for operation to complete...")
+
+ response = operation.result()
+
+ # Handle the response
+ print(response)
+
Args:
request (Union[google.cloud.aiplatform_v1.types.DeleteSpecialistPoolRequest, dict]):
The request object. Request message for
@@ -595,6 +686,33 @@ async def update_specialist_pool(
) -> operation_async.AsyncOperation:
r"""Updates a SpecialistPool.
+ .. code-block::
+
+ from google.cloud import aiplatform_v1
+
+ def sample_update_specialist_pool():
+ # Create a client
+ client = aiplatform_v1.SpecialistPoolServiceClient()
+
+ # Initialize request argument(s)
+ specialist_pool = aiplatform_v1.SpecialistPool()
+ specialist_pool.name = "name_value"
+ specialist_pool.display_name = "display_name_value"
+
+ request = aiplatform_v1.UpdateSpecialistPoolRequest(
+ specialist_pool=specialist_pool,
+ )
+
+ # Make the request
+ operation = client.update_specialist_pool(request=request)
+
+ print("Waiting for operation to complete...")
+
+ response = operation.result()
+
+ # Handle the response
+ print(response)
+
Args:
request (Union[google.cloud.aiplatform_v1.types.UpdateSpecialistPoolRequest, dict]):
The request object. Request message for
diff --git a/google/cloud/aiplatform_v1/services/specialist_pool_service/client.py b/google/cloud/aiplatform_v1/services/specialist_pool_service/client.py
index 374c584431..2765cfdedb 100644
--- a/google/cloud/aiplatform_v1/services/specialist_pool_service/client.py
+++ b/google/cloud/aiplatform_v1/services/specialist_pool_service/client.py
@@ -428,6 +428,35 @@ def create_specialist_pool(
) -> gac_operation.Operation:
r"""Creates a SpecialistPool.
+
+ .. code-block::
+
+ from google.cloud import aiplatform_v1
+
+ def sample_create_specialist_pool():
+ # Create a client
+ client = aiplatform_v1.SpecialistPoolServiceClient()
+
+ # Initialize request argument(s)
+ specialist_pool = aiplatform_v1.SpecialistPool()
+ specialist_pool.name = "name_value"
+ specialist_pool.display_name = "display_name_value"
+
+ request = aiplatform_v1.CreateSpecialistPoolRequest(
+ parent="parent_value",
+ specialist_pool=specialist_pool,
+ )
+
+ # Make the request
+ operation = client.create_specialist_pool(request=request)
+
+ print("Waiting for operation to complete...")
+
+ response = operation.result()
+
+ # Handle the response
+ print(response)
+
Args:
request (Union[google.cloud.aiplatform_v1.types.CreateSpecialistPoolRequest, dict]):
The request object. Request message for
@@ -525,6 +554,26 @@ def get_specialist_pool(
) -> specialist_pool.SpecialistPool:
r"""Gets a SpecialistPool.
+
+ .. code-block::
+
+ from google.cloud import aiplatform_v1
+
+ def sample_get_specialist_pool():
+ # Create a client
+ client = aiplatform_v1.SpecialistPoolServiceClient()
+
+ # Initialize request argument(s)
+ request = aiplatform_v1.GetSpecialistPoolRequest(
+ name="name_value",
+ )
+
+ # Make the request
+ response = client.get_specialist_pool(request=request)
+
+ # Handle the response
+ print(response)
+
Args:
request (Union[google.cloud.aiplatform_v1.types.GetSpecialistPoolRequest, dict]):
The request object. Request message for
@@ -607,6 +656,27 @@ def list_specialist_pools(
) -> pagers.ListSpecialistPoolsPager:
r"""Lists SpecialistPools in a Location.
+
+ .. code-block::
+
+ from google.cloud import aiplatform_v1
+
+ def sample_list_specialist_pools():
+ # Create a client
+ client = aiplatform_v1.SpecialistPoolServiceClient()
+
+ # Initialize request argument(s)
+ request = aiplatform_v1.ListSpecialistPoolsRequest(
+ parent="parent_value",
+ )
+
+ # Make the request
+ page_result = client.list_specialist_pools(request=request)
+
+ # Handle the response
+ for response in page_result:
+ print(response)
+
Args:
request (Union[google.cloud.aiplatform_v1.types.ListSpecialistPoolsRequest, dict]):
The request object. Request message for
@@ -691,6 +761,31 @@ def delete_specialist_pool(
r"""Deletes a SpecialistPool as well as all Specialists
in the pool.
+
+
+ .. code-block::
+
+ from google.cloud import aiplatform_v1
+
+ def sample_delete_specialist_pool():
+ # Create a client
+ client = aiplatform_v1.SpecialistPoolServiceClient()
+
+ # Initialize request argument(s)
+ request = aiplatform_v1.DeleteSpecialistPoolRequest(
+ name="name_value",
+ )
+
+ # Make the request
+ operation = client.delete_specialist_pool(request=request)
+
+ print("Waiting for operation to complete...")
+
+ response = operation.result()
+
+ # Handle the response
+ print(response)
+
Args:
request (Union[google.cloud.aiplatform_v1.types.DeleteSpecialistPoolRequest, dict]):
The request object. Request message for
@@ -787,6 +882,34 @@ def update_specialist_pool(
) -> gac_operation.Operation:
r"""Updates a SpecialistPool.
+
+ .. code-block::
+
+ from google.cloud import aiplatform_v1
+
+ def sample_update_specialist_pool():
+ # Create a client
+ client = aiplatform_v1.SpecialistPoolServiceClient()
+
+ # Initialize request argument(s)
+ specialist_pool = aiplatform_v1.SpecialistPool()
+ specialist_pool.name = "name_value"
+ specialist_pool.display_name = "display_name_value"
+
+ request = aiplatform_v1.UpdateSpecialistPoolRequest(
+ specialist_pool=specialist_pool,
+ )
+
+ # Make the request
+ operation = client.update_specialist_pool(request=request)
+
+ print("Waiting for operation to complete...")
+
+ response = operation.result()
+
+ # Handle the response
+ print(response)
+
Args:
request (Union[google.cloud.aiplatform_v1.types.UpdateSpecialistPoolRequest, dict]):
The request object. Request message for
diff --git a/google/cloud/aiplatform_v1/services/tensorboard_service/async_client.py b/google/cloud/aiplatform_v1/services/tensorboard_service/async_client.py
index f2ea3ffc3b..009c59d561 100644
--- a/google/cloud/aiplatform_v1/services/tensorboard_service/async_client.py
+++ b/google/cloud/aiplatform_v1/services/tensorboard_service/async_client.py
@@ -262,6 +262,33 @@ async def create_tensorboard(
) -> operation_async.AsyncOperation:
r"""Creates a Tensorboard.
+ .. code-block::
+
+ from google.cloud import aiplatform_v1
+
+ def sample_create_tensorboard():
+ # Create a client
+ client = aiplatform_v1.TensorboardServiceClient()
+
+ # Initialize request argument(s)
+ tensorboard = aiplatform_v1.Tensorboard()
+ tensorboard.display_name = "display_name_value"
+
+ request = aiplatform_v1.CreateTensorboardRequest(
+ parent="parent_value",
+ tensorboard=tensorboard,
+ )
+
+ # Make the request
+ operation = client.create_tensorboard(request=request)
+
+ print("Waiting for operation to complete...")
+
+ response = operation.result()
+
+ # Handle the response
+ print(response)
+
Args:
request (Union[google.cloud.aiplatform_v1.types.CreateTensorboardRequest, dict]):
The request object. Request message for
@@ -353,6 +380,25 @@ async def get_tensorboard(
) -> tensorboard.Tensorboard:
r"""Gets a Tensorboard.
+ .. code-block::
+
+ from google.cloud import aiplatform_v1
+
+ def sample_get_tensorboard():
+ # Create a client
+ client = aiplatform_v1.TensorboardServiceClient()
+
+ # Initialize request argument(s)
+ request = aiplatform_v1.GetTensorboardRequest(
+ name="name_value",
+ )
+
+ # Make the request
+ response = client.get_tensorboard(request=request)
+
+ # Handle the response
+ print(response)
+
Args:
request (Union[google.cloud.aiplatform_v1.types.GetTensorboardRequest, dict]):
The request object. Request message for
@@ -429,6 +475,32 @@ async def update_tensorboard(
) -> operation_async.AsyncOperation:
r"""Updates a Tensorboard.
+ .. code-block::
+
+ from google.cloud import aiplatform_v1
+
+ def sample_update_tensorboard():
+ # Create a client
+ client = aiplatform_v1.TensorboardServiceClient()
+
+ # Initialize request argument(s)
+ tensorboard = aiplatform_v1.Tensorboard()
+ tensorboard.display_name = "display_name_value"
+
+ request = aiplatform_v1.UpdateTensorboardRequest(
+ tensorboard=tensorboard,
+ )
+
+ # Make the request
+ operation = client.update_tensorboard(request=request)
+
+ print("Waiting for operation to complete...")
+
+ response = operation.result()
+
+ # Handle the response
+ print(response)
+
Args:
request (Union[google.cloud.aiplatform_v1.types.UpdateTensorboardRequest, dict]):
The request object. Request message for
@@ -529,6 +601,26 @@ async def list_tensorboards(
) -> pagers.ListTensorboardsAsyncPager:
r"""Lists Tensorboards in a Location.
+ .. code-block::
+
+ from google.cloud import aiplatform_v1
+
+ def sample_list_tensorboards():
+ # Create a client
+ client = aiplatform_v1.TensorboardServiceClient()
+
+ # Initialize request argument(s)
+ request = aiplatform_v1.ListTensorboardsRequest(
+ parent="parent_value",
+ )
+
+ # Make the request
+ page_result = client.list_tensorboards(request=request)
+
+ # Handle the response
+ for response in page_result:
+ print(response)
+
Args:
request (Union[google.cloud.aiplatform_v1.types.ListTensorboardsRequest, dict]):
The request object. Request message for
@@ -610,6 +702,29 @@ async def delete_tensorboard(
) -> operation_async.AsyncOperation:
r"""Deletes a Tensorboard.
+ .. code-block::
+
+ from google.cloud import aiplatform_v1
+
+ def sample_delete_tensorboard():
+ # Create a client
+ client = aiplatform_v1.TensorboardServiceClient()
+
+ # Initialize request argument(s)
+ request = aiplatform_v1.DeleteTensorboardRequest(
+ name="name_value",
+ )
+
+ # Make the request
+ operation = client.delete_tensorboard(request=request)
+
+ print("Waiting for operation to complete...")
+
+ response = operation.result()
+
+ # Handle the response
+ print(response)
+
Args:
request (Union[google.cloud.aiplatform_v1.types.DeleteTensorboardRequest, dict]):
The request object. Request message for
@@ -707,6 +822,26 @@ async def create_tensorboard_experiment(
) -> gca_tensorboard_experiment.TensorboardExperiment:
r"""Creates a TensorboardExperiment.
+ .. code-block::
+
+ from google.cloud import aiplatform_v1
+
+ def sample_create_tensorboard_experiment():
+ # Create a client
+ client = aiplatform_v1.TensorboardServiceClient()
+
+ # Initialize request argument(s)
+ request = aiplatform_v1.CreateTensorboardExperimentRequest(
+ parent="parent_value",
+ tensorboard_experiment_id="tensorboard_experiment_id_value",
+ )
+
+ # Make the request
+ response = client.create_tensorboard_experiment(request=request)
+
+ # Handle the response
+ print(response)
+
Args:
request (Union[google.cloud.aiplatform_v1.types.CreateTensorboardExperimentRequest, dict]):
The request object. Request message for
@@ -805,6 +940,25 @@ async def get_tensorboard_experiment(
) -> tensorboard_experiment.TensorboardExperiment:
r"""Gets a TensorboardExperiment.
+ .. code-block::
+
+ from google.cloud import aiplatform_v1
+
+ def sample_get_tensorboard_experiment():
+ # Create a client
+ client = aiplatform_v1.TensorboardServiceClient()
+
+ # Initialize request argument(s)
+ request = aiplatform_v1.GetTensorboardExperimentRequest(
+ name="name_value",
+ )
+
+ # Make the request
+ response = client.get_tensorboard_experiment(request=request)
+
+ # Handle the response
+ print(response)
+
Args:
request (Union[google.cloud.aiplatform_v1.types.GetTensorboardExperimentRequest, dict]):
The request object. Request message for
@@ -882,6 +1036,24 @@ async def update_tensorboard_experiment(
) -> gca_tensorboard_experiment.TensorboardExperiment:
r"""Updates a TensorboardExperiment.
+ .. code-block::
+
+ from google.cloud import aiplatform_v1
+
+ def sample_update_tensorboard_experiment():
+ # Create a client
+ client = aiplatform_v1.TensorboardServiceClient()
+
+ # Initialize request argument(s)
+ request = aiplatform_v1.UpdateTensorboardExperimentRequest(
+ )
+
+ # Make the request
+ response = client.update_tensorboard_experiment(request=request)
+
+ # Handle the response
+ print(response)
+
Args:
request (Union[google.cloud.aiplatform_v1.types.UpdateTensorboardExperimentRequest, dict]):
The request object. Request message for
@@ -975,6 +1147,26 @@ async def list_tensorboard_experiments(
) -> pagers.ListTensorboardExperimentsAsyncPager:
r"""Lists TensorboardExperiments in a Location.
+ .. code-block::
+
+ from google.cloud import aiplatform_v1
+
+ def sample_list_tensorboard_experiments():
+ # Create a client
+ client = aiplatform_v1.TensorboardServiceClient()
+
+ # Initialize request argument(s)
+ request = aiplatform_v1.ListTensorboardExperimentsRequest(
+ parent="parent_value",
+ )
+
+ # Make the request
+ page_result = client.list_tensorboard_experiments(request=request)
+
+ # Handle the response
+ for response in page_result:
+ print(response)
+
Args:
request (Union[google.cloud.aiplatform_v1.types.ListTensorboardExperimentsRequest, dict]):
The request object. Request message for
@@ -1059,6 +1251,29 @@ async def delete_tensorboard_experiment(
) -> operation_async.AsyncOperation:
r"""Deletes a TensorboardExperiment.
+ .. code-block::
+
+ from google.cloud import aiplatform_v1
+
+ def sample_delete_tensorboard_experiment():
+ # Create a client
+ client = aiplatform_v1.TensorboardServiceClient()
+
+ # Initialize request argument(s)
+ request = aiplatform_v1.DeleteTensorboardExperimentRequest(
+ name="name_value",
+ )
+
+ # Make the request
+ operation = client.delete_tensorboard_experiment(request=request)
+
+ print("Waiting for operation to complete...")
+
+ response = operation.result()
+
+ # Handle the response
+ print(response)
+
Args:
request (Union[google.cloud.aiplatform_v1.types.DeleteTensorboardExperimentRequest, dict]):
The request object. Request message for
@@ -1154,6 +1369,30 @@ async def create_tensorboard_run(
) -> gca_tensorboard_run.TensorboardRun:
r"""Creates a TensorboardRun.
+ .. code-block::
+
+ from google.cloud import aiplatform_v1
+
+ def sample_create_tensorboard_run():
+ # Create a client
+ client = aiplatform_v1.TensorboardServiceClient()
+
+ # Initialize request argument(s)
+ tensorboard_run = aiplatform_v1.TensorboardRun()
+ tensorboard_run.display_name = "display_name_value"
+
+ request = aiplatform_v1.CreateTensorboardRunRequest(
+ parent="parent_value",
+ tensorboard_run=tensorboard_run,
+ tensorboard_run_id="tensorboard_run_id_value",
+ )
+
+ # Make the request
+ response = client.create_tensorboard_run(request=request)
+
+ # Handle the response
+ print(response)
+
Args:
request (Union[google.cloud.aiplatform_v1.types.CreateTensorboardRunRequest, dict]):
The request object. Request message for
@@ -1253,6 +1492,31 @@ async def batch_create_tensorboard_runs(
) -> tensorboard_service.BatchCreateTensorboardRunsResponse:
r"""Batch create TensorboardRuns.
+ .. code-block::
+
+ from google.cloud import aiplatform_v1
+
+ def sample_batch_create_tensorboard_runs():
+ # Create a client
+ client = aiplatform_v1.TensorboardServiceClient()
+
+ # Initialize request argument(s)
+ requests = aiplatform_v1.CreateTensorboardRunRequest()
+ requests.parent = "parent_value"
+ requests.tensorboard_run.display_name = "display_name_value"
+ requests.tensorboard_run_id = "tensorboard_run_id_value"
+
+ request = aiplatform_v1.BatchCreateTensorboardRunsRequest(
+ parent="parent_value",
+ requests=requests,
+ )
+
+ # Make the request
+ response = client.batch_create_tensorboard_runs(request=request)
+
+ # Handle the response
+ print(response)
+
Args:
request (Union[google.cloud.aiplatform_v1.types.BatchCreateTensorboardRunsRequest, dict]):
The request object. Request message for
@@ -1339,6 +1603,25 @@ async def get_tensorboard_run(
) -> tensorboard_run.TensorboardRun:
r"""Gets a TensorboardRun.
+ .. code-block::
+
+ from google.cloud import aiplatform_v1
+
+ def sample_get_tensorboard_run():
+ # Create a client
+ client = aiplatform_v1.TensorboardServiceClient()
+
+ # Initialize request argument(s)
+ request = aiplatform_v1.GetTensorboardRunRequest(
+ name="name_value",
+ )
+
+ # Make the request
+ response = client.get_tensorboard_run(request=request)
+
+ # Handle the response
+ print(response)
+
Args:
request (Union[google.cloud.aiplatform_v1.types.GetTensorboardRunRequest, dict]):
The request object. Request message for
@@ -1414,6 +1697,28 @@ async def update_tensorboard_run(
) -> gca_tensorboard_run.TensorboardRun:
r"""Updates a TensorboardRun.
+ .. code-block::
+
+ from google.cloud import aiplatform_v1
+
+ def sample_update_tensorboard_run():
+ # Create a client
+ client = aiplatform_v1.TensorboardServiceClient()
+
+ # Initialize request argument(s)
+ tensorboard_run = aiplatform_v1.TensorboardRun()
+ tensorboard_run.display_name = "display_name_value"
+
+ request = aiplatform_v1.UpdateTensorboardRunRequest(
+ tensorboard_run=tensorboard_run,
+ )
+
+ # Make the request
+ response = client.update_tensorboard_run(request=request)
+
+ # Handle the response
+ print(response)
+
Args:
request (Union[google.cloud.aiplatform_v1.types.UpdateTensorboardRunRequest, dict]):
The request object. Request message for
@@ -1504,6 +1809,26 @@ async def list_tensorboard_runs(
) -> pagers.ListTensorboardRunsAsyncPager:
r"""Lists TensorboardRuns in a Location.
+ .. code-block::
+
+ from google.cloud import aiplatform_v1
+
+ def sample_list_tensorboard_runs():
+ # Create a client
+ client = aiplatform_v1.TensorboardServiceClient()
+
+ # Initialize request argument(s)
+ request = aiplatform_v1.ListTensorboardRunsRequest(
+ parent="parent_value",
+ )
+
+ # Make the request
+ page_result = client.list_tensorboard_runs(request=request)
+
+ # Handle the response
+ for response in page_result:
+ print(response)
+
Args:
request (Union[google.cloud.aiplatform_v1.types.ListTensorboardRunsRequest, dict]):
The request object. Request message for
@@ -1586,6 +1911,29 @@ async def delete_tensorboard_run(
) -> operation_async.AsyncOperation:
r"""Deletes a TensorboardRun.
+ .. code-block::
+
+ from google.cloud import aiplatform_v1
+
+ def sample_delete_tensorboard_run():
+ # Create a client
+ client = aiplatform_v1.TensorboardServiceClient()
+
+ # Initialize request argument(s)
+ request = aiplatform_v1.DeleteTensorboardRunRequest(
+ name="name_value",
+ )
+
+ # Make the request
+ operation = client.delete_tensorboard_run(request=request)
+
+ print("Waiting for operation to complete...")
+
+ response = operation.result()
+
+ # Handle the response
+ print(response)
+
Args:
request (Union[google.cloud.aiplatform_v1.types.DeleteTensorboardRunRequest, dict]):
The request object. Request message for
@@ -1685,6 +2033,32 @@ async def batch_create_tensorboard_time_series(
r"""Batch create TensorboardTimeSeries that belong to a
TensorboardExperiment.
+
+ .. code-block::
+
+ from google.cloud import aiplatform_v1
+
+ def sample_batch_create_tensorboard_time_series():
+ # Create a client
+ client = aiplatform_v1.TensorboardServiceClient()
+
+ # Initialize request argument(s)
+ requests = aiplatform_v1.CreateTensorboardTimeSeriesRequest()
+ requests.parent = "parent_value"
+ requests.tensorboard_time_series.display_name = "display_name_value"
+ requests.tensorboard_time_series.value_type = "BLOB_SEQUENCE"
+
+ request = aiplatform_v1.BatchCreateTensorboardTimeSeriesRequest(
+ parent="parent_value",
+ requests=requests,
+ )
+
+ # Make the request
+ response = client.batch_create_tensorboard_time_series(request=request)
+
+ # Handle the response
+ print(response)
+
Args:
request (Union[google.cloud.aiplatform_v1.types.BatchCreateTensorboardTimeSeriesRequest, dict]):
The request object. Request message for
@@ -1775,6 +2149,30 @@ async def create_tensorboard_time_series(
) -> gca_tensorboard_time_series.TensorboardTimeSeries:
r"""Creates a TensorboardTimeSeries.
+ .. code-block::
+
+ from google.cloud import aiplatform_v1
+
+ def sample_create_tensorboard_time_series():
+ # Create a client
+ client = aiplatform_v1.TensorboardServiceClient()
+
+ # Initialize request argument(s)
+ tensorboard_time_series = aiplatform_v1.TensorboardTimeSeries()
+ tensorboard_time_series.display_name = "display_name_value"
+ tensorboard_time_series.value_type = "BLOB_SEQUENCE"
+
+ request = aiplatform_v1.CreateTensorboardTimeSeriesRequest(
+ parent="parent_value",
+ tensorboard_time_series=tensorboard_time_series,
+ )
+
+ # Make the request
+ response = client.create_tensorboard_time_series(request=request)
+
+ # Handle the response
+ print(response)
+
Args:
request (Union[google.cloud.aiplatform_v1.types.CreateTensorboardTimeSeriesRequest, dict]):
The request object. Request message for
@@ -1858,6 +2256,25 @@ async def get_tensorboard_time_series(
) -> tensorboard_time_series.TensorboardTimeSeries:
r"""Gets a TensorboardTimeSeries.
+ .. code-block::
+
+ from google.cloud import aiplatform_v1
+
+ def sample_get_tensorboard_time_series():
+ # Create a client
+ client = aiplatform_v1.TensorboardServiceClient()
+
+ # Initialize request argument(s)
+ request = aiplatform_v1.GetTensorboardTimeSeriesRequest(
+ name="name_value",
+ )
+
+ # Make the request
+ response = client.get_tensorboard_time_series(request=request)
+
+ # Handle the response
+ print(response)
+
Args:
request (Union[google.cloud.aiplatform_v1.types.GetTensorboardTimeSeriesRequest, dict]):
The request object. Request message for
@@ -1933,6 +2350,29 @@ async def update_tensorboard_time_series(
) -> gca_tensorboard_time_series.TensorboardTimeSeries:
r"""Updates a TensorboardTimeSeries.
+ .. code-block::
+
+ from google.cloud import aiplatform_v1
+
+ def sample_update_tensorboard_time_series():
+ # Create a client
+ client = aiplatform_v1.TensorboardServiceClient()
+
+ # Initialize request argument(s)
+ tensorboard_time_series = aiplatform_v1.TensorboardTimeSeries()
+ tensorboard_time_series.display_name = "display_name_value"
+ tensorboard_time_series.value_type = "BLOB_SEQUENCE"
+
+ request = aiplatform_v1.UpdateTensorboardTimeSeriesRequest(
+ tensorboard_time_series=tensorboard_time_series,
+ )
+
+ # Make the request
+ response = client.update_tensorboard_time_series(request=request)
+
+ # Handle the response
+ print(response)
+
Args:
request (Union[google.cloud.aiplatform_v1.types.UpdateTensorboardTimeSeriesRequest, dict]):
The request object. Request message for
@@ -2029,6 +2469,26 @@ async def list_tensorboard_time_series(
) -> pagers.ListTensorboardTimeSeriesAsyncPager:
r"""Lists TensorboardTimeSeries in a Location.
+ .. code-block::
+
+ from google.cloud import aiplatform_v1
+
+ def sample_list_tensorboard_time_series():
+ # Create a client
+ client = aiplatform_v1.TensorboardServiceClient()
+
+ # Initialize request argument(s)
+ request = aiplatform_v1.ListTensorboardTimeSeriesRequest(
+ parent="parent_value",
+ )
+
+ # Make the request
+ page_result = client.list_tensorboard_time_series(request=request)
+
+ # Handle the response
+ for response in page_result:
+ print(response)
+
Args:
request (Union[google.cloud.aiplatform_v1.types.ListTensorboardTimeSeriesRequest, dict]):
The request object. Request message for
@@ -2113,6 +2573,29 @@ async def delete_tensorboard_time_series(
) -> operation_async.AsyncOperation:
r"""Deletes a TensorboardTimeSeries.
+ .. code-block::
+
+ from google.cloud import aiplatform_v1
+
+ def sample_delete_tensorboard_time_series():
+ # Create a client
+ client = aiplatform_v1.TensorboardServiceClient()
+
+ # Initialize request argument(s)
+ request = aiplatform_v1.DeleteTensorboardTimeSeriesRequest(
+ name="name_value",
+ )
+
+ # Make the request
+ operation = client.delete_tensorboard_time_series(request=request)
+
+ print("Waiting for operation to complete...")
+
+ response = operation.result()
+
+ # Handle the response
+ print(response)
+
Args:
request (Union[google.cloud.aiplatform_v1.types.DeleteTensorboardTimeSeriesRequest, dict]):
The request object. Request message for
@@ -2213,6 +2696,27 @@ async def batch_read_tensorboard_time_series_data(
Otherwise, that limit number of data points will be
randomly selected from this time series and returned.
+
+ .. code-block::
+
+ from google.cloud import aiplatform_v1
+
+ def sample_batch_read_tensorboard_time_series_data():
+ # Create a client
+ client = aiplatform_v1.TensorboardServiceClient()
+
+ # Initialize request argument(s)
+ request = aiplatform_v1.BatchReadTensorboardTimeSeriesDataRequest(
+ tensorboard="tensorboard_value",
+ time_series=['time_series_value_1', 'time_series_value_2'],
+ )
+
+ # Make the request
+ response = client.batch_read_tensorboard_time_series_data(request=request)
+
+ # Handle the response
+ print(response)
+
Args:
request (Union[google.cloud.aiplatform_v1.types.BatchReadTensorboardTimeSeriesDataRequest, dict]):
The request object. Request message for
@@ -2297,6 +2801,26 @@ async def read_tensorboard_time_series_data(
from this time series and returned. This value can be changed by
changing max_data_points, which can't be greater than 10k.
+
+ .. code-block::
+
+ from google.cloud import aiplatform_v1
+
+ def sample_read_tensorboard_time_series_data():
+ # Create a client
+ client = aiplatform_v1.TensorboardServiceClient()
+
+ # Initialize request argument(s)
+ request = aiplatform_v1.ReadTensorboardTimeSeriesDataRequest(
+ tensorboard_time_series="tensorboard_time_series_value",
+ )
+
+ # Make the request
+ response = client.read_tensorboard_time_series_data(request=request)
+
+ # Handle the response
+ print(response)
+
Args:
request (Union[google.cloud.aiplatform_v1.types.ReadTensorboardTimeSeriesDataRequest, dict]):
The request object. Request message for
@@ -2374,6 +2898,27 @@ def read_tensorboard_blob_data(
project's Cloud Storage bucket without users having to
obtain Cloud Storage access permission.
+
+ .. code-block::
+
+ from google.cloud import aiplatform_v1
+
+ def sample_read_tensorboard_blob_data():
+ # Create a client
+ client = aiplatform_v1.TensorboardServiceClient()
+
+ # Initialize request argument(s)
+ request = aiplatform_v1.ReadTensorboardBlobDataRequest(
+ time_series="time_series_value",
+ )
+
+ # Make the request
+ stream = client.read_tensorboard_blob_data(request=request)
+
+ # Handle the response
+ for response in stream:
+ print(response)
+
Args:
request (Union[google.cloud.aiplatform_v1.types.ReadTensorboardBlobDataRequest, dict]):
The request object. Request message for
@@ -2455,6 +3000,32 @@ async def write_tensorboard_experiment_data(
TensorboardTimeSeries in multiple TensorboardRun's. If
any data fail to be ingested, an error will be returned.
+
+ .. code-block::
+
+ from google.cloud import aiplatform_v1
+
+ def sample_write_tensorboard_experiment_data():
+ # Create a client
+ client = aiplatform_v1.TensorboardServiceClient()
+
+ # Initialize request argument(s)
+ write_run_data_requests = aiplatform_v1.WriteTensorboardRunDataRequest()
+ write_run_data_requests.tensorboard_run = "tensorboard_run_value"
+ write_run_data_requests.time_series_data.tensorboard_time_series_id = "tensorboard_time_series_id_value"
+ write_run_data_requests.time_series_data.value_type = "BLOB_SEQUENCE"
+
+ request = aiplatform_v1.WriteTensorboardExperimentDataRequest(
+ tensorboard_experiment="tensorboard_experiment_value",
+ write_run_data_requests=write_run_data_requests,
+ )
+
+ # Make the request
+ response = client.write_tensorboard_experiment_data(request=request)
+
+ # Handle the response
+ print(response)
+
Args:
request (Union[google.cloud.aiplatform_v1.types.WriteTensorboardExperimentDataRequest, dict]):
The request object. Request message for
@@ -2541,6 +3112,31 @@ async def write_tensorboard_run_data(
TensorboardTimeSeries under a TensorboardRun. If any
data fail to be ingested, an error will be returned.
+
+ .. code-block::
+
+ from google.cloud import aiplatform_v1
+
+ def sample_write_tensorboard_run_data():
+ # Create a client
+ client = aiplatform_v1.TensorboardServiceClient()
+
+ # Initialize request argument(s)
+ time_series_data = aiplatform_v1.TimeSeriesData()
+ time_series_data.tensorboard_time_series_id = "tensorboard_time_series_id_value"
+ time_series_data.value_type = "BLOB_SEQUENCE"
+
+ request = aiplatform_v1.WriteTensorboardRunDataRequest(
+ tensorboard_run="tensorboard_run_value",
+ time_series_data=time_series_data,
+ )
+
+ # Make the request
+ response = client.write_tensorboard_run_data(request=request)
+
+ # Handle the response
+ print(response)
+
Args:
request (Union[google.cloud.aiplatform_v1.types.WriteTensorboardRunDataRequest, dict]):
The request object. Request message for
@@ -2633,6 +3229,27 @@ async def export_tensorboard_time_series_data(
r"""Exports a TensorboardTimeSeries' data. Data is
returned in paginated responses.
+
+ .. code-block::
+
+ from google.cloud import aiplatform_v1
+
+ def sample_export_tensorboard_time_series_data():
+ # Create a client
+ client = aiplatform_v1.TensorboardServiceClient()
+
+ # Initialize request argument(s)
+ request = aiplatform_v1.ExportTensorboardTimeSeriesDataRequest(
+ tensorboard_time_series="tensorboard_time_series_value",
+ )
+
+ # Make the request
+ page_result = client.export_tensorboard_time_series_data(request=request)
+
+ # Handle the response
+ for response in page_result:
+ print(response)
+
Args:
request (Union[google.cloud.aiplatform_v1.types.ExportTensorboardTimeSeriesDataRequest, dict]):
The request object. Request message for
diff --git a/google/cloud/aiplatform_v1/services/tensorboard_service/client.py b/google/cloud/aiplatform_v1/services/tensorboard_service/client.py
index 70a4aa8fa4..73227b57c1 100644
--- a/google/cloud/aiplatform_v1/services/tensorboard_service/client.py
+++ b/google/cloud/aiplatform_v1/services/tensorboard_service/client.py
@@ -504,6 +504,34 @@ def create_tensorboard(
) -> gac_operation.Operation:
r"""Creates a Tensorboard.
+
+ .. code-block::
+
+ from google.cloud import aiplatform_v1
+
+ def sample_create_tensorboard():
+ # Create a client
+ client = aiplatform_v1.TensorboardServiceClient()
+
+ # Initialize request argument(s)
+ tensorboard = aiplatform_v1.Tensorboard()
+ tensorboard.display_name = "display_name_value"
+
+ request = aiplatform_v1.CreateTensorboardRequest(
+ parent="parent_value",
+ tensorboard=tensorboard,
+ )
+
+ # Make the request
+ operation = client.create_tensorboard(request=request)
+
+ print("Waiting for operation to complete...")
+
+ response = operation.result()
+
+ # Handle the response
+ print(response)
+
Args:
request (Union[google.cloud.aiplatform_v1.types.CreateTensorboardRequest, dict]):
The request object. Request message for
@@ -595,6 +623,26 @@ def get_tensorboard(
) -> tensorboard.Tensorboard:
r"""Gets a Tensorboard.
+
+ .. code-block::
+
+ from google.cloud import aiplatform_v1
+
+ def sample_get_tensorboard():
+ # Create a client
+ client = aiplatform_v1.TensorboardServiceClient()
+
+ # Initialize request argument(s)
+ request = aiplatform_v1.GetTensorboardRequest(
+ name="name_value",
+ )
+
+ # Make the request
+ response = client.get_tensorboard(request=request)
+
+ # Handle the response
+ print(response)
+
Args:
request (Union[google.cloud.aiplatform_v1.types.GetTensorboardRequest, dict]):
The request object. Request message for
@@ -671,6 +719,33 @@ def update_tensorboard(
) -> gac_operation.Operation:
r"""Updates a Tensorboard.
+
+ .. code-block::
+
+ from google.cloud import aiplatform_v1
+
+ def sample_update_tensorboard():
+ # Create a client
+ client = aiplatform_v1.TensorboardServiceClient()
+
+ # Initialize request argument(s)
+ tensorboard = aiplatform_v1.Tensorboard()
+ tensorboard.display_name = "display_name_value"
+
+ request = aiplatform_v1.UpdateTensorboardRequest(
+ tensorboard=tensorboard,
+ )
+
+ # Make the request
+ operation = client.update_tensorboard(request=request)
+
+ print("Waiting for operation to complete...")
+
+ response = operation.result()
+
+ # Handle the response
+ print(response)
+
Args:
request (Union[google.cloud.aiplatform_v1.types.UpdateTensorboardRequest, dict]):
The request object. Request message for
@@ -771,6 +846,27 @@ def list_tensorboards(
) -> pagers.ListTensorboardsPager:
r"""Lists Tensorboards in a Location.
+
+ .. code-block::
+
+ from google.cloud import aiplatform_v1
+
+ def sample_list_tensorboards():
+ # Create a client
+ client = aiplatform_v1.TensorboardServiceClient()
+
+ # Initialize request argument(s)
+ request = aiplatform_v1.ListTensorboardsRequest(
+ parent="parent_value",
+ )
+
+ # Make the request
+ page_result = client.list_tensorboards(request=request)
+
+ # Handle the response
+ for response in page_result:
+ print(response)
+
Args:
request (Union[google.cloud.aiplatform_v1.types.ListTensorboardsRequest, dict]):
The request object. Request message for
@@ -852,6 +948,30 @@ def delete_tensorboard(
) -> gac_operation.Operation:
r"""Deletes a Tensorboard.
+
+ .. code-block::
+
+ from google.cloud import aiplatform_v1
+
+ def sample_delete_tensorboard():
+ # Create a client
+ client = aiplatform_v1.TensorboardServiceClient()
+
+ # Initialize request argument(s)
+ request = aiplatform_v1.DeleteTensorboardRequest(
+ name="name_value",
+ )
+
+ # Make the request
+ operation = client.delete_tensorboard(request=request)
+
+ print("Waiting for operation to complete...")
+
+ response = operation.result()
+
+ # Handle the response
+ print(response)
+
Args:
request (Union[google.cloud.aiplatform_v1.types.DeleteTensorboardRequest, dict]):
The request object. Request message for
@@ -949,6 +1069,27 @@ def create_tensorboard_experiment(
) -> gca_tensorboard_experiment.TensorboardExperiment:
r"""Creates a TensorboardExperiment.
+
+ .. code-block::
+
+ from google.cloud import aiplatform_v1
+
+ def sample_create_tensorboard_experiment():
+ # Create a client
+ client = aiplatform_v1.TensorboardServiceClient()
+
+ # Initialize request argument(s)
+ request = aiplatform_v1.CreateTensorboardExperimentRequest(
+ parent="parent_value",
+ tensorboard_experiment_id="tensorboard_experiment_id_value",
+ )
+
+ # Make the request
+ response = client.create_tensorboard_experiment(request=request)
+
+ # Handle the response
+ print(response)
+
Args:
request (Union[google.cloud.aiplatform_v1.types.CreateTensorboardExperimentRequest, dict]):
The request object. Request message for
@@ -1051,6 +1192,26 @@ def get_tensorboard_experiment(
) -> tensorboard_experiment.TensorboardExperiment:
r"""Gets a TensorboardExperiment.
+
+ .. code-block::
+
+ from google.cloud import aiplatform_v1
+
+ def sample_get_tensorboard_experiment():
+ # Create a client
+ client = aiplatform_v1.TensorboardServiceClient()
+
+ # Initialize request argument(s)
+ request = aiplatform_v1.GetTensorboardExperimentRequest(
+ name="name_value",
+ )
+
+ # Make the request
+ response = client.get_tensorboard_experiment(request=request)
+
+ # Handle the response
+ print(response)
+
Args:
request (Union[google.cloud.aiplatform_v1.types.GetTensorboardExperimentRequest, dict]):
The request object. Request message for
@@ -1130,6 +1291,25 @@ def update_tensorboard_experiment(
) -> gca_tensorboard_experiment.TensorboardExperiment:
r"""Updates a TensorboardExperiment.
+
+ .. code-block::
+
+ from google.cloud import aiplatform_v1
+
+ def sample_update_tensorboard_experiment():
+ # Create a client
+ client = aiplatform_v1.TensorboardServiceClient()
+
+ # Initialize request argument(s)
+ request = aiplatform_v1.UpdateTensorboardExperimentRequest(
+ )
+
+ # Make the request
+ response = client.update_tensorboard_experiment(request=request)
+
+ # Handle the response
+ print(response)
+
Args:
request (Union[google.cloud.aiplatform_v1.types.UpdateTensorboardExperimentRequest, dict]):
The request object. Request message for
@@ -1227,6 +1407,27 @@ def list_tensorboard_experiments(
) -> pagers.ListTensorboardExperimentsPager:
r"""Lists TensorboardExperiments in a Location.
+
+ .. code-block::
+
+ from google.cloud import aiplatform_v1
+
+ def sample_list_tensorboard_experiments():
+ # Create a client
+ client = aiplatform_v1.TensorboardServiceClient()
+
+ # Initialize request argument(s)
+ request = aiplatform_v1.ListTensorboardExperimentsRequest(
+ parent="parent_value",
+ )
+
+ # Make the request
+ page_result = client.list_tensorboard_experiments(request=request)
+
+ # Handle the response
+ for response in page_result:
+ print(response)
+
Args:
request (Union[google.cloud.aiplatform_v1.types.ListTensorboardExperimentsRequest, dict]):
The request object. Request message for
@@ -1315,6 +1516,30 @@ def delete_tensorboard_experiment(
) -> gac_operation.Operation:
r"""Deletes a TensorboardExperiment.
+
+ .. code-block::
+
+ from google.cloud import aiplatform_v1
+
+ def sample_delete_tensorboard_experiment():
+ # Create a client
+ client = aiplatform_v1.TensorboardServiceClient()
+
+ # Initialize request argument(s)
+ request = aiplatform_v1.DeleteTensorboardExperimentRequest(
+ name="name_value",
+ )
+
+ # Make the request
+ operation = client.delete_tensorboard_experiment(request=request)
+
+ print("Waiting for operation to complete...")
+
+ response = operation.result()
+
+ # Handle the response
+ print(response)
+
Args:
request (Union[google.cloud.aiplatform_v1.types.DeleteTensorboardExperimentRequest, dict]):
The request object. Request message for
@@ -1414,6 +1639,31 @@ def create_tensorboard_run(
) -> gca_tensorboard_run.TensorboardRun:
r"""Creates a TensorboardRun.
+
+ .. code-block::
+
+ from google.cloud import aiplatform_v1
+
+ def sample_create_tensorboard_run():
+ # Create a client
+ client = aiplatform_v1.TensorboardServiceClient()
+
+ # Initialize request argument(s)
+ tensorboard_run = aiplatform_v1.TensorboardRun()
+ tensorboard_run.display_name = "display_name_value"
+
+ request = aiplatform_v1.CreateTensorboardRunRequest(
+ parent="parent_value",
+ tensorboard_run=tensorboard_run,
+ tensorboard_run_id="tensorboard_run_id_value",
+ )
+
+ # Make the request
+ response = client.create_tensorboard_run(request=request)
+
+ # Handle the response
+ print(response)
+
Args:
request (Union[google.cloud.aiplatform_v1.types.CreateTensorboardRunRequest, dict]):
The request object. Request message for
@@ -1513,6 +1763,32 @@ def batch_create_tensorboard_runs(
) -> tensorboard_service.BatchCreateTensorboardRunsResponse:
r"""Batch create TensorboardRuns.
+
+ .. code-block::
+
+ from google.cloud import aiplatform_v1
+
+ def sample_batch_create_tensorboard_runs():
+ # Create a client
+ client = aiplatform_v1.TensorboardServiceClient()
+
+ # Initialize request argument(s)
+ requests = aiplatform_v1.CreateTensorboardRunRequest()
+ requests.parent = "parent_value"
+ requests.tensorboard_run.display_name = "display_name_value"
+ requests.tensorboard_run_id = "tensorboard_run_id_value"
+
+ request = aiplatform_v1.BatchCreateTensorboardRunsRequest(
+ parent="parent_value",
+ requests=requests,
+ )
+
+ # Make the request
+ response = client.batch_create_tensorboard_runs(request=request)
+
+ # Handle the response
+ print(response)
+
Args:
request (Union[google.cloud.aiplatform_v1.types.BatchCreateTensorboardRunsRequest, dict]):
The request object. Request message for
@@ -1603,6 +1879,26 @@ def get_tensorboard_run(
) -> tensorboard_run.TensorboardRun:
r"""Gets a TensorboardRun.
+
+ .. code-block::
+
+ from google.cloud import aiplatform_v1
+
+ def sample_get_tensorboard_run():
+ # Create a client
+ client = aiplatform_v1.TensorboardServiceClient()
+
+ # Initialize request argument(s)
+ request = aiplatform_v1.GetTensorboardRunRequest(
+ name="name_value",
+ )
+
+ # Make the request
+ response = client.get_tensorboard_run(request=request)
+
+ # Handle the response
+ print(response)
+
Args:
request (Union[google.cloud.aiplatform_v1.types.GetTensorboardRunRequest, dict]):
The request object. Request message for
@@ -1678,6 +1974,29 @@ def update_tensorboard_run(
) -> gca_tensorboard_run.TensorboardRun:
r"""Updates a TensorboardRun.
+
+ .. code-block::
+
+ from google.cloud import aiplatform_v1
+
+ def sample_update_tensorboard_run():
+ # Create a client
+ client = aiplatform_v1.TensorboardServiceClient()
+
+ # Initialize request argument(s)
+ tensorboard_run = aiplatform_v1.TensorboardRun()
+ tensorboard_run.display_name = "display_name_value"
+
+ request = aiplatform_v1.UpdateTensorboardRunRequest(
+ tensorboard_run=tensorboard_run,
+ )
+
+ # Make the request
+ response = client.update_tensorboard_run(request=request)
+
+ # Handle the response
+ print(response)
+
Args:
request (Union[google.cloud.aiplatform_v1.types.UpdateTensorboardRunRequest, dict]):
The request object. Request message for
@@ -1768,6 +2087,27 @@ def list_tensorboard_runs(
) -> pagers.ListTensorboardRunsPager:
r"""Lists TensorboardRuns in a Location.
+
+ .. code-block::
+
+ from google.cloud import aiplatform_v1
+
+ def sample_list_tensorboard_runs():
+ # Create a client
+ client = aiplatform_v1.TensorboardServiceClient()
+
+ # Initialize request argument(s)
+ request = aiplatform_v1.ListTensorboardRunsRequest(
+ parent="parent_value",
+ )
+
+ # Make the request
+ page_result = client.list_tensorboard_runs(request=request)
+
+ # Handle the response
+ for response in page_result:
+ print(response)
+
Args:
request (Union[google.cloud.aiplatform_v1.types.ListTensorboardRunsRequest, dict]):
The request object. Request message for
@@ -1850,6 +2190,30 @@ def delete_tensorboard_run(
) -> gac_operation.Operation:
r"""Deletes a TensorboardRun.
+
+ .. code-block::
+
+ from google.cloud import aiplatform_v1
+
+ def sample_delete_tensorboard_run():
+ # Create a client
+ client = aiplatform_v1.TensorboardServiceClient()
+
+ # Initialize request argument(s)
+ request = aiplatform_v1.DeleteTensorboardRunRequest(
+ name="name_value",
+ )
+
+ # Make the request
+ operation = client.delete_tensorboard_run(request=request)
+
+ print("Waiting for operation to complete...")
+
+ response = operation.result()
+
+ # Handle the response
+ print(response)
+
Args:
request (Union[google.cloud.aiplatform_v1.types.DeleteTensorboardRunRequest, dict]):
The request object. Request message for
@@ -1949,6 +2313,33 @@ def batch_create_tensorboard_time_series(
r"""Batch create TensorboardTimeSeries that belong to a
TensorboardExperiment.
+
+
+ .. code-block::
+
+ from google.cloud import aiplatform_v1
+
+ def sample_batch_create_tensorboard_time_series():
+ # Create a client
+ client = aiplatform_v1.TensorboardServiceClient()
+
+ # Initialize request argument(s)
+ requests = aiplatform_v1.CreateTensorboardTimeSeriesRequest()
+ requests.parent = "parent_value"
+ requests.tensorboard_time_series.display_name = "display_name_value"
+ requests.tensorboard_time_series.value_type = "BLOB_SEQUENCE"
+
+ request = aiplatform_v1.BatchCreateTensorboardTimeSeriesRequest(
+ parent="parent_value",
+ requests=requests,
+ )
+
+ # Make the request
+ response = client.batch_create_tensorboard_time_series(request=request)
+
+ # Handle the response
+ print(response)
+
Args:
request (Union[google.cloud.aiplatform_v1.types.BatchCreateTensorboardTimeSeriesRequest, dict]):
The request object. Request message for
@@ -2045,6 +2436,31 @@ def create_tensorboard_time_series(
) -> gca_tensorboard_time_series.TensorboardTimeSeries:
r"""Creates a TensorboardTimeSeries.
+
+ .. code-block::
+
+ from google.cloud import aiplatform_v1
+
+ def sample_create_tensorboard_time_series():
+ # Create a client
+ client = aiplatform_v1.TensorboardServiceClient()
+
+ # Initialize request argument(s)
+ tensorboard_time_series = aiplatform_v1.TensorboardTimeSeries()
+ tensorboard_time_series.display_name = "display_name_value"
+ tensorboard_time_series.value_type = "BLOB_SEQUENCE"
+
+ request = aiplatform_v1.CreateTensorboardTimeSeriesRequest(
+ parent="parent_value",
+ tensorboard_time_series=tensorboard_time_series,
+ )
+
+ # Make the request
+ response = client.create_tensorboard_time_series(request=request)
+
+ # Handle the response
+ print(response)
+
Args:
request (Union[google.cloud.aiplatform_v1.types.CreateTensorboardTimeSeriesRequest, dict]):
The request object. Request message for
@@ -2132,6 +2548,26 @@ def get_tensorboard_time_series(
) -> tensorboard_time_series.TensorboardTimeSeries:
r"""Gets a TensorboardTimeSeries.
+
+ .. code-block::
+
+ from google.cloud import aiplatform_v1
+
+ def sample_get_tensorboard_time_series():
+ # Create a client
+ client = aiplatform_v1.TensorboardServiceClient()
+
+ # Initialize request argument(s)
+ request = aiplatform_v1.GetTensorboardTimeSeriesRequest(
+ name="name_value",
+ )
+
+ # Make the request
+ response = client.get_tensorboard_time_series(request=request)
+
+ # Handle the response
+ print(response)
+
Args:
request (Union[google.cloud.aiplatform_v1.types.GetTensorboardTimeSeriesRequest, dict]):
The request object. Request message for
@@ -2209,6 +2645,30 @@ def update_tensorboard_time_series(
) -> gca_tensorboard_time_series.TensorboardTimeSeries:
r"""Updates a TensorboardTimeSeries.
+
+ .. code-block::
+
+ from google.cloud import aiplatform_v1
+
+ def sample_update_tensorboard_time_series():
+ # Create a client
+ client = aiplatform_v1.TensorboardServiceClient()
+
+ # Initialize request argument(s)
+ tensorboard_time_series = aiplatform_v1.TensorboardTimeSeries()
+ tensorboard_time_series.display_name = "display_name_value"
+ tensorboard_time_series.value_type = "BLOB_SEQUENCE"
+
+ request = aiplatform_v1.UpdateTensorboardTimeSeriesRequest(
+ tensorboard_time_series=tensorboard_time_series,
+ )
+
+ # Make the request
+ response = client.update_tensorboard_time_series(request=request)
+
+ # Handle the response
+ print(response)
+
Args:
request (Union[google.cloud.aiplatform_v1.types.UpdateTensorboardTimeSeriesRequest, dict]):
The request object. Request message for
@@ -2309,6 +2769,27 @@ def list_tensorboard_time_series(
) -> pagers.ListTensorboardTimeSeriesPager:
r"""Lists TensorboardTimeSeries in a Location.
+
+ .. code-block::
+
+ from google.cloud import aiplatform_v1
+
+ def sample_list_tensorboard_time_series():
+ # Create a client
+ client = aiplatform_v1.TensorboardServiceClient()
+
+ # Initialize request argument(s)
+ request = aiplatform_v1.ListTensorboardTimeSeriesRequest(
+ parent="parent_value",
+ )
+
+ # Make the request
+ page_result = client.list_tensorboard_time_series(request=request)
+
+ # Handle the response
+ for response in page_result:
+ print(response)
+
Args:
request (Union[google.cloud.aiplatform_v1.types.ListTensorboardTimeSeriesRequest, dict]):
The request object. Request message for
@@ -2397,6 +2878,30 @@ def delete_tensorboard_time_series(
) -> gac_operation.Operation:
r"""Deletes a TensorboardTimeSeries.
+
+ .. code-block::
+
+ from google.cloud import aiplatform_v1
+
+ def sample_delete_tensorboard_time_series():
+ # Create a client
+ client = aiplatform_v1.TensorboardServiceClient()
+
+ # Initialize request argument(s)
+ request = aiplatform_v1.DeleteTensorboardTimeSeriesRequest(
+ name="name_value",
+ )
+
+ # Make the request
+ operation = client.delete_tensorboard_time_series(request=request)
+
+ print("Waiting for operation to complete...")
+
+ response = operation.result()
+
+ # Handle the response
+ print(response)
+
Args:
request (Union[google.cloud.aiplatform_v1.types.DeleteTensorboardTimeSeriesRequest, dict]):
The request object. Request message for
@@ -2501,6 +3006,28 @@ def batch_read_tensorboard_time_series_data(
Otherwise, that limit number of data points will be
randomly selected from this time series and returned.
+
+
+ .. code-block::
+
+ from google.cloud import aiplatform_v1
+
+ def sample_batch_read_tensorboard_time_series_data():
+ # Create a client
+ client = aiplatform_v1.TensorboardServiceClient()
+
+ # Initialize request argument(s)
+ request = aiplatform_v1.BatchReadTensorboardTimeSeriesDataRequest(
+ tensorboard="tensorboard_value",
+ time_series=['time_series_value_1', 'time_series_value_2'],
+ )
+
+ # Make the request
+ response = client.batch_read_tensorboard_time_series_data(request=request)
+
+ # Handle the response
+ print(response)
+
Args:
request (Union[google.cloud.aiplatform_v1.types.BatchReadTensorboardTimeSeriesDataRequest, dict]):
The request object. Request message for
@@ -2591,6 +3118,27 @@ def read_tensorboard_time_series_data(
from this time series and returned. This value can be changed by
changing max_data_points, which can't be greater than 10k.
+
+
+ .. code-block::
+
+ from google.cloud import aiplatform_v1
+
+ def sample_read_tensorboard_time_series_data():
+ # Create a client
+ client = aiplatform_v1.TensorboardServiceClient()
+
+ # Initialize request argument(s)
+ request = aiplatform_v1.ReadTensorboardTimeSeriesDataRequest(
+ tensorboard_time_series="tensorboard_time_series_value",
+ )
+
+ # Make the request
+ response = client.read_tensorboard_time_series_data(request=request)
+
+ # Handle the response
+ print(response)
+
Args:
request (Union[google.cloud.aiplatform_v1.types.ReadTensorboardTimeSeriesDataRequest, dict]):
The request object. Request message for
@@ -2672,6 +3220,28 @@ def read_tensorboard_blob_data(
project's Cloud Storage bucket without users having to
obtain Cloud Storage access permission.
+
+
+ .. code-block::
+
+ from google.cloud import aiplatform_v1
+
+ def sample_read_tensorboard_blob_data():
+ # Create a client
+ client = aiplatform_v1.TensorboardServiceClient()
+
+ # Initialize request argument(s)
+ request = aiplatform_v1.ReadTensorboardBlobDataRequest(
+ time_series="time_series_value",
+ )
+
+ # Make the request
+ stream = client.read_tensorboard_blob_data(request=request)
+
+ # Handle the response
+ for response in stream:
+ print(response)
+
Args:
request (Union[google.cloud.aiplatform_v1.types.ReadTensorboardBlobDataRequest, dict]):
The request object. Request message for
@@ -2755,6 +3325,33 @@ def write_tensorboard_experiment_data(
TensorboardTimeSeries in multiple TensorboardRun's. If
any data fail to be ingested, an error will be returned.
+
+
+ .. code-block::
+
+ from google.cloud import aiplatform_v1
+
+ def sample_write_tensorboard_experiment_data():
+ # Create a client
+ client = aiplatform_v1.TensorboardServiceClient()
+
+ # Initialize request argument(s)
+ write_run_data_requests = aiplatform_v1.WriteTensorboardRunDataRequest()
+ write_run_data_requests.tensorboard_run = "tensorboard_run_value"
+ write_run_data_requests.time_series_data.tensorboard_time_series_id = "tensorboard_time_series_id_value"
+ write_run_data_requests.time_series_data.value_type = "BLOB_SEQUENCE"
+
+ request = aiplatform_v1.WriteTensorboardExperimentDataRequest(
+ tensorboard_experiment="tensorboard_experiment_value",
+ write_run_data_requests=write_run_data_requests,
+ )
+
+ # Make the request
+ response = client.write_tensorboard_experiment_data(request=request)
+
+ # Handle the response
+ print(response)
+
Args:
request (Union[google.cloud.aiplatform_v1.types.WriteTensorboardExperimentDataRequest, dict]):
The request object. Request message for
@@ -2845,6 +3442,32 @@ def write_tensorboard_run_data(
TensorboardTimeSeries under a TensorboardRun. If any
data fail to be ingested, an error will be returned.
+
+
+ .. code-block::
+
+ from google.cloud import aiplatform_v1
+
+ def sample_write_tensorboard_run_data():
+ # Create a client
+ client = aiplatform_v1.TensorboardServiceClient()
+
+ # Initialize request argument(s)
+ time_series_data = aiplatform_v1.TimeSeriesData()
+ time_series_data.tensorboard_time_series_id = "tensorboard_time_series_id_value"
+ time_series_data.value_type = "BLOB_SEQUENCE"
+
+ request = aiplatform_v1.WriteTensorboardRunDataRequest(
+ tensorboard_run="tensorboard_run_value",
+ time_series_data=time_series_data,
+ )
+
+ # Make the request
+ response = client.write_tensorboard_run_data(request=request)
+
+ # Handle the response
+ print(response)
+
Args:
request (Union[google.cloud.aiplatform_v1.types.WriteTensorboardRunDataRequest, dict]):
The request object. Request message for
@@ -2939,6 +3562,28 @@ def export_tensorboard_time_series_data(
r"""Exports a TensorboardTimeSeries' data. Data is
returned in paginated responses.
+
+
+ .. code-block::
+
+ from google.cloud import aiplatform_v1
+
+ def sample_export_tensorboard_time_series_data():
+ # Create a client
+ client = aiplatform_v1.TensorboardServiceClient()
+
+ # Initialize request argument(s)
+ request = aiplatform_v1.ExportTensorboardTimeSeriesDataRequest(
+ tensorboard_time_series="tensorboard_time_series_value",
+ )
+
+ # Make the request
+ page_result = client.export_tensorboard_time_series_data(request=request)
+
+ # Handle the response
+ for response in page_result:
+ print(response)
+
Args:
request (Union[google.cloud.aiplatform_v1.types.ExportTensorboardTimeSeriesDataRequest, dict]):
The request object. Request message for
diff --git a/google/cloud/aiplatform_v1/services/vizier_service/async_client.py b/google/cloud/aiplatform_v1/services/vizier_service/async_client.py
index aa974f48ea..3d33df3151 100644
--- a/google/cloud/aiplatform_v1/services/vizier_service/async_client.py
+++ b/google/cloud/aiplatform_v1/services/vizier_service/async_client.py
@@ -228,6 +228,35 @@ async def create_study(
r"""Creates a Study. A resource name will be generated
after creation of the Study.
+
+ .. code-block::
+
+ from google.cloud import aiplatform_v1
+
+ def sample_create_study():
+ # Create a client
+ client = aiplatform_v1.VizierServiceClient()
+
+ # Initialize request argument(s)
+ study = aiplatform_v1.Study()
+ study.display_name = "display_name_value"
+ study.study_spec.metrics.metric_id = "metric_id_value"
+ study.study_spec.metrics.goal = "MINIMIZE"
+ study.study_spec.parameters.double_value_spec.min_value = 0.96
+ study.study_spec.parameters.double_value_spec.max_value = 0.962
+ study.study_spec.parameters.parameter_id = "parameter_id_value"
+
+ request = aiplatform_v1.CreateStudyRequest(
+ parent="parent_value",
+ study=study,
+ )
+
+ # Make the request
+ response = client.create_study(request=request)
+
+ # Handle the response
+ print(response)
+
Args:
request (Union[google.cloud.aiplatform_v1.types.CreateStudyRequest, dict]):
The request object. Request message for
@@ -307,6 +336,25 @@ async def get_study(
) -> study.Study:
r"""Gets a Study by name.
+ .. code-block::
+
+ from google.cloud import aiplatform_v1
+
+ def sample_get_study():
+ # Create a client
+ client = aiplatform_v1.VizierServiceClient()
+
+ # Initialize request argument(s)
+ request = aiplatform_v1.GetStudyRequest(
+ name="name_value",
+ )
+
+ # Make the request
+ response = client.get_study(request=request)
+
+ # Handle the response
+ print(response)
+
Args:
request (Union[google.cloud.aiplatform_v1.types.GetStudyRequest, dict]):
The request object. Request message for
@@ -377,6 +425,27 @@ async def list_studies(
r"""Lists all the studies in a region for an associated
project.
+
+ .. code-block::
+
+ from google.cloud import aiplatform_v1
+
+ def sample_list_studies():
+ # Create a client
+ client = aiplatform_v1.VizierServiceClient()
+
+ # Initialize request argument(s)
+ request = aiplatform_v1.ListStudiesRequest(
+ parent="parent_value",
+ )
+
+ # Make the request
+ page_result = client.list_studies(request=request)
+
+ # Handle the response
+ for response in page_result:
+ print(response)
+
Args:
request (Union[google.cloud.aiplatform_v1.types.ListStudiesRequest, dict]):
The request object. Request message for
@@ -458,6 +527,22 @@ async def delete_study(
) -> None:
r"""Deletes a Study.
+ .. code-block::
+
+ from google.cloud import aiplatform_v1
+
+ def sample_delete_study():
+ # Create a client
+ client = aiplatform_v1.VizierServiceClient()
+
+ # Initialize request argument(s)
+ request = aiplatform_v1.DeleteStudyRequest(
+ name="name_value",
+ )
+
+ # Make the request
+ client.delete_study(request=request)
+
Args:
request (Union[google.cloud.aiplatform_v1.types.DeleteStudyRequest, dict]):
The request object. Request message for
@@ -524,6 +609,27 @@ async def lookup_study(
r"""Looks a study up using the user-defined display_name field
instead of the fully qualified resource name.
+
+ .. code-block::
+
+ from google.cloud import aiplatform_v1
+
+ def sample_lookup_study():
+ # Create a client
+ client = aiplatform_v1.VizierServiceClient()
+
+ # Initialize request argument(s)
+ request = aiplatform_v1.LookupStudyRequest(
+ parent="parent_value",
+ display_name="display_name_value",
+ )
+
+ # Make the request
+ response = client.lookup_study(request=request)
+
+ # Handle the response
+ print(response)
+
Args:
request (Union[google.cloud.aiplatform_v1.types.LookupStudyRequest, dict]):
The request object. Request message for
@@ -597,6 +703,32 @@ async def suggest_trials(
long-running operation succeeds, it will contain a
[SuggestTrialsResponse][google.cloud.ml.v1.SuggestTrialsResponse].
+
+ .. code-block::
+
+ from google.cloud import aiplatform_v1
+
+ def sample_suggest_trials():
+ # Create a client
+ client = aiplatform_v1.VizierServiceClient()
+
+ # Initialize request argument(s)
+ request = aiplatform_v1.SuggestTrialsRequest(
+ parent="parent_value",
+ suggestion_count=1744,
+ client_id="client_id_value",
+ )
+
+ # Make the request
+ operation = client.suggest_trials(request=request)
+
+ print("Waiting for operation to complete...")
+
+ response = operation.result()
+
+ # Handle the response
+ print(response)
+
Args:
request (Union[google.cloud.aiplatform_v1.types.SuggestTrialsRequest, dict]):
The request object. Request message for
@@ -660,6 +792,25 @@ async def create_trial(
) -> study.Trial:
r"""Adds a user provided Trial to a Study.
+ .. code-block::
+
+ from google.cloud import aiplatform_v1
+
+ def sample_create_trial():
+ # Create a client
+ client = aiplatform_v1.VizierServiceClient()
+
+ # Initialize request argument(s)
+ request = aiplatform_v1.CreateTrialRequest(
+ parent="parent_value",
+ )
+
+ # Make the request
+ response = client.create_trial(request=request)
+
+ # Handle the response
+ print(response)
+
Args:
request (Union[google.cloud.aiplatform_v1.types.CreateTrialRequest, dict]):
The request object. Request message for
@@ -742,6 +893,25 @@ async def get_trial(
) -> study.Trial:
r"""Gets a Trial.
+ .. code-block::
+
+ from google.cloud import aiplatform_v1
+
+ def sample_get_trial():
+ # Create a client
+ client = aiplatform_v1.VizierServiceClient()
+
+ # Initialize request argument(s)
+ request = aiplatform_v1.GetTrialRequest(
+ name="name_value",
+ )
+
+ # Make the request
+ response = client.get_trial(request=request)
+
+ # Handle the response
+ print(response)
+
Args:
request (Union[google.cloud.aiplatform_v1.types.GetTrialRequest, dict]):
The request object. Request message for
@@ -816,6 +986,26 @@ async def list_trials(
) -> pagers.ListTrialsAsyncPager:
r"""Lists the Trials associated with a Study.
+ .. code-block::
+
+ from google.cloud import aiplatform_v1
+
+ def sample_list_trials():
+ # Create a client
+ client = aiplatform_v1.VizierServiceClient()
+
+ # Initialize request argument(s)
+ request = aiplatform_v1.ListTrialsRequest(
+ parent="parent_value",
+ )
+
+ # Make the request
+ page_result = client.list_trials(request=request)
+
+ # Handle the response
+ for response in page_result:
+ print(response)
+
Args:
request (Union[google.cloud.aiplatform_v1.types.ListTrialsRequest, dict]):
The request object. Request message for
@@ -898,6 +1088,26 @@ async def add_trial_measurement(
Trial. This measurement is assumed to have been taken
before the Trial is complete.
+
+ .. code-block::
+
+ from google.cloud import aiplatform_v1
+
+ def sample_add_trial_measurement():
+ # Create a client
+ client = aiplatform_v1.VizierServiceClient()
+
+ # Initialize request argument(s)
+ request = aiplatform_v1.AddTrialMeasurementRequest(
+ trial_name="trial_name_value",
+ )
+
+ # Make the request
+ response = client.add_trial_measurement(request=request)
+
+ # Handle the response
+ print(response)
+
Args:
request (Union[google.cloud.aiplatform_v1.types.AddTrialMeasurementRequest, dict]):
The request object. Request message for
@@ -952,6 +1162,25 @@ async def complete_trial(
) -> study.Trial:
r"""Marks a Trial as complete.
+ .. code-block::
+
+ from google.cloud import aiplatform_v1
+
+ def sample_complete_trial():
+ # Create a client
+ client = aiplatform_v1.VizierServiceClient()
+
+ # Initialize request argument(s)
+ request = aiplatform_v1.CompleteTrialRequest(
+ name="name_value",
+ )
+
+ # Make the request
+ response = client.complete_trial(request=request)
+
+ # Handle the response
+ print(response)
+
Args:
request (Union[google.cloud.aiplatform_v1.types.CompleteTrialRequest, dict]):
The request object. Request message for
@@ -1005,6 +1234,22 @@ async def delete_trial(
) -> None:
r"""Deletes a Trial.
+ .. code-block::
+
+ from google.cloud import aiplatform_v1
+
+ def sample_delete_trial():
+ # Create a client
+ client = aiplatform_v1.VizierServiceClient()
+
+ # Initialize request argument(s)
+ request = aiplatform_v1.DeleteTrialRequest(
+ name="name_value",
+ )
+
+ # Make the request
+ client.delete_trial(request=request)
+
Args:
request (Union[google.cloud.aiplatform_v1.types.DeleteTrialRequest, dict]):
The request object. Request message for
@@ -1071,6 +1316,30 @@ async def check_trial_early_stopping_state(
will contain a
[CheckTrialEarlyStoppingStateResponse][google.cloud.ml.v1.CheckTrialEarlyStoppingStateResponse].
+
+ .. code-block::
+
+ from google.cloud import aiplatform_v1
+
+ def sample_check_trial_early_stopping_state():
+ # Create a client
+ client = aiplatform_v1.VizierServiceClient()
+
+ # Initialize request argument(s)
+ request = aiplatform_v1.CheckTrialEarlyStoppingStateRequest(
+ trial_name="trial_name_value",
+ )
+
+ # Make the request
+ operation = client.check_trial_early_stopping_state(request=request)
+
+ print("Waiting for operation to complete...")
+
+ response = operation.result()
+
+ # Handle the response
+ print(response)
+
Args:
request (Union[google.cloud.aiplatform_v1.types.CheckTrialEarlyStoppingStateRequest, dict]):
The request object. Request message for
@@ -1134,6 +1403,25 @@ async def stop_trial(
) -> study.Trial:
r"""Stops a Trial.
+ .. code-block::
+
+ from google.cloud import aiplatform_v1
+
+ def sample_stop_trial():
+ # Create a client
+ client = aiplatform_v1.VizierServiceClient()
+
+ # Initialize request argument(s)
+ request = aiplatform_v1.StopTrialRequest(
+ name="name_value",
+ )
+
+ # Make the request
+ response = client.stop_trial(request=request)
+
+ # Handle the response
+ print(response)
+
Args:
request (Union[google.cloud.aiplatform_v1.types.StopTrialRequest, dict]):
The request object. Request message for
@@ -1190,6 +1478,26 @@ async def list_optimal_trials(
pareto-optimal can be checked in wiki page.
https://en.wikipedia.org/wiki/Pareto_efficiency
+
+ .. code-block::
+
+ from google.cloud import aiplatform_v1
+
+ def sample_list_optimal_trials():
+ # Create a client
+ client = aiplatform_v1.VizierServiceClient()
+
+ # Initialize request argument(s)
+ request = aiplatform_v1.ListOptimalTrialsRequest(
+ parent="parent_value",
+ )
+
+ # Make the request
+ response = client.list_optimal_trials(request=request)
+
+ # Handle the response
+ print(response)
+
Args:
request (Union[google.cloud.aiplatform_v1.types.ListOptimalTrialsRequest, dict]):
The request object. Request message for
diff --git a/google/cloud/aiplatform_v1/services/vizier_service/client.py b/google/cloud/aiplatform_v1/services/vizier_service/client.py
index 4c0423a036..92233b96a2 100644
--- a/google/cloud/aiplatform_v1/services/vizier_service/client.py
+++ b/google/cloud/aiplatform_v1/services/vizier_service/client.py
@@ -451,6 +451,36 @@ def create_study(
r"""Creates a Study. A resource name will be generated
after creation of the Study.
+
+
+ .. code-block::
+
+ from google.cloud import aiplatform_v1
+
+ def sample_create_study():
+ # Create a client
+ client = aiplatform_v1.VizierServiceClient()
+
+ # Initialize request argument(s)
+ study = aiplatform_v1.Study()
+ study.display_name = "display_name_value"
+ study.study_spec.metrics.metric_id = "metric_id_value"
+ study.study_spec.metrics.goal = "MINIMIZE"
+ study.study_spec.parameters.double_value_spec.min_value = 0.96
+ study.study_spec.parameters.double_value_spec.max_value = 0.962
+ study.study_spec.parameters.parameter_id = "parameter_id_value"
+
+ request = aiplatform_v1.CreateStudyRequest(
+ parent="parent_value",
+ study=study,
+ )
+
+ # Make the request
+ response = client.create_study(request=request)
+
+ # Handle the response
+ print(response)
+
Args:
request (Union[google.cloud.aiplatform_v1.types.CreateStudyRequest, dict]):
The request object. Request message for
@@ -530,6 +560,26 @@ def get_study(
) -> study.Study:
r"""Gets a Study by name.
+
+ .. code-block::
+
+ from google.cloud import aiplatform_v1
+
+ def sample_get_study():
+ # Create a client
+ client = aiplatform_v1.VizierServiceClient()
+
+ # Initialize request argument(s)
+ request = aiplatform_v1.GetStudyRequest(
+ name="name_value",
+ )
+
+ # Make the request
+ response = client.get_study(request=request)
+
+ # Handle the response
+ print(response)
+
Args:
request (Union[google.cloud.aiplatform_v1.types.GetStudyRequest, dict]):
The request object. Request message for
@@ -600,6 +650,28 @@ def list_studies(
r"""Lists all the studies in a region for an associated
project.
+
+
+ .. code-block::
+
+ from google.cloud import aiplatform_v1
+
+ def sample_list_studies():
+ # Create a client
+ client = aiplatform_v1.VizierServiceClient()
+
+ # Initialize request argument(s)
+ request = aiplatform_v1.ListStudiesRequest(
+ parent="parent_value",
+ )
+
+ # Make the request
+ page_result = client.list_studies(request=request)
+
+ # Handle the response
+ for response in page_result:
+ print(response)
+
Args:
request (Union[google.cloud.aiplatform_v1.types.ListStudiesRequest, dict]):
The request object. Request message for
@@ -681,6 +753,23 @@ def delete_study(
) -> None:
r"""Deletes a Study.
+
+ .. code-block::
+
+ from google.cloud import aiplatform_v1
+
+ def sample_delete_study():
+ # Create a client
+ client = aiplatform_v1.VizierServiceClient()
+
+ # Initialize request argument(s)
+ request = aiplatform_v1.DeleteStudyRequest(
+ name="name_value",
+ )
+
+ # Make the request
+ client.delete_study(request=request)
+
Args:
request (Union[google.cloud.aiplatform_v1.types.DeleteStudyRequest, dict]):
The request object. Request message for
@@ -747,6 +836,28 @@ def lookup_study(
r"""Looks a study up using the user-defined display_name field
instead of the fully qualified resource name.
+
+
+ .. code-block::
+
+ from google.cloud import aiplatform_v1
+
+ def sample_lookup_study():
+ # Create a client
+ client = aiplatform_v1.VizierServiceClient()
+
+ # Initialize request argument(s)
+ request = aiplatform_v1.LookupStudyRequest(
+ parent="parent_value",
+ display_name="display_name_value",
+ )
+
+ # Make the request
+ response = client.lookup_study(request=request)
+
+ # Handle the response
+ print(response)
+
Args:
request (Union[google.cloud.aiplatform_v1.types.LookupStudyRequest, dict]):
The request object. Request message for
@@ -820,6 +931,33 @@ def suggest_trials(
long-running operation succeeds, it will contain a
[SuggestTrialsResponse][google.cloud.ml.v1.SuggestTrialsResponse].
+
+
+ .. code-block::
+
+ from google.cloud import aiplatform_v1
+
+ def sample_suggest_trials():
+ # Create a client
+ client = aiplatform_v1.VizierServiceClient()
+
+ # Initialize request argument(s)
+ request = aiplatform_v1.SuggestTrialsRequest(
+ parent="parent_value",
+ suggestion_count=1744,
+ client_id="client_id_value",
+ )
+
+ # Make the request
+ operation = client.suggest_trials(request=request)
+
+ print("Waiting for operation to complete...")
+
+ response = operation.result()
+
+ # Handle the response
+ print(response)
+
Args:
request (Union[google.cloud.aiplatform_v1.types.SuggestTrialsRequest, dict]):
The request object. Request message for
@@ -884,6 +1022,26 @@ def create_trial(
) -> study.Trial:
r"""Adds a user provided Trial to a Study.
+
+ .. code-block::
+
+ from google.cloud import aiplatform_v1
+
+ def sample_create_trial():
+ # Create a client
+ client = aiplatform_v1.VizierServiceClient()
+
+ # Initialize request argument(s)
+ request = aiplatform_v1.CreateTrialRequest(
+ parent="parent_value",
+ )
+
+ # Make the request
+ response = client.create_trial(request=request)
+
+ # Handle the response
+ print(response)
+
Args:
request (Union[google.cloud.aiplatform_v1.types.CreateTrialRequest, dict]):
The request object. Request message for
@@ -966,6 +1124,26 @@ def get_trial(
) -> study.Trial:
r"""Gets a Trial.
+
+ .. code-block::
+
+ from google.cloud import aiplatform_v1
+
+ def sample_get_trial():
+ # Create a client
+ client = aiplatform_v1.VizierServiceClient()
+
+ # Initialize request argument(s)
+ request = aiplatform_v1.GetTrialRequest(
+ name="name_value",
+ )
+
+ # Make the request
+ response = client.get_trial(request=request)
+
+ # Handle the response
+ print(response)
+
Args:
request (Union[google.cloud.aiplatform_v1.types.GetTrialRequest, dict]):
The request object. Request message for
@@ -1040,6 +1218,27 @@ def list_trials(
) -> pagers.ListTrialsPager:
r"""Lists the Trials associated with a Study.
+
+ .. code-block::
+
+ from google.cloud import aiplatform_v1
+
+ def sample_list_trials():
+ # Create a client
+ client = aiplatform_v1.VizierServiceClient()
+
+ # Initialize request argument(s)
+ request = aiplatform_v1.ListTrialsRequest(
+ parent="parent_value",
+ )
+
+ # Make the request
+ page_result = client.list_trials(request=request)
+
+ # Handle the response
+ for response in page_result:
+ print(response)
+
Args:
request (Union[google.cloud.aiplatform_v1.types.ListTrialsRequest, dict]):
The request object. Request message for
@@ -1122,6 +1321,27 @@ def add_trial_measurement(
Trial. This measurement is assumed to have been taken
before the Trial is complete.
+
+
+ .. code-block::
+
+ from google.cloud import aiplatform_v1
+
+ def sample_add_trial_measurement():
+ # Create a client
+ client = aiplatform_v1.VizierServiceClient()
+
+ # Initialize request argument(s)
+ request = aiplatform_v1.AddTrialMeasurementRequest(
+ trial_name="trial_name_value",
+ )
+
+ # Make the request
+ response = client.add_trial_measurement(request=request)
+
+ # Handle the response
+ print(response)
+
Args:
request (Union[google.cloud.aiplatform_v1.types.AddTrialMeasurementRequest, dict]):
The request object. Request message for
@@ -1177,6 +1397,26 @@ def complete_trial(
) -> study.Trial:
r"""Marks a Trial as complete.
+
+ .. code-block::
+
+ from google.cloud import aiplatform_v1
+
+ def sample_complete_trial():
+ # Create a client
+ client = aiplatform_v1.VizierServiceClient()
+
+ # Initialize request argument(s)
+ request = aiplatform_v1.CompleteTrialRequest(
+ name="name_value",
+ )
+
+ # Make the request
+ response = client.complete_trial(request=request)
+
+ # Handle the response
+ print(response)
+
Args:
request (Union[google.cloud.aiplatform_v1.types.CompleteTrialRequest, dict]):
The request object. Request message for
@@ -1231,6 +1471,23 @@ def delete_trial(
) -> None:
r"""Deletes a Trial.
+
+ .. code-block::
+
+ from google.cloud import aiplatform_v1
+
+ def sample_delete_trial():
+ # Create a client
+ client = aiplatform_v1.VizierServiceClient()
+
+ # Initialize request argument(s)
+ request = aiplatform_v1.DeleteTrialRequest(
+ name="name_value",
+ )
+
+ # Make the request
+ client.delete_trial(request=request)
+
Args:
request (Union[google.cloud.aiplatform_v1.types.DeleteTrialRequest, dict]):
The request object. Request message for
@@ -1297,6 +1554,31 @@ def check_trial_early_stopping_state(
will contain a
[CheckTrialEarlyStoppingStateResponse][google.cloud.ml.v1.CheckTrialEarlyStoppingStateResponse].
+
+
+ .. code-block::
+
+ from google.cloud import aiplatform_v1
+
+ def sample_check_trial_early_stopping_state():
+ # Create a client
+ client = aiplatform_v1.VizierServiceClient()
+
+ # Initialize request argument(s)
+ request = aiplatform_v1.CheckTrialEarlyStoppingStateRequest(
+ trial_name="trial_name_value",
+ )
+
+ # Make the request
+ operation = client.check_trial_early_stopping_state(request=request)
+
+ print("Waiting for operation to complete...")
+
+ response = operation.result()
+
+ # Handle the response
+ print(response)
+
Args:
request (Union[google.cloud.aiplatform_v1.types.CheckTrialEarlyStoppingStateRequest, dict]):
The request object. Request message for
@@ -1363,6 +1645,26 @@ def stop_trial(
) -> study.Trial:
r"""Stops a Trial.
+
+ .. code-block::
+
+ from google.cloud import aiplatform_v1
+
+ def sample_stop_trial():
+ # Create a client
+ client = aiplatform_v1.VizierServiceClient()
+
+ # Initialize request argument(s)
+ request = aiplatform_v1.StopTrialRequest(
+ name="name_value",
+ )
+
+ # Make the request
+ response = client.stop_trial(request=request)
+
+ # Handle the response
+ print(response)
+
Args:
request (Union[google.cloud.aiplatform_v1.types.StopTrialRequest, dict]):
The request object. Request message for
@@ -1420,6 +1722,27 @@ def list_optimal_trials(
pareto-optimal can be checked in wiki page.
https://en.wikipedia.org/wiki/Pareto_efficiency
+
+
+ .. code-block::
+
+ from google.cloud import aiplatform_v1
+
+ def sample_list_optimal_trials():
+ # Create a client
+ client = aiplatform_v1.VizierServiceClient()
+
+ # Initialize request argument(s)
+ request = aiplatform_v1.ListOptimalTrialsRequest(
+ parent="parent_value",
+ )
+
+ # Make the request
+ response = client.list_optimal_trials(request=request)
+
+ # Handle the response
+ print(response)
+
Args:
request (Union[google.cloud.aiplatform_v1.types.ListOptimalTrialsRequest, dict]):
The request object. Request message for
diff --git a/google/cloud/aiplatform_v1/types/accelerator_type.py b/google/cloud/aiplatform_v1/types/accelerator_type.py
index 168fe2474f..2bae849e7c 100644
--- a/google/cloud/aiplatform_v1/types/accelerator_type.py
+++ b/google/cloud/aiplatform_v1/types/accelerator_type.py
@@ -30,6 +30,8 @@ class AcceleratorType(proto.Enum):
NVIDIA_TESLA_P4 = 4
NVIDIA_TESLA_T4 = 5
NVIDIA_TESLA_A100 = 8
+ TPU_V2 = 6
+ TPU_V3 = 7
__all__ = tuple(sorted(__protobuf__.manifest))
diff --git a/google/cloud/aiplatform_v1/types/job_service.py b/google/cloud/aiplatform_v1/types/job_service.py
index 67d1df1469..ba8a1b33b0 100644
--- a/google/cloud/aiplatform_v1/types/job_service.py
+++ b/google/cloud/aiplatform_v1/types/job_service.py
@@ -628,7 +628,7 @@ class SearchModelDeploymentMonitoringStatsAnomaliesRequest(proto.Message):
model_deployment_monitoring_job (str):
Required. ModelDeploymentMonitoring Job resource name.
Format:
- \`projects/{project}/locations/{location}/modelDeploymentMonitoringJobs/{model_deployment_monitoring_job}
+ ``projects/{project}/locations/{location}/modelDeploymentMonitoringJobs/{model_deployment_monitoring_job}``
deployed_model_id (str):
Required. The DeployedModel ID of the
[ModelDeploymentMonitoringObjectiveConfig.deployed_model_id].
diff --git a/google/cloud/aiplatform_v1beta1/services/dataset_service/async_client.py b/google/cloud/aiplatform_v1beta1/services/dataset_service/async_client.py
index 452872c7fa..2a8a59ea69 100644
--- a/google/cloud/aiplatform_v1beta1/services/dataset_service/async_client.py
+++ b/google/cloud/aiplatform_v1beta1/services/dataset_service/async_client.py
@@ -237,6 +237,35 @@ async def create_dataset(
) -> operation_async.AsyncOperation:
r"""Creates a Dataset.
+ .. code-block::
+
+ from google.cloud import aiplatform_v1beta1
+
+ def sample_create_dataset():
+ # Create a client
+ client = aiplatform_v1beta1.DatasetServiceClient()
+
+ # Initialize request argument(s)
+ dataset = aiplatform_v1beta1.Dataset()
+ dataset.display_name = "display_name_value"
+ dataset.metadata_schema_uri = "metadata_schema_uri_value"
+ dataset.metadata.null_value = "NULL_VALUE"
+
+ request = aiplatform_v1beta1.CreateDatasetRequest(
+ parent="parent_value",
+ dataset=dataset,
+ )
+
+ # Make the request
+ operation = client.create_dataset(request=request)
+
+ print("Waiting for operation to complete...")
+
+ response = operation.result()
+
+ # Handle the response
+ print(response)
+
Args:
request (Union[google.cloud.aiplatform_v1beta1.types.CreateDatasetRequest, dict]):
The request object. Request message for
@@ -327,6 +356,25 @@ async def get_dataset(
) -> dataset.Dataset:
r"""Gets a Dataset.
+ .. code-block::
+
+ from google.cloud import aiplatform_v1beta1
+
+ def sample_get_dataset():
+ # Create a client
+ client = aiplatform_v1beta1.DatasetServiceClient()
+
+ # Initialize request argument(s)
+ request = aiplatform_v1beta1.GetDatasetRequest(
+ name="name_value",
+ )
+
+ # Make the request
+ response = client.get_dataset(request=request)
+
+ # Handle the response
+ print(response)
+
Args:
request (Union[google.cloud.aiplatform_v1beta1.types.GetDatasetRequest, dict]):
The request object. Request message for
@@ -399,6 +447,30 @@ async def update_dataset(
) -> gca_dataset.Dataset:
r"""Updates a Dataset.
+ .. code-block::
+
+ from google.cloud import aiplatform_v1beta1
+
+ def sample_update_dataset():
+ # Create a client
+ client = aiplatform_v1beta1.DatasetServiceClient()
+
+ # Initialize request argument(s)
+ dataset = aiplatform_v1beta1.Dataset()
+ dataset.display_name = "display_name_value"
+ dataset.metadata_schema_uri = "metadata_schema_uri_value"
+ dataset.metadata.null_value = "NULL_VALUE"
+
+ request = aiplatform_v1beta1.UpdateDatasetRequest(
+ dataset=dataset,
+ )
+
+ # Make the request
+ response = client.update_dataset(request=request)
+
+ # Handle the response
+ print(response)
+
Args:
request (Union[google.cloud.aiplatform_v1beta1.types.UpdateDatasetRequest, dict]):
The request object. Request message for
@@ -487,6 +559,26 @@ async def list_datasets(
) -> pagers.ListDatasetsAsyncPager:
r"""Lists Datasets in a Location.
+ .. code-block::
+
+ from google.cloud import aiplatform_v1beta1
+
+ def sample_list_datasets():
+ # Create a client
+ client = aiplatform_v1beta1.DatasetServiceClient()
+
+ # Initialize request argument(s)
+ request = aiplatform_v1beta1.ListDatasetsRequest(
+ parent="parent_value",
+ )
+
+ # Make the request
+ page_result = client.list_datasets(request=request)
+
+ # Handle the response
+ for response in page_result:
+ print(response)
+
Args:
request (Union[google.cloud.aiplatform_v1beta1.types.ListDatasetsRequest, dict]):
The request object. Request message for
@@ -567,6 +659,29 @@ async def delete_dataset(
) -> operation_async.AsyncOperation:
r"""Deletes a Dataset.
+ .. code-block::
+
+ from google.cloud import aiplatform_v1beta1
+
+ def sample_delete_dataset():
+ # Create a client
+ client = aiplatform_v1beta1.DatasetServiceClient()
+
+ # Initialize request argument(s)
+ request = aiplatform_v1beta1.DeleteDatasetRequest(
+ name="name_value",
+ )
+
+ # Make the request
+ operation = client.delete_dataset(request=request)
+
+ print("Waiting for operation to complete...")
+
+ response = operation.result()
+
+ # Handle the response
+ print(response)
+
Args:
request (Union[google.cloud.aiplatform_v1beta1.types.DeleteDatasetRequest, dict]):
The request object. Request message for
@@ -661,6 +776,34 @@ async def import_data(
) -> operation_async.AsyncOperation:
r"""Imports data into a Dataset.
+ .. code-block::
+
+ from google.cloud import aiplatform_v1beta1
+
+ def sample_import_data():
+ # Create a client
+ client = aiplatform_v1beta1.DatasetServiceClient()
+
+ # Initialize request argument(s)
+ import_configs = aiplatform_v1beta1.ImportDataConfig()
+ import_configs.gcs_source.uris = ['uris_value_1', 'uris_value_2']
+ import_configs.import_schema_uri = "import_schema_uri_value"
+
+ request = aiplatform_v1beta1.ImportDataRequest(
+ name="name_value",
+ import_configs=import_configs,
+ )
+
+ # Make the request
+ operation = client.import_data(request=request)
+
+ print("Waiting for operation to complete...")
+
+ response = operation.result()
+
+ # Handle the response
+ print(response)
+
Args:
request (Union[google.cloud.aiplatform_v1beta1.types.ImportDataRequest, dict]):
The request object. Request message for
@@ -755,6 +898,33 @@ async def export_data(
) -> operation_async.AsyncOperation:
r"""Exports data from a Dataset.
+ .. code-block::
+
+ from google.cloud import aiplatform_v1beta1
+
+ def sample_export_data():
+ # Create a client
+ client = aiplatform_v1beta1.DatasetServiceClient()
+
+ # Initialize request argument(s)
+ export_config = aiplatform_v1beta1.ExportDataConfig()
+ export_config.gcs_destination.output_uri_prefix = "output_uri_prefix_value"
+
+ request = aiplatform_v1beta1.ExportDataRequest(
+ name="name_value",
+ export_config=export_config,
+ )
+
+ # Make the request
+ operation = client.export_data(request=request)
+
+ print("Waiting for operation to complete...")
+
+ response = operation.result()
+
+ # Handle the response
+ print(response)
+
Args:
request (Union[google.cloud.aiplatform_v1beta1.types.ExportDataRequest, dict]):
The request object. Request message for
@@ -847,6 +1017,26 @@ async def list_data_items(
) -> pagers.ListDataItemsAsyncPager:
r"""Lists DataItems in a Dataset.
+ .. code-block::
+
+ from google.cloud import aiplatform_v1beta1
+
+ def sample_list_data_items():
+ # Create a client
+ client = aiplatform_v1beta1.DatasetServiceClient()
+
+ # Initialize request argument(s)
+ request = aiplatform_v1beta1.ListDataItemsRequest(
+ parent="parent_value",
+ )
+
+ # Make the request
+ page_result = client.list_data_items(request=request)
+
+ # Handle the response
+ for response in page_result:
+ print(response)
+
Args:
request (Union[google.cloud.aiplatform_v1beta1.types.ListDataItemsRequest, dict]):
The request object. Request message for
@@ -928,6 +1118,25 @@ async def get_annotation_spec(
) -> annotation_spec.AnnotationSpec:
r"""Gets an AnnotationSpec.
+ .. code-block::
+
+ from google.cloud import aiplatform_v1beta1
+
+ def sample_get_annotation_spec():
+ # Create a client
+ client = aiplatform_v1beta1.DatasetServiceClient()
+
+ # Initialize request argument(s)
+ request = aiplatform_v1beta1.GetAnnotationSpecRequest(
+ name="name_value",
+ )
+
+ # Make the request
+ response = client.get_annotation_spec(request=request)
+
+ # Handle the response
+ print(response)
+
Args:
request (Union[google.cloud.aiplatform_v1beta1.types.GetAnnotationSpecRequest, dict]):
The request object. Request message for
@@ -1000,6 +1209,26 @@ async def list_annotations(
) -> pagers.ListAnnotationsAsyncPager:
r"""Lists Annotations belongs to a dataitem
+ .. code-block::
+
+ from google.cloud import aiplatform_v1beta1
+
+ def sample_list_annotations():
+ # Create a client
+ client = aiplatform_v1beta1.DatasetServiceClient()
+
+ # Initialize request argument(s)
+ request = aiplatform_v1beta1.ListAnnotationsRequest(
+ parent="parent_value",
+ )
+
+ # Make the request
+ page_result = client.list_annotations(request=request)
+
+ # Handle the response
+ for response in page_result:
+ print(response)
+
Args:
request (Union[google.cloud.aiplatform_v1beta1.types.ListAnnotationsRequest, dict]):
The request object. Request message for
diff --git a/google/cloud/aiplatform_v1beta1/services/dataset_service/client.py b/google/cloud/aiplatform_v1beta1/services/dataset_service/client.py
index 3eb3481772..a063ce1a0f 100644
--- a/google/cloud/aiplatform_v1beta1/services/dataset_service/client.py
+++ b/google/cloud/aiplatform_v1beta1/services/dataset_service/client.py
@@ -487,6 +487,36 @@ def create_dataset(
) -> gac_operation.Operation:
r"""Creates a Dataset.
+
+ .. code-block::
+
+ from google.cloud import aiplatform_v1beta1
+
+ def sample_create_dataset():
+ # Create a client
+ client = aiplatform_v1beta1.DatasetServiceClient()
+
+ # Initialize request argument(s)
+ dataset = aiplatform_v1beta1.Dataset()
+ dataset.display_name = "display_name_value"
+ dataset.metadata_schema_uri = "metadata_schema_uri_value"
+ dataset.metadata.null_value = "NULL_VALUE"
+
+ request = aiplatform_v1beta1.CreateDatasetRequest(
+ parent="parent_value",
+ dataset=dataset,
+ )
+
+ # Make the request
+ operation = client.create_dataset(request=request)
+
+ print("Waiting for operation to complete...")
+
+ response = operation.result()
+
+ # Handle the response
+ print(response)
+
Args:
request (Union[google.cloud.aiplatform_v1beta1.types.CreateDatasetRequest, dict]):
The request object. Request message for
@@ -577,6 +607,26 @@ def get_dataset(
) -> dataset.Dataset:
r"""Gets a Dataset.
+
+ .. code-block::
+
+ from google.cloud import aiplatform_v1beta1
+
+ def sample_get_dataset():
+ # Create a client
+ client = aiplatform_v1beta1.DatasetServiceClient()
+
+ # Initialize request argument(s)
+ request = aiplatform_v1beta1.GetDatasetRequest(
+ name="name_value",
+ )
+
+ # Make the request
+ response = client.get_dataset(request=request)
+
+ # Handle the response
+ print(response)
+
Args:
request (Union[google.cloud.aiplatform_v1beta1.types.GetDatasetRequest, dict]):
The request object. Request message for
@@ -649,6 +699,31 @@ def update_dataset(
) -> gca_dataset.Dataset:
r"""Updates a Dataset.
+
+ .. code-block::
+
+ from google.cloud import aiplatform_v1beta1
+
+ def sample_update_dataset():
+ # Create a client
+ client = aiplatform_v1beta1.DatasetServiceClient()
+
+ # Initialize request argument(s)
+ dataset = aiplatform_v1beta1.Dataset()
+ dataset.display_name = "display_name_value"
+ dataset.metadata_schema_uri = "metadata_schema_uri_value"
+ dataset.metadata.null_value = "NULL_VALUE"
+
+ request = aiplatform_v1beta1.UpdateDatasetRequest(
+ dataset=dataset,
+ )
+
+ # Make the request
+ response = client.update_dataset(request=request)
+
+ # Handle the response
+ print(response)
+
Args:
request (Union[google.cloud.aiplatform_v1beta1.types.UpdateDatasetRequest, dict]):
The request object. Request message for
@@ -737,6 +812,27 @@ def list_datasets(
) -> pagers.ListDatasetsPager:
r"""Lists Datasets in a Location.
+
+ .. code-block::
+
+ from google.cloud import aiplatform_v1beta1
+
+ def sample_list_datasets():
+ # Create a client
+ client = aiplatform_v1beta1.DatasetServiceClient()
+
+ # Initialize request argument(s)
+ request = aiplatform_v1beta1.ListDatasetsRequest(
+ parent="parent_value",
+ )
+
+ # Make the request
+ page_result = client.list_datasets(request=request)
+
+ # Handle the response
+ for response in page_result:
+ print(response)
+
Args:
request (Union[google.cloud.aiplatform_v1beta1.types.ListDatasetsRequest, dict]):
The request object. Request message for
@@ -817,6 +913,30 @@ def delete_dataset(
) -> gac_operation.Operation:
r"""Deletes a Dataset.
+
+ .. code-block::
+
+ from google.cloud import aiplatform_v1beta1
+
+ def sample_delete_dataset():
+ # Create a client
+ client = aiplatform_v1beta1.DatasetServiceClient()
+
+ # Initialize request argument(s)
+ request = aiplatform_v1beta1.DeleteDatasetRequest(
+ name="name_value",
+ )
+
+ # Make the request
+ operation = client.delete_dataset(request=request)
+
+ print("Waiting for operation to complete...")
+
+ response = operation.result()
+
+ # Handle the response
+ print(response)
+
Args:
request (Union[google.cloud.aiplatform_v1beta1.types.DeleteDatasetRequest, dict]):
The request object. Request message for
@@ -911,6 +1031,35 @@ def import_data(
) -> gac_operation.Operation:
r"""Imports data into a Dataset.
+
+ .. code-block::
+
+ from google.cloud import aiplatform_v1beta1
+
+ def sample_import_data():
+ # Create a client
+ client = aiplatform_v1beta1.DatasetServiceClient()
+
+ # Initialize request argument(s)
+ import_configs = aiplatform_v1beta1.ImportDataConfig()
+ import_configs.gcs_source.uris = ['uris_value_1', 'uris_value_2']
+ import_configs.import_schema_uri = "import_schema_uri_value"
+
+ request = aiplatform_v1beta1.ImportDataRequest(
+ name="name_value",
+ import_configs=import_configs,
+ )
+
+ # Make the request
+ operation = client.import_data(request=request)
+
+ print("Waiting for operation to complete...")
+
+ response = operation.result()
+
+ # Handle the response
+ print(response)
+
Args:
request (Union[google.cloud.aiplatform_v1beta1.types.ImportDataRequest, dict]):
The request object. Request message for
@@ -1005,6 +1154,34 @@ def export_data(
) -> gac_operation.Operation:
r"""Exports data from a Dataset.
+
+ .. code-block::
+
+ from google.cloud import aiplatform_v1beta1
+
+ def sample_export_data():
+ # Create a client
+ client = aiplatform_v1beta1.DatasetServiceClient()
+
+ # Initialize request argument(s)
+ export_config = aiplatform_v1beta1.ExportDataConfig()
+ export_config.gcs_destination.output_uri_prefix = "output_uri_prefix_value"
+
+ request = aiplatform_v1beta1.ExportDataRequest(
+ name="name_value",
+ export_config=export_config,
+ )
+
+ # Make the request
+ operation = client.export_data(request=request)
+
+ print("Waiting for operation to complete...")
+
+ response = operation.result()
+
+ # Handle the response
+ print(response)
+
Args:
request (Union[google.cloud.aiplatform_v1beta1.types.ExportDataRequest, dict]):
The request object. Request message for
@@ -1097,6 +1274,27 @@ def list_data_items(
) -> pagers.ListDataItemsPager:
r"""Lists DataItems in a Dataset.
+
+ .. code-block::
+
+ from google.cloud import aiplatform_v1beta1
+
+ def sample_list_data_items():
+ # Create a client
+ client = aiplatform_v1beta1.DatasetServiceClient()
+
+ # Initialize request argument(s)
+ request = aiplatform_v1beta1.ListDataItemsRequest(
+ parent="parent_value",
+ )
+
+ # Make the request
+ page_result = client.list_data_items(request=request)
+
+ # Handle the response
+ for response in page_result:
+ print(response)
+
Args:
request (Union[google.cloud.aiplatform_v1beta1.types.ListDataItemsRequest, dict]):
The request object. Request message for
@@ -1178,6 +1376,26 @@ def get_annotation_spec(
) -> annotation_spec.AnnotationSpec:
r"""Gets an AnnotationSpec.
+
+ .. code-block::
+
+ from google.cloud import aiplatform_v1beta1
+
+ def sample_get_annotation_spec():
+ # Create a client
+ client = aiplatform_v1beta1.DatasetServiceClient()
+
+ # Initialize request argument(s)
+ request = aiplatform_v1beta1.GetAnnotationSpecRequest(
+ name="name_value",
+ )
+
+ # Make the request
+ response = client.get_annotation_spec(request=request)
+
+ # Handle the response
+ print(response)
+
Args:
request (Union[google.cloud.aiplatform_v1beta1.types.GetAnnotationSpecRequest, dict]):
The request object. Request message for
@@ -1250,6 +1468,27 @@ def list_annotations(
) -> pagers.ListAnnotationsPager:
r"""Lists Annotations belongs to a dataitem
+
+ .. code-block::
+
+ from google.cloud import aiplatform_v1beta1
+
+ def sample_list_annotations():
+ # Create a client
+ client = aiplatform_v1beta1.DatasetServiceClient()
+
+ # Initialize request argument(s)
+ request = aiplatform_v1beta1.ListAnnotationsRequest(
+ parent="parent_value",
+ )
+
+ # Make the request
+ page_result = client.list_annotations(request=request)
+
+ # Handle the response
+ for response in page_result:
+ print(response)
+
Args:
request (Union[google.cloud.aiplatform_v1beta1.types.ListAnnotationsRequest, dict]):
The request object. Request message for
diff --git a/google/cloud/aiplatform_v1beta1/services/endpoint_service/async_client.py b/google/cloud/aiplatform_v1beta1/services/endpoint_service/async_client.py
index 689570f6ce..45ae7863ff 100644
--- a/google/cloud/aiplatform_v1beta1/services/endpoint_service/async_client.py
+++ b/google/cloud/aiplatform_v1beta1/services/endpoint_service/async_client.py
@@ -234,6 +234,33 @@ async def create_endpoint(
) -> operation_async.AsyncOperation:
r"""Creates an Endpoint.
+ .. code-block::
+
+ from google.cloud import aiplatform_v1beta1
+
+ def sample_create_endpoint():
+ # Create a client
+ client = aiplatform_v1beta1.EndpointServiceClient()
+
+ # Initialize request argument(s)
+ endpoint = aiplatform_v1beta1.Endpoint()
+ endpoint.display_name = "display_name_value"
+
+ request = aiplatform_v1beta1.CreateEndpointRequest(
+ parent="parent_value",
+ endpoint=endpoint,
+ )
+
+ # Make the request
+ operation = client.create_endpoint(request=request)
+
+ print("Waiting for operation to complete...")
+
+ response = operation.result()
+
+ # Handle the response
+ print(response)
+
Args:
request (Union[google.cloud.aiplatform_v1beta1.types.CreateEndpointRequest, dict]):
The request object. Request message for
@@ -340,6 +367,25 @@ async def get_endpoint(
) -> endpoint.Endpoint:
r"""Gets an Endpoint.
+ .. code-block::
+
+ from google.cloud import aiplatform_v1beta1
+
+ def sample_get_endpoint():
+ # Create a client
+ client = aiplatform_v1beta1.EndpointServiceClient()
+
+ # Initialize request argument(s)
+ request = aiplatform_v1beta1.GetEndpointRequest(
+ name="name_value",
+ )
+
+ # Make the request
+ response = client.get_endpoint(request=request)
+
+ # Handle the response
+ print(response)
+
Args:
request (Union[google.cloud.aiplatform_v1beta1.types.GetEndpointRequest, dict]):
The request object. Request message for
@@ -412,6 +458,26 @@ async def list_endpoints(
) -> pagers.ListEndpointsAsyncPager:
r"""Lists Endpoints in a Location.
+ .. code-block::
+
+ from google.cloud import aiplatform_v1beta1
+
+ def sample_list_endpoints():
+ # Create a client
+ client = aiplatform_v1beta1.EndpointServiceClient()
+
+ # Initialize request argument(s)
+ request = aiplatform_v1beta1.ListEndpointsRequest(
+ parent="parent_value",
+ )
+
+ # Make the request
+ page_result = client.list_endpoints(request=request)
+
+ # Handle the response
+ for response in page_result:
+ print(response)
+
Args:
request (Union[google.cloud.aiplatform_v1beta1.types.ListEndpointsRequest, dict]):
The request object. Request message for
@@ -494,6 +560,28 @@ async def update_endpoint(
) -> gca_endpoint.Endpoint:
r"""Updates an Endpoint.
+ .. code-block::
+
+ from google.cloud import aiplatform_v1beta1
+
+ def sample_update_endpoint():
+ # Create a client
+ client = aiplatform_v1beta1.EndpointServiceClient()
+
+ # Initialize request argument(s)
+ endpoint = aiplatform_v1beta1.Endpoint()
+ endpoint.display_name = "display_name_value"
+
+ request = aiplatform_v1beta1.UpdateEndpointRequest(
+ endpoint=endpoint,
+ )
+
+ # Make the request
+ response = client.update_endpoint(request=request)
+
+ # Handle the response
+ print(response)
+
Args:
request (Union[google.cloud.aiplatform_v1beta1.types.UpdateEndpointRequest, dict]):
The request object. Request message for
@@ -577,6 +665,29 @@ async def delete_endpoint(
) -> operation_async.AsyncOperation:
r"""Deletes an Endpoint.
+ .. code-block::
+
+ from google.cloud import aiplatform_v1beta1
+
+ def sample_delete_endpoint():
+ # Create a client
+ client = aiplatform_v1beta1.EndpointServiceClient()
+
+ # Initialize request argument(s)
+ request = aiplatform_v1beta1.DeleteEndpointRequest(
+ name="name_value",
+ )
+
+ # Make the request
+ operation = client.delete_endpoint(request=request)
+
+ print("Waiting for operation to complete...")
+
+ response = operation.result()
+
+ # Handle the response
+ print(response)
+
Args:
request (Union[google.cloud.aiplatform_v1beta1.types.DeleteEndpointRequest, dict]):
The request object. Request message for
@@ -675,6 +786,35 @@ async def deploy_model(
r"""Deploys a Model into this Endpoint, creating a
DeployedModel within it.
+
+ .. code-block::
+
+ from google.cloud import aiplatform_v1beta1
+
+ def sample_deploy_model():
+ # Create a client
+ client = aiplatform_v1beta1.EndpointServiceClient()
+
+ # Initialize request argument(s)
+ deployed_model = aiplatform_v1beta1.DeployedModel()
+ deployed_model.dedicated_resources.min_replica_count = 1803
+ deployed_model.model = "model_value"
+
+ request = aiplatform_v1beta1.DeployModelRequest(
+ endpoint="endpoint_value",
+ deployed_model=deployed_model,
+ )
+
+ # Make the request
+ operation = client.deploy_model(request=request)
+
+ print("Waiting for operation to complete...")
+
+ response = operation.result()
+
+ # Handle the response
+ print(response)
+
Args:
request (Union[google.cloud.aiplatform_v1beta1.types.DeployModelRequest, dict]):
The request object. Request message for
@@ -801,6 +941,31 @@ async def undeploy_model(
DeployedModel from it, and freeing all resources it's
using.
+
+ .. code-block::
+
+ from google.cloud import aiplatform_v1beta1
+
+ def sample_undeploy_model():
+ # Create a client
+ client = aiplatform_v1beta1.EndpointServiceClient()
+
+ # Initialize request argument(s)
+ request = aiplatform_v1beta1.UndeployModelRequest(
+ endpoint="endpoint_value",
+ deployed_model_id="deployed_model_id_value",
+ )
+
+ # Make the request
+ operation = client.undeploy_model(request=request)
+
+ print("Waiting for operation to complete...")
+
+ response = operation.result()
+
+ # Handle the response
+ print(response)
+
Args:
request (Union[google.cloud.aiplatform_v1beta1.types.UndeployModelRequest, dict]):
The request object. Request message for
diff --git a/google/cloud/aiplatform_v1beta1/services/endpoint_service/client.py b/google/cloud/aiplatform_v1beta1/services/endpoint_service/client.py
index d9af51ae74..2b296fcea3 100644
--- a/google/cloud/aiplatform_v1beta1/services/endpoint_service/client.py
+++ b/google/cloud/aiplatform_v1beta1/services/endpoint_service/client.py
@@ -472,6 +472,34 @@ def create_endpoint(
) -> gac_operation.Operation:
r"""Creates an Endpoint.
+
+ .. code-block::
+
+ from google.cloud import aiplatform_v1beta1
+
+ def sample_create_endpoint():
+ # Create a client
+ client = aiplatform_v1beta1.EndpointServiceClient()
+
+ # Initialize request argument(s)
+ endpoint = aiplatform_v1beta1.Endpoint()
+ endpoint.display_name = "display_name_value"
+
+ request = aiplatform_v1beta1.CreateEndpointRequest(
+ parent="parent_value",
+ endpoint=endpoint,
+ )
+
+ # Make the request
+ operation = client.create_endpoint(request=request)
+
+ print("Waiting for operation to complete...")
+
+ response = operation.result()
+
+ # Handle the response
+ print(response)
+
Args:
request (Union[google.cloud.aiplatform_v1beta1.types.CreateEndpointRequest, dict]):
The request object. Request message for
@@ -578,6 +606,26 @@ def get_endpoint(
) -> endpoint.Endpoint:
r"""Gets an Endpoint.
+
+ .. code-block::
+
+ from google.cloud import aiplatform_v1beta1
+
+ def sample_get_endpoint():
+ # Create a client
+ client = aiplatform_v1beta1.EndpointServiceClient()
+
+ # Initialize request argument(s)
+ request = aiplatform_v1beta1.GetEndpointRequest(
+ name="name_value",
+ )
+
+ # Make the request
+ response = client.get_endpoint(request=request)
+
+ # Handle the response
+ print(response)
+
Args:
request (Union[google.cloud.aiplatform_v1beta1.types.GetEndpointRequest, dict]):
The request object. Request message for
@@ -650,6 +698,27 @@ def list_endpoints(
) -> pagers.ListEndpointsPager:
r"""Lists Endpoints in a Location.
+
+ .. code-block::
+
+ from google.cloud import aiplatform_v1beta1
+
+ def sample_list_endpoints():
+ # Create a client
+ client = aiplatform_v1beta1.EndpointServiceClient()
+
+ # Initialize request argument(s)
+ request = aiplatform_v1beta1.ListEndpointsRequest(
+ parent="parent_value",
+ )
+
+ # Make the request
+ page_result = client.list_endpoints(request=request)
+
+ # Handle the response
+ for response in page_result:
+ print(response)
+
Args:
request (Union[google.cloud.aiplatform_v1beta1.types.ListEndpointsRequest, dict]):
The request object. Request message for
@@ -732,6 +801,29 @@ def update_endpoint(
) -> gca_endpoint.Endpoint:
r"""Updates an Endpoint.
+
+ .. code-block::
+
+ from google.cloud import aiplatform_v1beta1
+
+ def sample_update_endpoint():
+ # Create a client
+ client = aiplatform_v1beta1.EndpointServiceClient()
+
+ # Initialize request argument(s)
+ endpoint = aiplatform_v1beta1.Endpoint()
+ endpoint.display_name = "display_name_value"
+
+ request = aiplatform_v1beta1.UpdateEndpointRequest(
+ endpoint=endpoint,
+ )
+
+ # Make the request
+ response = client.update_endpoint(request=request)
+
+ # Handle the response
+ print(response)
+
Args:
request (Union[google.cloud.aiplatform_v1beta1.types.UpdateEndpointRequest, dict]):
The request object. Request message for
@@ -815,6 +907,30 @@ def delete_endpoint(
) -> gac_operation.Operation:
r"""Deletes an Endpoint.
+
+ .. code-block::
+
+ from google.cloud import aiplatform_v1beta1
+
+ def sample_delete_endpoint():
+ # Create a client
+ client = aiplatform_v1beta1.EndpointServiceClient()
+
+ # Initialize request argument(s)
+ request = aiplatform_v1beta1.DeleteEndpointRequest(
+ name="name_value",
+ )
+
+ # Make the request
+ operation = client.delete_endpoint(request=request)
+
+ print("Waiting for operation to complete...")
+
+ response = operation.result()
+
+ # Handle the response
+ print(response)
+
Args:
request (Union[google.cloud.aiplatform_v1beta1.types.DeleteEndpointRequest, dict]):
The request object. Request message for
@@ -913,6 +1029,36 @@ def deploy_model(
r"""Deploys a Model into this Endpoint, creating a
DeployedModel within it.
+
+
+ .. code-block::
+
+ from google.cloud import aiplatform_v1beta1
+
+ def sample_deploy_model():
+ # Create a client
+ client = aiplatform_v1beta1.EndpointServiceClient()
+
+ # Initialize request argument(s)
+ deployed_model = aiplatform_v1beta1.DeployedModel()
+ deployed_model.dedicated_resources.min_replica_count = 1803
+ deployed_model.model = "model_value"
+
+ request = aiplatform_v1beta1.DeployModelRequest(
+ endpoint="endpoint_value",
+ deployed_model=deployed_model,
+ )
+
+ # Make the request
+ operation = client.deploy_model(request=request)
+
+ print("Waiting for operation to complete...")
+
+ response = operation.result()
+
+ # Handle the response
+ print(response)
+
Args:
request (Union[google.cloud.aiplatform_v1beta1.types.DeployModelRequest, dict]):
The request object. Request message for
@@ -1038,6 +1184,32 @@ def undeploy_model(
DeployedModel from it, and freeing all resources it's
using.
+
+
+ .. code-block::
+
+ from google.cloud import aiplatform_v1beta1
+
+ def sample_undeploy_model():
+ # Create a client
+ client = aiplatform_v1beta1.EndpointServiceClient()
+
+ # Initialize request argument(s)
+ request = aiplatform_v1beta1.UndeployModelRequest(
+ endpoint="endpoint_value",
+ deployed_model_id="deployed_model_id_value",
+ )
+
+ # Make the request
+ operation = client.undeploy_model(request=request)
+
+ print("Waiting for operation to complete...")
+
+ response = operation.result()
+
+ # Handle the response
+ print(response)
+
Args:
request (Union[google.cloud.aiplatform_v1beta1.types.UndeployModelRequest, dict]):
The request object. Request message for
diff --git a/google/cloud/aiplatform_v1beta1/services/featurestore_online_serving_service/async_client.py b/google/cloud/aiplatform_v1beta1/services/featurestore_online_serving_service/async_client.py
index c26fa4b401..cd8f96fb05 100644
--- a/google/cloud/aiplatform_v1beta1/services/featurestore_online_serving_service/async_client.py
+++ b/google/cloud/aiplatform_v1beta1/services/featurestore_online_serving_service/async_client.py
@@ -244,6 +244,31 @@ async def read_feature_values(
entities of an EntityType, please use
StreamingReadFeatureValues.
+
+ .. code-block::
+
+ from google.cloud import aiplatform_v1beta1
+
+ def sample_read_feature_values():
+ # Create a client
+ client = aiplatform_v1beta1.FeaturestoreOnlineServingServiceClient()
+
+ # Initialize request argument(s)
+ feature_selector = aiplatform_v1beta1.FeatureSelector()
+ feature_selector.id_matcher.ids = ['ids_value_1', 'ids_value_2']
+
+ request = aiplatform_v1beta1.ReadFeatureValuesRequest(
+ entity_type="entity_type_value",
+ entity_id="entity_id_value",
+ feature_selector=feature_selector,
+ )
+
+ # Make the request
+ response = client.read_feature_values(request=request)
+
+ # Handle the response
+ print(response)
+
Args:
request (Union[google.cloud.aiplatform_v1beta1.types.ReadFeatureValuesRequest, dict]):
The request object. Request message for
@@ -327,6 +352,32 @@ def streaming_read_feature_values(
on their size, data for different entities may be broken
up across multiple responses.
+
+ .. code-block::
+
+ from google.cloud import aiplatform_v1beta1
+
+ def sample_streaming_read_feature_values():
+ # Create a client
+ client = aiplatform_v1beta1.FeaturestoreOnlineServingServiceClient()
+
+ # Initialize request argument(s)
+ feature_selector = aiplatform_v1beta1.FeatureSelector()
+ feature_selector.id_matcher.ids = ['ids_value_1', 'ids_value_2']
+
+ request = aiplatform_v1beta1.StreamingReadFeatureValuesRequest(
+ entity_type="entity_type_value",
+ entity_ids=['entity_ids_value_1', 'entity_ids_value_2'],
+ feature_selector=feature_selector,
+ )
+
+ # Make the request
+ stream = client.streaming_read_feature_values(request=request)
+
+ # Handle the response
+ for response in stream:
+ print(response)
+
Args:
request (Union[google.cloud.aiplatform_v1beta1.types.StreamingReadFeatureValuesRequest, dict]):
The request object. Request message for
diff --git a/google/cloud/aiplatform_v1beta1/services/featurestore_online_serving_service/client.py b/google/cloud/aiplatform_v1beta1/services/featurestore_online_serving_service/client.py
index bbc30e3fb6..5aa2e328c7 100644
--- a/google/cloud/aiplatform_v1beta1/services/featurestore_online_serving_service/client.py
+++ b/google/cloud/aiplatform_v1beta1/services/featurestore_online_serving_service/client.py
@@ -430,6 +430,32 @@ def read_feature_values(
entities of an EntityType, please use
StreamingReadFeatureValues.
+
+
+ .. code-block::
+
+ from google.cloud import aiplatform_v1beta1
+
+ def sample_read_feature_values():
+ # Create a client
+ client = aiplatform_v1beta1.FeaturestoreOnlineServingServiceClient()
+
+ # Initialize request argument(s)
+ feature_selector = aiplatform_v1beta1.FeatureSelector()
+ feature_selector.id_matcher.ids = ['ids_value_1', 'ids_value_2']
+
+ request = aiplatform_v1beta1.ReadFeatureValuesRequest(
+ entity_type="entity_type_value",
+ entity_id="entity_id_value",
+ feature_selector=feature_selector,
+ )
+
+ # Make the request
+ response = client.read_feature_values(request=request)
+
+ # Handle the response
+ print(response)
+
Args:
request (Union[google.cloud.aiplatform_v1beta1.types.ReadFeatureValuesRequest, dict]):
The request object. Request message for
@@ -513,6 +539,33 @@ def streaming_read_feature_values(
on their size, data for different entities may be broken
up across multiple responses.
+
+
+ .. code-block::
+
+ from google.cloud import aiplatform_v1beta1
+
+ def sample_streaming_read_feature_values():
+ # Create a client
+ client = aiplatform_v1beta1.FeaturestoreOnlineServingServiceClient()
+
+ # Initialize request argument(s)
+ feature_selector = aiplatform_v1beta1.FeatureSelector()
+ feature_selector.id_matcher.ids = ['ids_value_1', 'ids_value_2']
+
+ request = aiplatform_v1beta1.StreamingReadFeatureValuesRequest(
+ entity_type="entity_type_value",
+ entity_ids=['entity_ids_value_1', 'entity_ids_value_2'],
+ feature_selector=feature_selector,
+ )
+
+ # Make the request
+ stream = client.streaming_read_feature_values(request=request)
+
+ # Handle the response
+ for response in stream:
+ print(response)
+
Args:
request (Union[google.cloud.aiplatform_v1beta1.types.StreamingReadFeatureValuesRequest, dict]):
The request object. Request message for
diff --git a/google/cloud/aiplatform_v1beta1/services/featurestore_service/async_client.py b/google/cloud/aiplatform_v1beta1/services/featurestore_service/async_client.py
index 6d6a4bb32e..019d4b09f2 100644
--- a/google/cloud/aiplatform_v1beta1/services/featurestore_service/async_client.py
+++ b/google/cloud/aiplatform_v1beta1/services/featurestore_service/async_client.py
@@ -242,6 +242,31 @@ async def create_featurestore(
r"""Creates a new Featurestore in a given project and
location.
+
+ .. code-block::
+
+ from google.cloud import aiplatform_v1beta1
+
+ def sample_create_featurestore():
+ # Create a client
+ client = aiplatform_v1beta1.FeaturestoreServiceClient()
+
+ # Initialize request argument(s)
+ request = aiplatform_v1beta1.CreateFeaturestoreRequest(
+ parent="parent_value",
+ featurestore_id="featurestore_id_value",
+ )
+
+ # Make the request
+ operation = client.create_featurestore(request=request)
+
+ print("Waiting for operation to complete...")
+
+ response = operation.result()
+
+ # Handle the response
+ print(response)
+
Args:
request (Union[google.cloud.aiplatform_v1beta1.types.CreateFeaturestoreRequest, dict]):
The request object. Request message for
@@ -350,6 +375,25 @@ async def get_featurestore(
) -> featurestore.Featurestore:
r"""Gets details of a single Featurestore.
+ .. code-block::
+
+ from google.cloud import aiplatform_v1beta1
+
+ def sample_get_featurestore():
+ # Create a client
+ client = aiplatform_v1beta1.FeaturestoreServiceClient()
+
+ # Initialize request argument(s)
+ request = aiplatform_v1beta1.GetFeaturestoreRequest(
+ name="name_value",
+ )
+
+ # Make the request
+ response = client.get_featurestore(request=request)
+
+ # Handle the response
+ print(response)
+
Args:
request (Union[google.cloud.aiplatform_v1beta1.types.GetFeaturestoreRequest, dict]):
The request object. Request message for
@@ -424,6 +468,26 @@ async def list_featurestores(
) -> pagers.ListFeaturestoresAsyncPager:
r"""Lists Featurestores in a given project and location.
+ .. code-block::
+
+ from google.cloud import aiplatform_v1beta1
+
+ def sample_list_featurestores():
+ # Create a client
+ client = aiplatform_v1beta1.FeaturestoreServiceClient()
+
+ # Initialize request argument(s)
+ request = aiplatform_v1beta1.ListFeaturestoresRequest(
+ parent="parent_value",
+ )
+
+ # Make the request
+ page_result = client.list_featurestores(request=request)
+
+ # Handle the response
+ for response in page_result:
+ print(response)
+
Args:
request (Union[google.cloud.aiplatform_v1beta1.types.ListFeaturestoresRequest, dict]):
The request object. Request message for
@@ -506,6 +570,28 @@ async def update_featurestore(
) -> operation_async.AsyncOperation:
r"""Updates the parameters of a single Featurestore.
+ .. code-block::
+
+ from google.cloud import aiplatform_v1beta1
+
+ def sample_update_featurestore():
+ # Create a client
+ client = aiplatform_v1beta1.FeaturestoreServiceClient()
+
+ # Initialize request argument(s)
+ request = aiplatform_v1beta1.UpdateFeaturestoreRequest(
+ )
+
+ # Make the request
+ operation = client.update_featurestore(request=request)
+
+ print("Waiting for operation to complete...")
+
+ response = operation.result()
+
+ # Handle the response
+ print(response)
+
Args:
request (Union[google.cloud.aiplatform_v1beta1.types.UpdateFeaturestoreRequest, dict]):
The request object. Request message for
@@ -615,6 +701,30 @@ async def delete_featurestore(
any EntityTypes or ``force`` must be set to true for the request
to succeed.
+
+ .. code-block::
+
+ from google.cloud import aiplatform_v1beta1
+
+ def sample_delete_featurestore():
+ # Create a client
+ client = aiplatform_v1beta1.FeaturestoreServiceClient()
+
+ # Initialize request argument(s)
+ request = aiplatform_v1beta1.DeleteFeaturestoreRequest(
+ name="name_value",
+ )
+
+ # Make the request
+ operation = client.delete_featurestore(request=request)
+
+ print("Waiting for operation to complete...")
+
+ response = operation.result()
+
+ # Handle the response
+ print(response)
+
Args:
request (Union[google.cloud.aiplatform_v1beta1.types.DeleteFeaturestoreRequest, dict]):
The request object. Request message for
@@ -722,6 +832,30 @@ async def create_entity_type(
) -> operation_async.AsyncOperation:
r"""Creates a new EntityType in a given Featurestore.
+ .. code-block::
+
+ from google.cloud import aiplatform_v1beta1
+
+ def sample_create_entity_type():
+ # Create a client
+ client = aiplatform_v1beta1.FeaturestoreServiceClient()
+
+ # Initialize request argument(s)
+ request = aiplatform_v1beta1.CreateEntityTypeRequest(
+ parent="parent_value",
+ entity_type_id="entity_type_id_value",
+ )
+
+ # Make the request
+ operation = client.create_entity_type(request=request)
+
+ print("Waiting for operation to complete...")
+
+ response = operation.result()
+
+ # Handle the response
+ print(response)
+
Args:
request (Union[google.cloud.aiplatform_v1beta1.types.CreateEntityTypeRequest, dict]):
The request object. Request message for
@@ -829,6 +963,25 @@ async def get_entity_type(
) -> entity_type.EntityType:
r"""Gets details of a single EntityType.
+ .. code-block::
+
+ from google.cloud import aiplatform_v1beta1
+
+ def sample_get_entity_type():
+ # Create a client
+ client = aiplatform_v1beta1.FeaturestoreServiceClient()
+
+ # Initialize request argument(s)
+ request = aiplatform_v1beta1.GetEntityTypeRequest(
+ name="name_value",
+ )
+
+ # Make the request
+ response = client.get_entity_type(request=request)
+
+ # Handle the response
+ print(response)
+
Args:
request (Union[google.cloud.aiplatform_v1beta1.types.GetEntityTypeRequest, dict]):
The request object. Request message for
@@ -904,6 +1057,26 @@ async def list_entity_types(
) -> pagers.ListEntityTypesAsyncPager:
r"""Lists EntityTypes in a given Featurestore.
+ .. code-block::
+
+ from google.cloud import aiplatform_v1beta1
+
+ def sample_list_entity_types():
+ # Create a client
+ client = aiplatform_v1beta1.FeaturestoreServiceClient()
+
+ # Initialize request argument(s)
+ request = aiplatform_v1beta1.ListEntityTypesRequest(
+ parent="parent_value",
+ )
+
+ # Make the request
+ page_result = client.list_entity_types(request=request)
+
+ # Handle the response
+ for response in page_result:
+ print(response)
+
Args:
request (Union[google.cloud.aiplatform_v1beta1.types.ListEntityTypesRequest, dict]):
The request object. Request message for
@@ -986,6 +1159,24 @@ async def update_entity_type(
) -> gca_entity_type.EntityType:
r"""Updates the parameters of a single EntityType.
+ .. code-block::
+
+ from google.cloud import aiplatform_v1beta1
+
+ def sample_update_entity_type():
+ # Create a client
+ client = aiplatform_v1beta1.FeaturestoreServiceClient()
+
+ # Initialize request argument(s)
+ request = aiplatform_v1beta1.UpdateEntityTypeRequest(
+ )
+
+ # Make the request
+ response = client.update_entity_type(request=request)
+
+ # Handle the response
+ print(response)
+
Args:
request (Union[google.cloud.aiplatform_v1beta1.types.UpdateEntityTypeRequest, dict]):
The request object. Request message for
@@ -1089,6 +1280,30 @@ async def delete_entity_type(
Features or ``force`` must be set to true for the request to
succeed.
+
+ .. code-block::
+
+ from google.cloud import aiplatform_v1beta1
+
+ def sample_delete_entity_type():
+ # Create a client
+ client = aiplatform_v1beta1.FeaturestoreServiceClient()
+
+ # Initialize request argument(s)
+ request = aiplatform_v1beta1.DeleteEntityTypeRequest(
+ name="name_value",
+ )
+
+ # Make the request
+ operation = client.delete_entity_type(request=request)
+
+ print("Waiting for operation to complete...")
+
+ response = operation.result()
+
+ # Handle the response
+ print(response)
+
Args:
request (Union[google.cloud.aiplatform_v1beta1.types.DeleteEntityTypeRequest, dict]):
The request object. Request message for
@@ -1195,6 +1410,34 @@ async def create_feature(
) -> operation_async.AsyncOperation:
r"""Creates a new Feature in a given EntityType.
+ .. code-block::
+
+ from google.cloud import aiplatform_v1beta1
+
+ def sample_create_feature():
+ # Create a client
+ client = aiplatform_v1beta1.FeaturestoreServiceClient()
+
+ # Initialize request argument(s)
+ feature = aiplatform_v1beta1.Feature()
+ feature.value_type = "BYTES"
+
+ request = aiplatform_v1beta1.CreateFeatureRequest(
+ parent="parent_value",
+ feature=feature,
+ feature_id="feature_id_value",
+ )
+
+ # Make the request
+ operation = client.create_feature(request=request)
+
+ print("Waiting for operation to complete...")
+
+ response = operation.result()
+
+ # Handle the response
+ print(response)
+
Args:
request (Union[google.cloud.aiplatform_v1beta1.types.CreateFeatureRequest, dict]):
The request object. Request message for
@@ -1302,6 +1545,35 @@ async def batch_create_features(
) -> operation_async.AsyncOperation:
r"""Creates a batch of Features in a given EntityType.
+ .. code-block::
+
+ from google.cloud import aiplatform_v1beta1
+
+ def sample_batch_create_features():
+ # Create a client
+ client = aiplatform_v1beta1.FeaturestoreServiceClient()
+
+ # Initialize request argument(s)
+ requests = aiplatform_v1beta1.CreateFeatureRequest()
+ requests.parent = "parent_value"
+ requests.feature.value_type = "BYTES"
+ requests.feature_id = "feature_id_value"
+
+ request = aiplatform_v1beta1.BatchCreateFeaturesRequest(
+ parent="parent_value",
+ requests=[requests],
+ )
+
+ # Make the request
+ operation = client.batch_create_features(request=request)
+
+ print("Waiting for operation to complete...")
+
+ response = operation.result()
+
+ # Handle the response
+ print(response)
+
Args:
request (Union[google.cloud.aiplatform_v1beta1.types.BatchCreateFeaturesRequest, dict]):
The request object. Request message for
@@ -1399,6 +1671,25 @@ async def get_feature(
) -> feature.Feature:
r"""Gets details of a single Feature.
+ .. code-block::
+
+ from google.cloud import aiplatform_v1beta1
+
+ def sample_get_feature():
+ # Create a client
+ client = aiplatform_v1beta1.FeaturestoreServiceClient()
+
+ # Initialize request argument(s)
+ request = aiplatform_v1beta1.GetFeatureRequest(
+ name="name_value",
+ )
+
+ # Make the request
+ response = client.get_feature(request=request)
+
+ # Handle the response
+ print(response)
+
Args:
request (Union[google.cloud.aiplatform_v1beta1.types.GetFeatureRequest, dict]):
The request object. Request message for
@@ -1473,6 +1764,26 @@ async def list_features(
) -> pagers.ListFeaturesAsyncPager:
r"""Lists Features in a given EntityType.
+ .. code-block::
+
+ from google.cloud import aiplatform_v1beta1
+
+ def sample_list_features():
+ # Create a client
+ client = aiplatform_v1beta1.FeaturestoreServiceClient()
+
+ # Initialize request argument(s)
+ request = aiplatform_v1beta1.ListFeaturesRequest(
+ parent="parent_value",
+ )
+
+ # Make the request
+ page_result = client.list_features(request=request)
+
+ # Handle the response
+ for response in page_result:
+ print(response)
+
Args:
request (Union[google.cloud.aiplatform_v1beta1.types.ListFeaturesRequest, dict]):
The request object. Request message for
@@ -1555,6 +1866,28 @@ async def update_feature(
) -> gca_feature.Feature:
r"""Updates the parameters of a single Feature.
+ .. code-block::
+
+ from google.cloud import aiplatform_v1beta1
+
+ def sample_update_feature():
+ # Create a client
+ client = aiplatform_v1beta1.FeaturestoreServiceClient()
+
+ # Initialize request argument(s)
+ feature = aiplatform_v1beta1.Feature()
+ feature.value_type = "BYTES"
+
+ request = aiplatform_v1beta1.UpdateFeatureRequest(
+ feature=feature,
+ )
+
+ # Make the request
+ response = client.update_feature(request=request)
+
+ # Handle the response
+ print(response)
+
Args:
request (Union[google.cloud.aiplatform_v1beta1.types.UpdateFeatureRequest, dict]):
The request object. Request message for
@@ -1654,6 +1987,29 @@ async def delete_feature(
) -> operation_async.AsyncOperation:
r"""Deletes a single Feature.
+ .. code-block::
+
+ from google.cloud import aiplatform_v1beta1
+
+ def sample_delete_feature():
+ # Create a client
+ client = aiplatform_v1beta1.FeaturestoreServiceClient()
+
+ # Initialize request argument(s)
+ request = aiplatform_v1beta1.DeleteFeatureRequest(
+ name="name_value",
+ )
+
+ # Make the request
+ operation = client.delete_feature(request=request)
+
+ print("Waiting for operation to complete...")
+
+ response = operation.result()
+
+ # Handle the response
+ print(response)
+
Args:
request (Union[google.cloud.aiplatform_v1beta1.types.DeleteFeatureRequest, dict]):
The request object. Request message for
@@ -1767,6 +2123,39 @@ async def import_feature_values(
or retention policy.
- Online serving cluster is under-provisioned.
+
+ .. code-block::
+
+ from google.cloud import aiplatform_v1beta1
+
+ def sample_import_feature_values():
+ # Create a client
+ client = aiplatform_v1beta1.FeaturestoreServiceClient()
+
+ # Initialize request argument(s)
+ avro_source = aiplatform_v1beta1.AvroSource()
+ avro_source.gcs_source.uris = ['uris_value_1', 'uris_value_2']
+
+ feature_specs = aiplatform_v1beta1.FeatureSpec()
+ feature_specs.id = "id_value"
+
+ request = aiplatform_v1beta1.ImportFeatureValuesRequest(
+ avro_source=avro_source,
+ feature_time_field="feature_time_field_value",
+ entity_type="entity_type_value",
+ feature_specs=[feature_specs],
+ )
+
+ # Make the request
+ operation = client.import_feature_values(request=request)
+
+ print("Waiting for operation to complete...")
+
+ response = operation.result()
+
+ # Handle the response
+ print(response)
+
Args:
request (Union[google.cloud.aiplatform_v1beta1.types.ImportFeatureValuesRequest, dict]):
The request object. Request message for
@@ -1859,6 +2248,43 @@ async def batch_read_feature_values(
correctness is guaranteed for Feature values of each
read instance as of each instance's read timestamp.
+
+ .. code-block::
+
+ from google.cloud import aiplatform_v1beta1
+
+ def sample_batch_read_feature_values():
+ # Create a client
+ client = aiplatform_v1beta1.FeaturestoreServiceClient()
+
+ # Initialize request argument(s)
+ csv_read_instances = aiplatform_v1beta1.CsvSource()
+ csv_read_instances.gcs_source.uris = ['uris_value_1', 'uris_value_2']
+
+ destination = aiplatform_v1beta1.FeatureValueDestination()
+ destination.bigquery_destination.output_uri = "output_uri_value"
+
+ entity_type_specs = aiplatform_v1beta1.EntityTypeSpec()
+ entity_type_specs.entity_type_id = "entity_type_id_value"
+ entity_type_specs.feature_selector.id_matcher.ids = ['ids_value_1', 'ids_value_2']
+
+ request = aiplatform_v1beta1.BatchReadFeatureValuesRequest(
+ csv_read_instances=csv_read_instances,
+ featurestore="featurestore_value",
+ destination=destination,
+ entity_type_specs=[entity_type_specs],
+ )
+
+ # Make the request
+ operation = client.batch_read_feature_values(request=request)
+
+ print("Waiting for operation to complete...")
+
+ response = operation.result()
+
+ # Handle the response
+ print(response)
+
Args:
request (Union[google.cloud.aiplatform_v1beta1.types.BatchReadFeatureValuesRequest, dict]):
The request object. Request message for
@@ -1946,6 +2372,38 @@ async def export_feature_values(
r"""Exports Feature values from all the entities of a
target EntityType.
+
+ .. code-block::
+
+ from google.cloud import aiplatform_v1beta1
+
+ def sample_export_feature_values():
+ # Create a client
+ client = aiplatform_v1beta1.FeaturestoreServiceClient()
+
+ # Initialize request argument(s)
+ destination = aiplatform_v1beta1.FeatureValueDestination()
+ destination.bigquery_destination.output_uri = "output_uri_value"
+
+ feature_selector = aiplatform_v1beta1.FeatureSelector()
+ feature_selector.id_matcher.ids = ['ids_value_1', 'ids_value_2']
+
+ request = aiplatform_v1beta1.ExportFeatureValuesRequest(
+ entity_type="entity_type_value",
+ destination=destination,
+ feature_selector=feature_selector,
+ )
+
+ # Make the request
+ operation = client.export_feature_values(request=request)
+
+ print("Waiting for operation to complete...")
+
+ response = operation.result()
+
+ # Handle the response
+ print(response)
+
Args:
request (Union[google.cloud.aiplatform_v1beta1.types.ExportFeatureValuesRequest, dict]):
The request object. Request message for
@@ -2034,6 +2492,27 @@ async def search_features(
r"""Searches Features matching a query in a given
project.
+
+ .. code-block::
+
+ from google.cloud import aiplatform_v1beta1
+
+ def sample_search_features():
+ # Create a client
+ client = aiplatform_v1beta1.FeaturestoreServiceClient()
+
+ # Initialize request argument(s)
+ request = aiplatform_v1beta1.SearchFeaturesRequest(
+ location="location_value",
+ )
+
+ # Make the request
+ page_result = client.search_features(request=request)
+
+ # Handle the response
+ for response in page_result:
+ print(response)
+
Args:
request (Union[google.cloud.aiplatform_v1beta1.types.SearchFeaturesRequest, dict]):
The request object. Request message for
diff --git a/google/cloud/aiplatform_v1beta1/services/featurestore_service/client.py b/google/cloud/aiplatform_v1beta1/services/featurestore_service/client.py
index 12b0ab724c..1e86771c91 100644
--- a/google/cloud/aiplatform_v1beta1/services/featurestore_service/client.py
+++ b/google/cloud/aiplatform_v1beta1/services/featurestore_service/client.py
@@ -475,6 +475,32 @@ def create_featurestore(
r"""Creates a new Featurestore in a given project and
location.
+
+
+ .. code-block::
+
+ from google.cloud import aiplatform_v1beta1
+
+ def sample_create_featurestore():
+ # Create a client
+ client = aiplatform_v1beta1.FeaturestoreServiceClient()
+
+ # Initialize request argument(s)
+ request = aiplatform_v1beta1.CreateFeaturestoreRequest(
+ parent="parent_value",
+ featurestore_id="featurestore_id_value",
+ )
+
+ # Make the request
+ operation = client.create_featurestore(request=request)
+
+ print("Waiting for operation to complete...")
+
+ response = operation.result()
+
+ # Handle the response
+ print(response)
+
Args:
request (Union[google.cloud.aiplatform_v1beta1.types.CreateFeaturestoreRequest, dict]):
The request object. Request message for
@@ -583,6 +609,26 @@ def get_featurestore(
) -> featurestore.Featurestore:
r"""Gets details of a single Featurestore.
+
+ .. code-block::
+
+ from google.cloud import aiplatform_v1beta1
+
+ def sample_get_featurestore():
+ # Create a client
+ client = aiplatform_v1beta1.FeaturestoreServiceClient()
+
+ # Initialize request argument(s)
+ request = aiplatform_v1beta1.GetFeaturestoreRequest(
+ name="name_value",
+ )
+
+ # Make the request
+ response = client.get_featurestore(request=request)
+
+ # Handle the response
+ print(response)
+
Args:
request (Union[google.cloud.aiplatform_v1beta1.types.GetFeaturestoreRequest, dict]):
The request object. Request message for
@@ -657,6 +703,27 @@ def list_featurestores(
) -> pagers.ListFeaturestoresPager:
r"""Lists Featurestores in a given project and location.
+
+ .. code-block::
+
+ from google.cloud import aiplatform_v1beta1
+
+ def sample_list_featurestores():
+ # Create a client
+ client = aiplatform_v1beta1.FeaturestoreServiceClient()
+
+ # Initialize request argument(s)
+ request = aiplatform_v1beta1.ListFeaturestoresRequest(
+ parent="parent_value",
+ )
+
+ # Make the request
+ page_result = client.list_featurestores(request=request)
+
+ # Handle the response
+ for response in page_result:
+ print(response)
+
Args:
request (Union[google.cloud.aiplatform_v1beta1.types.ListFeaturestoresRequest, dict]):
The request object. Request message for
@@ -739,6 +806,29 @@ def update_featurestore(
) -> gac_operation.Operation:
r"""Updates the parameters of a single Featurestore.
+
+ .. code-block::
+
+ from google.cloud import aiplatform_v1beta1
+
+ def sample_update_featurestore():
+ # Create a client
+ client = aiplatform_v1beta1.FeaturestoreServiceClient()
+
+ # Initialize request argument(s)
+ request = aiplatform_v1beta1.UpdateFeaturestoreRequest(
+ )
+
+ # Make the request
+ operation = client.update_featurestore(request=request)
+
+ print("Waiting for operation to complete...")
+
+ response = operation.result()
+
+ # Handle the response
+ print(response)
+
Args:
request (Union[google.cloud.aiplatform_v1beta1.types.UpdateFeaturestoreRequest, dict]):
The request object. Request message for
@@ -848,6 +938,31 @@ def delete_featurestore(
any EntityTypes or ``force`` must be set to true for the request
to succeed.
+
+
+ .. code-block::
+
+ from google.cloud import aiplatform_v1beta1
+
+ def sample_delete_featurestore():
+ # Create a client
+ client = aiplatform_v1beta1.FeaturestoreServiceClient()
+
+ # Initialize request argument(s)
+ request = aiplatform_v1beta1.DeleteFeaturestoreRequest(
+ name="name_value",
+ )
+
+ # Make the request
+ operation = client.delete_featurestore(request=request)
+
+ print("Waiting for operation to complete...")
+
+ response = operation.result()
+
+ # Handle the response
+ print(response)
+
Args:
request (Union[google.cloud.aiplatform_v1beta1.types.DeleteFeaturestoreRequest, dict]):
The request object. Request message for
@@ -955,6 +1070,31 @@ def create_entity_type(
) -> gac_operation.Operation:
r"""Creates a new EntityType in a given Featurestore.
+
+ .. code-block::
+
+ from google.cloud import aiplatform_v1beta1
+
+ def sample_create_entity_type():
+ # Create a client
+ client = aiplatform_v1beta1.FeaturestoreServiceClient()
+
+ # Initialize request argument(s)
+ request = aiplatform_v1beta1.CreateEntityTypeRequest(
+ parent="parent_value",
+ entity_type_id="entity_type_id_value",
+ )
+
+ # Make the request
+ operation = client.create_entity_type(request=request)
+
+ print("Waiting for operation to complete...")
+
+ response = operation.result()
+
+ # Handle the response
+ print(response)
+
Args:
request (Union[google.cloud.aiplatform_v1beta1.types.CreateEntityTypeRequest, dict]):
The request object. Request message for
@@ -1062,6 +1202,26 @@ def get_entity_type(
) -> entity_type.EntityType:
r"""Gets details of a single EntityType.
+
+ .. code-block::
+
+ from google.cloud import aiplatform_v1beta1
+
+ def sample_get_entity_type():
+ # Create a client
+ client = aiplatform_v1beta1.FeaturestoreServiceClient()
+
+ # Initialize request argument(s)
+ request = aiplatform_v1beta1.GetEntityTypeRequest(
+ name="name_value",
+ )
+
+ # Make the request
+ response = client.get_entity_type(request=request)
+
+ # Handle the response
+ print(response)
+
Args:
request (Union[google.cloud.aiplatform_v1beta1.types.GetEntityTypeRequest, dict]):
The request object. Request message for
@@ -1137,6 +1297,27 @@ def list_entity_types(
) -> pagers.ListEntityTypesPager:
r"""Lists EntityTypes in a given Featurestore.
+
+ .. code-block::
+
+ from google.cloud import aiplatform_v1beta1
+
+ def sample_list_entity_types():
+ # Create a client
+ client = aiplatform_v1beta1.FeaturestoreServiceClient()
+
+ # Initialize request argument(s)
+ request = aiplatform_v1beta1.ListEntityTypesRequest(
+ parent="parent_value",
+ )
+
+ # Make the request
+ page_result = client.list_entity_types(request=request)
+
+ # Handle the response
+ for response in page_result:
+ print(response)
+
Args:
request (Union[google.cloud.aiplatform_v1beta1.types.ListEntityTypesRequest, dict]):
The request object. Request message for
@@ -1219,6 +1400,25 @@ def update_entity_type(
) -> gca_entity_type.EntityType:
r"""Updates the parameters of a single EntityType.
+
+ .. code-block::
+
+ from google.cloud import aiplatform_v1beta1
+
+ def sample_update_entity_type():
+ # Create a client
+ client = aiplatform_v1beta1.FeaturestoreServiceClient()
+
+ # Initialize request argument(s)
+ request = aiplatform_v1beta1.UpdateEntityTypeRequest(
+ )
+
+ # Make the request
+ response = client.update_entity_type(request=request)
+
+ # Handle the response
+ print(response)
+
Args:
request (Union[google.cloud.aiplatform_v1beta1.types.UpdateEntityTypeRequest, dict]):
The request object. Request message for
@@ -1322,6 +1522,31 @@ def delete_entity_type(
Features or ``force`` must be set to true for the request to
succeed.
+
+
+ .. code-block::
+
+ from google.cloud import aiplatform_v1beta1
+
+ def sample_delete_entity_type():
+ # Create a client
+ client = aiplatform_v1beta1.FeaturestoreServiceClient()
+
+ # Initialize request argument(s)
+ request = aiplatform_v1beta1.DeleteEntityTypeRequest(
+ name="name_value",
+ )
+
+ # Make the request
+ operation = client.delete_entity_type(request=request)
+
+ print("Waiting for operation to complete...")
+
+ response = operation.result()
+
+ # Handle the response
+ print(response)
+
Args:
request (Union[google.cloud.aiplatform_v1beta1.types.DeleteEntityTypeRequest, dict]):
The request object. Request message for
@@ -1428,6 +1653,35 @@ def create_feature(
) -> gac_operation.Operation:
r"""Creates a new Feature in a given EntityType.
+
+ .. code-block::
+
+ from google.cloud import aiplatform_v1beta1
+
+ def sample_create_feature():
+ # Create a client
+ client = aiplatform_v1beta1.FeaturestoreServiceClient()
+
+ # Initialize request argument(s)
+ feature = aiplatform_v1beta1.Feature()
+ feature.value_type = "BYTES"
+
+ request = aiplatform_v1beta1.CreateFeatureRequest(
+ parent="parent_value",
+ feature=feature,
+ feature_id="feature_id_value",
+ )
+
+ # Make the request
+ operation = client.create_feature(request=request)
+
+ print("Waiting for operation to complete...")
+
+ response = operation.result()
+
+ # Handle the response
+ print(response)
+
Args:
request (Union[google.cloud.aiplatform_v1beta1.types.CreateFeatureRequest, dict]):
The request object. Request message for
@@ -1535,6 +1789,36 @@ def batch_create_features(
) -> gac_operation.Operation:
r"""Creates a batch of Features in a given EntityType.
+
+ .. code-block::
+
+ from google.cloud import aiplatform_v1beta1
+
+ def sample_batch_create_features():
+ # Create a client
+ client = aiplatform_v1beta1.FeaturestoreServiceClient()
+
+ # Initialize request argument(s)
+ requests = aiplatform_v1beta1.CreateFeatureRequest()
+ requests.parent = "parent_value"
+ requests.feature.value_type = "BYTES"
+ requests.feature_id = "feature_id_value"
+
+ request = aiplatform_v1beta1.BatchCreateFeaturesRequest(
+ parent="parent_value",
+ requests=[requests],
+ )
+
+ # Make the request
+ operation = client.batch_create_features(request=request)
+
+ print("Waiting for operation to complete...")
+
+ response = operation.result()
+
+ # Handle the response
+ print(response)
+
Args:
request (Union[google.cloud.aiplatform_v1beta1.types.BatchCreateFeaturesRequest, dict]):
The request object. Request message for
@@ -1632,6 +1916,26 @@ def get_feature(
) -> feature.Feature:
r"""Gets details of a single Feature.
+
+ .. code-block::
+
+ from google.cloud import aiplatform_v1beta1
+
+ def sample_get_feature():
+ # Create a client
+ client = aiplatform_v1beta1.FeaturestoreServiceClient()
+
+ # Initialize request argument(s)
+ request = aiplatform_v1beta1.GetFeatureRequest(
+ name="name_value",
+ )
+
+ # Make the request
+ response = client.get_feature(request=request)
+
+ # Handle the response
+ print(response)
+
Args:
request (Union[google.cloud.aiplatform_v1beta1.types.GetFeatureRequest, dict]):
The request object. Request message for
@@ -1706,6 +2010,27 @@ def list_features(
) -> pagers.ListFeaturesPager:
r"""Lists Features in a given EntityType.
+
+ .. code-block::
+
+ from google.cloud import aiplatform_v1beta1
+
+ def sample_list_features():
+ # Create a client
+ client = aiplatform_v1beta1.FeaturestoreServiceClient()
+
+ # Initialize request argument(s)
+ request = aiplatform_v1beta1.ListFeaturesRequest(
+ parent="parent_value",
+ )
+
+ # Make the request
+ page_result = client.list_features(request=request)
+
+ # Handle the response
+ for response in page_result:
+ print(response)
+
Args:
request (Union[google.cloud.aiplatform_v1beta1.types.ListFeaturesRequest, dict]):
The request object. Request message for
@@ -1788,6 +2113,29 @@ def update_feature(
) -> gca_feature.Feature:
r"""Updates the parameters of a single Feature.
+
+ .. code-block::
+
+ from google.cloud import aiplatform_v1beta1
+
+ def sample_update_feature():
+ # Create a client
+ client = aiplatform_v1beta1.FeaturestoreServiceClient()
+
+ # Initialize request argument(s)
+ feature = aiplatform_v1beta1.Feature()
+ feature.value_type = "BYTES"
+
+ request = aiplatform_v1beta1.UpdateFeatureRequest(
+ feature=feature,
+ )
+
+ # Make the request
+ response = client.update_feature(request=request)
+
+ # Handle the response
+ print(response)
+
Args:
request (Union[google.cloud.aiplatform_v1beta1.types.UpdateFeatureRequest, dict]):
The request object. Request message for
@@ -1887,6 +2235,30 @@ def delete_feature(
) -> gac_operation.Operation:
r"""Deletes a single Feature.
+
+ .. code-block::
+
+ from google.cloud import aiplatform_v1beta1
+
+ def sample_delete_feature():
+ # Create a client
+ client = aiplatform_v1beta1.FeaturestoreServiceClient()
+
+ # Initialize request argument(s)
+ request = aiplatform_v1beta1.DeleteFeatureRequest(
+ name="name_value",
+ )
+
+ # Make the request
+ operation = client.delete_feature(request=request)
+
+ print("Waiting for operation to complete...")
+
+ response = operation.result()
+
+ # Handle the response
+ print(response)
+
Args:
request (Union[google.cloud.aiplatform_v1beta1.types.DeleteFeatureRequest, dict]):
The request object. Request message for
@@ -2000,6 +2372,40 @@ def import_feature_values(
or retention policy.
- Online serving cluster is under-provisioned.
+
+
+ .. code-block::
+
+ from google.cloud import aiplatform_v1beta1
+
+ def sample_import_feature_values():
+ # Create a client
+ client = aiplatform_v1beta1.FeaturestoreServiceClient()
+
+ # Initialize request argument(s)
+ avro_source = aiplatform_v1beta1.AvroSource()
+ avro_source.gcs_source.uris = ['uris_value_1', 'uris_value_2']
+
+ feature_specs = aiplatform_v1beta1.FeatureSpec()
+ feature_specs.id = "id_value"
+
+ request = aiplatform_v1beta1.ImportFeatureValuesRequest(
+ avro_source=avro_source,
+ feature_time_field="feature_time_field_value",
+ entity_type="entity_type_value",
+ feature_specs=[feature_specs],
+ )
+
+ # Make the request
+ operation = client.import_feature_values(request=request)
+
+ print("Waiting for operation to complete...")
+
+ response = operation.result()
+
+ # Handle the response
+ print(response)
+
Args:
request (Union[google.cloud.aiplatform_v1beta1.types.ImportFeatureValuesRequest, dict]):
The request object. Request message for
@@ -2092,6 +2498,44 @@ def batch_read_feature_values(
correctness is guaranteed for Feature values of each
read instance as of each instance's read timestamp.
+
+
+ .. code-block::
+
+ from google.cloud import aiplatform_v1beta1
+
+ def sample_batch_read_feature_values():
+ # Create a client
+ client = aiplatform_v1beta1.FeaturestoreServiceClient()
+
+ # Initialize request argument(s)
+ csv_read_instances = aiplatform_v1beta1.CsvSource()
+ csv_read_instances.gcs_source.uris = ['uris_value_1', 'uris_value_2']
+
+ destination = aiplatform_v1beta1.FeatureValueDestination()
+ destination.bigquery_destination.output_uri = "output_uri_value"
+
+ entity_type_specs = aiplatform_v1beta1.EntityTypeSpec()
+ entity_type_specs.entity_type_id = "entity_type_id_value"
+ entity_type_specs.feature_selector.id_matcher.ids = ['ids_value_1', 'ids_value_2']
+
+ request = aiplatform_v1beta1.BatchReadFeatureValuesRequest(
+ csv_read_instances=csv_read_instances,
+ featurestore="featurestore_value",
+ destination=destination,
+ entity_type_specs=[entity_type_specs],
+ )
+
+ # Make the request
+ operation = client.batch_read_feature_values(request=request)
+
+ print("Waiting for operation to complete...")
+
+ response = operation.result()
+
+ # Handle the response
+ print(response)
+
Args:
request (Union[google.cloud.aiplatform_v1beta1.types.BatchReadFeatureValuesRequest, dict]):
The request object. Request message for
@@ -2181,6 +2625,39 @@ def export_feature_values(
r"""Exports Feature values from all the entities of a
target EntityType.
+
+
+ .. code-block::
+
+ from google.cloud import aiplatform_v1beta1
+
+ def sample_export_feature_values():
+ # Create a client
+ client = aiplatform_v1beta1.FeaturestoreServiceClient()
+
+ # Initialize request argument(s)
+ destination = aiplatform_v1beta1.FeatureValueDestination()
+ destination.bigquery_destination.output_uri = "output_uri_value"
+
+ feature_selector = aiplatform_v1beta1.FeatureSelector()
+ feature_selector.id_matcher.ids = ['ids_value_1', 'ids_value_2']
+
+ request = aiplatform_v1beta1.ExportFeatureValuesRequest(
+ entity_type="entity_type_value",
+ destination=destination,
+ feature_selector=feature_selector,
+ )
+
+ # Make the request
+ operation = client.export_feature_values(request=request)
+
+ print("Waiting for operation to complete...")
+
+ response = operation.result()
+
+ # Handle the response
+ print(response)
+
Args:
request (Union[google.cloud.aiplatform_v1beta1.types.ExportFeatureValuesRequest, dict]):
The request object. Request message for
@@ -2269,6 +2746,28 @@ def search_features(
r"""Searches Features matching a query in a given
project.
+
+
+ .. code-block::
+
+ from google.cloud import aiplatform_v1beta1
+
+ def sample_search_features():
+ # Create a client
+ client = aiplatform_v1beta1.FeaturestoreServiceClient()
+
+ # Initialize request argument(s)
+ request = aiplatform_v1beta1.SearchFeaturesRequest(
+ location="location_value",
+ )
+
+ # Make the request
+ page_result = client.search_features(request=request)
+
+ # Handle the response
+ for response in page_result:
+ print(response)
+
Args:
request (Union[google.cloud.aiplatform_v1beta1.types.SearchFeaturesRequest, dict]):
The request object. Request message for
diff --git a/google/cloud/aiplatform_v1beta1/services/index_endpoint_service/async_client.py b/google/cloud/aiplatform_v1beta1/services/index_endpoint_service/async_client.py
index 17d4aba21e..918b8d7b4b 100644
--- a/google/cloud/aiplatform_v1beta1/services/index_endpoint_service/async_client.py
+++ b/google/cloud/aiplatform_v1beta1/services/index_endpoint_service/async_client.py
@@ -227,6 +227,33 @@ async def create_index_endpoint(
) -> operation_async.AsyncOperation:
r"""Creates an IndexEndpoint.
+ .. code-block::
+
+ from google.cloud import aiplatform_v1beta1
+
+ def sample_create_index_endpoint():
+ # Create a client
+ client = aiplatform_v1beta1.IndexEndpointServiceClient()
+
+ # Initialize request argument(s)
+ index_endpoint = aiplatform_v1beta1.IndexEndpoint()
+ index_endpoint.display_name = "display_name_value"
+
+ request = aiplatform_v1beta1.CreateIndexEndpointRequest(
+ parent="parent_value",
+ index_endpoint=index_endpoint,
+ )
+
+ # Make the request
+ operation = client.create_index_endpoint(request=request)
+
+ print("Waiting for operation to complete...")
+
+ response = operation.result()
+
+ # Handle the response
+ print(response)
+
Args:
request (Union[google.cloud.aiplatform_v1beta1.types.CreateIndexEndpointRequest, dict]):
The request object. Request message for
@@ -318,6 +345,25 @@ async def get_index_endpoint(
) -> index_endpoint.IndexEndpoint:
r"""Gets an IndexEndpoint.
+ .. code-block::
+
+ from google.cloud import aiplatform_v1beta1
+
+ def sample_get_index_endpoint():
+ # Create a client
+ client = aiplatform_v1beta1.IndexEndpointServiceClient()
+
+ # Initialize request argument(s)
+ request = aiplatform_v1beta1.GetIndexEndpointRequest(
+ name="name_value",
+ )
+
+ # Make the request
+ response = client.get_index_endpoint(request=request)
+
+ # Handle the response
+ print(response)
+
Args:
request (Union[google.cloud.aiplatform_v1beta1.types.GetIndexEndpointRequest, dict]):
The request object. Request message for
@@ -391,6 +437,26 @@ async def list_index_endpoints(
) -> pagers.ListIndexEndpointsAsyncPager:
r"""Lists IndexEndpoints in a Location.
+ .. code-block::
+
+ from google.cloud import aiplatform_v1beta1
+
+ def sample_list_index_endpoints():
+ # Create a client
+ client = aiplatform_v1beta1.IndexEndpointServiceClient()
+
+ # Initialize request argument(s)
+ request = aiplatform_v1beta1.ListIndexEndpointsRequest(
+ parent="parent_value",
+ )
+
+ # Make the request
+ page_result = client.list_index_endpoints(request=request)
+
+ # Handle the response
+ for response in page_result:
+ print(response)
+
Args:
request (Union[google.cloud.aiplatform_v1beta1.types.ListIndexEndpointsRequest, dict]):
The request object. Request message for
@@ -473,6 +539,28 @@ async def update_index_endpoint(
) -> gca_index_endpoint.IndexEndpoint:
r"""Updates an IndexEndpoint.
+ .. code-block::
+
+ from google.cloud import aiplatform_v1beta1
+
+ def sample_update_index_endpoint():
+ # Create a client
+ client = aiplatform_v1beta1.IndexEndpointServiceClient()
+
+ # Initialize request argument(s)
+ index_endpoint = aiplatform_v1beta1.IndexEndpoint()
+ index_endpoint.display_name = "display_name_value"
+
+ request = aiplatform_v1beta1.UpdateIndexEndpointRequest(
+ index_endpoint=index_endpoint,
+ )
+
+ # Make the request
+ response = client.update_index_endpoint(request=request)
+
+ # Handle the response
+ print(response)
+
Args:
request (Union[google.cloud.aiplatform_v1beta1.types.UpdateIndexEndpointRequest, dict]):
The request object. Request message for
@@ -556,6 +644,29 @@ async def delete_index_endpoint(
) -> operation_async.AsyncOperation:
r"""Deletes an IndexEndpoint.
+ .. code-block::
+
+ from google.cloud import aiplatform_v1beta1
+
+ def sample_delete_index_endpoint():
+ # Create a client
+ client = aiplatform_v1beta1.IndexEndpointServiceClient()
+
+ # Initialize request argument(s)
+ request = aiplatform_v1beta1.DeleteIndexEndpointRequest(
+ name="name_value",
+ )
+
+ # Make the request
+ operation = client.delete_index_endpoint(request=request)
+
+ print("Waiting for operation to complete...")
+
+ response = operation.result()
+
+ # Handle the response
+ print(response)
+
Args:
request (Union[google.cloud.aiplatform_v1beta1.types.DeleteIndexEndpointRequest, dict]):
The request object. Request message for
@@ -652,6 +763,35 @@ async def deploy_index(
DeployedIndex within it.
Only non-empty Indexes can be deployed.
+
+ .. code-block::
+
+ from google.cloud import aiplatform_v1beta1
+
+ def sample_deploy_index():
+ # Create a client
+ client = aiplatform_v1beta1.IndexEndpointServiceClient()
+
+ # Initialize request argument(s)
+ deployed_index = aiplatform_v1beta1.DeployedIndex()
+ deployed_index.id = "id_value"
+ deployed_index.index = "index_value"
+
+ request = aiplatform_v1beta1.DeployIndexRequest(
+ index_endpoint="index_endpoint_value",
+ deployed_index=deployed_index,
+ )
+
+ # Make the request
+ operation = client.deploy_index(request=request)
+
+ print("Waiting for operation to complete...")
+
+ response = operation.result()
+
+ # Handle the response
+ print(response)
+
Args:
request (Union[google.cloud.aiplatform_v1beta1.types.DeployIndexRequest, dict]):
The request object. Request message for
@@ -750,6 +890,31 @@ async def undeploy_index(
DeployedIndex from it, and freeing all resources it's
using.
+
+ .. code-block::
+
+ from google.cloud import aiplatform_v1beta1
+
+ def sample_undeploy_index():
+ # Create a client
+ client = aiplatform_v1beta1.IndexEndpointServiceClient()
+
+ # Initialize request argument(s)
+ request = aiplatform_v1beta1.UndeployIndexRequest(
+ index_endpoint="index_endpoint_value",
+ deployed_index_id="deployed_index_id_value",
+ )
+
+ # Make the request
+ operation = client.undeploy_index(request=request)
+
+ print("Waiting for operation to complete...")
+
+ response = operation.result()
+
+ # Handle the response
+ print(response)
+
Args:
request (Union[google.cloud.aiplatform_v1beta1.types.UndeployIndexRequest, dict]):
The request object. Request message for
@@ -847,6 +1012,35 @@ async def mutate_deployed_index(
r"""Update an existing DeployedIndex under an
IndexEndpoint.
+
+ .. code-block::
+
+ from google.cloud import aiplatform_v1beta1
+
+ def sample_mutate_deployed_index():
+ # Create a client
+ client = aiplatform_v1beta1.IndexEndpointServiceClient()
+
+ # Initialize request argument(s)
+ deployed_index = aiplatform_v1beta1.DeployedIndex()
+ deployed_index.id = "id_value"
+ deployed_index.index = "index_value"
+
+ request = aiplatform_v1beta1.MutateDeployedIndexRequest(
+ index_endpoint="index_endpoint_value",
+ deployed_index=deployed_index,
+ )
+
+ # Make the request
+ operation = client.mutate_deployed_index(request=request)
+
+ print("Waiting for operation to complete...")
+
+ response = operation.result()
+
+ # Handle the response
+ print(response)
+
Args:
request (Union[google.cloud.aiplatform_v1beta1.types.MutateDeployedIndexRequest, dict]):
The request object. Request message for
diff --git a/google/cloud/aiplatform_v1beta1/services/index_endpoint_service/client.py b/google/cloud/aiplatform_v1beta1/services/index_endpoint_service/client.py
index 99f1480e27..2b8a63bcd4 100644
--- a/google/cloud/aiplatform_v1beta1/services/index_endpoint_service/client.py
+++ b/google/cloud/aiplatform_v1beta1/services/index_endpoint_service/client.py
@@ -437,6 +437,34 @@ def create_index_endpoint(
) -> gac_operation.Operation:
r"""Creates an IndexEndpoint.
+
+ .. code-block::
+
+ from google.cloud import aiplatform_v1beta1
+
+ def sample_create_index_endpoint():
+ # Create a client
+ client = aiplatform_v1beta1.IndexEndpointServiceClient()
+
+ # Initialize request argument(s)
+ index_endpoint = aiplatform_v1beta1.IndexEndpoint()
+ index_endpoint.display_name = "display_name_value"
+
+ request = aiplatform_v1beta1.CreateIndexEndpointRequest(
+ parent="parent_value",
+ index_endpoint=index_endpoint,
+ )
+
+ # Make the request
+ operation = client.create_index_endpoint(request=request)
+
+ print("Waiting for operation to complete...")
+
+ response = operation.result()
+
+ # Handle the response
+ print(response)
+
Args:
request (Union[google.cloud.aiplatform_v1beta1.types.CreateIndexEndpointRequest, dict]):
The request object. Request message for
@@ -528,6 +556,26 @@ def get_index_endpoint(
) -> index_endpoint.IndexEndpoint:
r"""Gets an IndexEndpoint.
+
+ .. code-block::
+
+ from google.cloud import aiplatform_v1beta1
+
+ def sample_get_index_endpoint():
+ # Create a client
+ client = aiplatform_v1beta1.IndexEndpointServiceClient()
+
+ # Initialize request argument(s)
+ request = aiplatform_v1beta1.GetIndexEndpointRequest(
+ name="name_value",
+ )
+
+ # Make the request
+ response = client.get_index_endpoint(request=request)
+
+ # Handle the response
+ print(response)
+
Args:
request (Union[google.cloud.aiplatform_v1beta1.types.GetIndexEndpointRequest, dict]):
The request object. Request message for
@@ -601,6 +649,27 @@ def list_index_endpoints(
) -> pagers.ListIndexEndpointsPager:
r"""Lists IndexEndpoints in a Location.
+
+ .. code-block::
+
+ from google.cloud import aiplatform_v1beta1
+
+ def sample_list_index_endpoints():
+ # Create a client
+ client = aiplatform_v1beta1.IndexEndpointServiceClient()
+
+ # Initialize request argument(s)
+ request = aiplatform_v1beta1.ListIndexEndpointsRequest(
+ parent="parent_value",
+ )
+
+ # Make the request
+ page_result = client.list_index_endpoints(request=request)
+
+ # Handle the response
+ for response in page_result:
+ print(response)
+
Args:
request (Union[google.cloud.aiplatform_v1beta1.types.ListIndexEndpointsRequest, dict]):
The request object. Request message for
@@ -683,6 +752,29 @@ def update_index_endpoint(
) -> gca_index_endpoint.IndexEndpoint:
r"""Updates an IndexEndpoint.
+
+ .. code-block::
+
+ from google.cloud import aiplatform_v1beta1
+
+ def sample_update_index_endpoint():
+ # Create a client
+ client = aiplatform_v1beta1.IndexEndpointServiceClient()
+
+ # Initialize request argument(s)
+ index_endpoint = aiplatform_v1beta1.IndexEndpoint()
+ index_endpoint.display_name = "display_name_value"
+
+ request = aiplatform_v1beta1.UpdateIndexEndpointRequest(
+ index_endpoint=index_endpoint,
+ )
+
+ # Make the request
+ response = client.update_index_endpoint(request=request)
+
+ # Handle the response
+ print(response)
+
Args:
request (Union[google.cloud.aiplatform_v1beta1.types.UpdateIndexEndpointRequest, dict]):
The request object. Request message for
@@ -766,6 +858,30 @@ def delete_index_endpoint(
) -> gac_operation.Operation:
r"""Deletes an IndexEndpoint.
+
+ .. code-block::
+
+ from google.cloud import aiplatform_v1beta1
+
+ def sample_delete_index_endpoint():
+ # Create a client
+ client = aiplatform_v1beta1.IndexEndpointServiceClient()
+
+ # Initialize request argument(s)
+ request = aiplatform_v1beta1.DeleteIndexEndpointRequest(
+ name="name_value",
+ )
+
+ # Make the request
+ operation = client.delete_index_endpoint(request=request)
+
+ print("Waiting for operation to complete...")
+
+ response = operation.result()
+
+ # Handle the response
+ print(response)
+
Args:
request (Union[google.cloud.aiplatform_v1beta1.types.DeleteIndexEndpointRequest, dict]):
The request object. Request message for
@@ -862,6 +978,36 @@ def deploy_index(
DeployedIndex within it.
Only non-empty Indexes can be deployed.
+
+
+ .. code-block::
+
+ from google.cloud import aiplatform_v1beta1
+
+ def sample_deploy_index():
+ # Create a client
+ client = aiplatform_v1beta1.IndexEndpointServiceClient()
+
+ # Initialize request argument(s)
+ deployed_index = aiplatform_v1beta1.DeployedIndex()
+ deployed_index.id = "id_value"
+ deployed_index.index = "index_value"
+
+ request = aiplatform_v1beta1.DeployIndexRequest(
+ index_endpoint="index_endpoint_value",
+ deployed_index=deployed_index,
+ )
+
+ # Make the request
+ operation = client.deploy_index(request=request)
+
+ print("Waiting for operation to complete...")
+
+ response = operation.result()
+
+ # Handle the response
+ print(response)
+
Args:
request (Union[google.cloud.aiplatform_v1beta1.types.DeployIndexRequest, dict]):
The request object. Request message for
@@ -960,6 +1106,32 @@ def undeploy_index(
DeployedIndex from it, and freeing all resources it's
using.
+
+
+ .. code-block::
+
+ from google.cloud import aiplatform_v1beta1
+
+ def sample_undeploy_index():
+ # Create a client
+ client = aiplatform_v1beta1.IndexEndpointServiceClient()
+
+ # Initialize request argument(s)
+ request = aiplatform_v1beta1.UndeployIndexRequest(
+ index_endpoint="index_endpoint_value",
+ deployed_index_id="deployed_index_id_value",
+ )
+
+ # Make the request
+ operation = client.undeploy_index(request=request)
+
+ print("Waiting for operation to complete...")
+
+ response = operation.result()
+
+ # Handle the response
+ print(response)
+
Args:
request (Union[google.cloud.aiplatform_v1beta1.types.UndeployIndexRequest, dict]):
The request object. Request message for
@@ -1057,6 +1229,36 @@ def mutate_deployed_index(
r"""Update an existing DeployedIndex under an
IndexEndpoint.
+
+
+ .. code-block::
+
+ from google.cloud import aiplatform_v1beta1
+
+ def sample_mutate_deployed_index():
+ # Create a client
+ client = aiplatform_v1beta1.IndexEndpointServiceClient()
+
+ # Initialize request argument(s)
+ deployed_index = aiplatform_v1beta1.DeployedIndex()
+ deployed_index.id = "id_value"
+ deployed_index.index = "index_value"
+
+ request = aiplatform_v1beta1.MutateDeployedIndexRequest(
+ index_endpoint="index_endpoint_value",
+ deployed_index=deployed_index,
+ )
+
+ # Make the request
+ operation = client.mutate_deployed_index(request=request)
+
+ print("Waiting for operation to complete...")
+
+ response = operation.result()
+
+ # Handle the response
+ print(response)
+
Args:
request (Union[google.cloud.aiplatform_v1beta1.types.MutateDeployedIndexRequest, dict]):
The request object. Request message for
diff --git a/google/cloud/aiplatform_v1beta1/services/index_service/async_client.py b/google/cloud/aiplatform_v1beta1/services/index_service/async_client.py
index 87214d756d..96ce14bfdf 100644
--- a/google/cloud/aiplatform_v1beta1/services/index_service/async_client.py
+++ b/google/cloud/aiplatform_v1beta1/services/index_service/async_client.py
@@ -226,6 +226,33 @@ async def create_index(
) -> operation_async.AsyncOperation:
r"""Creates an Index.
+ .. code-block::
+
+ from google.cloud import aiplatform_v1beta1
+
+ def sample_create_index():
+ # Create a client
+ client = aiplatform_v1beta1.IndexServiceClient()
+
+ # Initialize request argument(s)
+ index = aiplatform_v1beta1.Index()
+ index.display_name = "display_name_value"
+
+ request = aiplatform_v1beta1.CreateIndexRequest(
+ parent="parent_value",
+ index=index,
+ )
+
+ # Make the request
+ operation = client.create_index(request=request)
+
+ print("Waiting for operation to complete...")
+
+ response = operation.result()
+
+ # Handle the response
+ print(response)
+
Args:
request (Union[google.cloud.aiplatform_v1beta1.types.CreateIndexRequest, dict]):
The request object. Request message for
@@ -316,6 +343,25 @@ async def get_index(
) -> index.Index:
r"""Gets an Index.
+ .. code-block::
+
+ from google.cloud import aiplatform_v1beta1
+
+ def sample_get_index():
+ # Create a client
+ client = aiplatform_v1beta1.IndexServiceClient()
+
+ # Initialize request argument(s)
+ request = aiplatform_v1beta1.GetIndexRequest(
+ name="name_value",
+ )
+
+ # Make the request
+ response = client.get_index(request=request)
+
+ # Handle the response
+ print(response)
+
Args:
request (Union[google.cloud.aiplatform_v1beta1.types.GetIndexRequest, dict]):
The request object. Request message for
@@ -389,6 +435,26 @@ async def list_indexes(
) -> pagers.ListIndexesAsyncPager:
r"""Lists Indexes in a Location.
+ .. code-block::
+
+ from google.cloud import aiplatform_v1beta1
+
+ def sample_list_indexes():
+ # Create a client
+ client = aiplatform_v1beta1.IndexServiceClient()
+
+ # Initialize request argument(s)
+ request = aiplatform_v1beta1.ListIndexesRequest(
+ parent="parent_value",
+ )
+
+ # Make the request
+ page_result = client.list_indexes(request=request)
+
+ # Handle the response
+ for response in page_result:
+ print(response)
+
Args:
request (Union[google.cloud.aiplatform_v1beta1.types.ListIndexesRequest, dict]):
The request object. Request message for
@@ -471,6 +537,32 @@ async def update_index(
) -> operation_async.AsyncOperation:
r"""Updates an Index.
+ .. code-block::
+
+ from google.cloud import aiplatform_v1beta1
+
+ def sample_update_index():
+ # Create a client
+ client = aiplatform_v1beta1.IndexServiceClient()
+
+ # Initialize request argument(s)
+ index = aiplatform_v1beta1.Index()
+ index.display_name = "display_name_value"
+
+ request = aiplatform_v1beta1.UpdateIndexRequest(
+ index=index,
+ )
+
+ # Make the request
+ operation = client.update_index(request=request)
+
+ print("Waiting for operation to complete...")
+
+ response = operation.result()
+
+ # Handle the response
+ print(response)
+
Args:
request (Union[google.cloud.aiplatform_v1beta1.types.UpdateIndexRequest, dict]):
The request object. Request message for
@@ -567,6 +659,30 @@ async def delete_index(
[DeployedIndexes][google.cloud.aiplatform.v1beta1.Index.deployed_indexes]
had been undeployed.
+
+ .. code-block::
+
+ from google.cloud import aiplatform_v1beta1
+
+ def sample_delete_index():
+ # Create a client
+ client = aiplatform_v1beta1.IndexServiceClient()
+
+ # Initialize request argument(s)
+ request = aiplatform_v1beta1.DeleteIndexRequest(
+ name="name_value",
+ )
+
+ # Make the request
+ operation = client.delete_index(request=request)
+
+ print("Waiting for operation to complete...")
+
+ response = operation.result()
+
+ # Handle the response
+ print(response)
+
Args:
request (Union[google.cloud.aiplatform_v1beta1.types.DeleteIndexRequest, dict]):
The request object. Request message for
diff --git a/google/cloud/aiplatform_v1beta1/services/index_service/client.py b/google/cloud/aiplatform_v1beta1/services/index_service/client.py
index da3dc18d1f..dfbff350cb 100644
--- a/google/cloud/aiplatform_v1beta1/services/index_service/client.py
+++ b/google/cloud/aiplatform_v1beta1/services/index_service/client.py
@@ -437,6 +437,34 @@ def create_index(
) -> gac_operation.Operation:
r"""Creates an Index.
+
+ .. code-block::
+
+ from google.cloud import aiplatform_v1beta1
+
+ def sample_create_index():
+ # Create a client
+ client = aiplatform_v1beta1.IndexServiceClient()
+
+ # Initialize request argument(s)
+ index = aiplatform_v1beta1.Index()
+ index.display_name = "display_name_value"
+
+ request = aiplatform_v1beta1.CreateIndexRequest(
+ parent="parent_value",
+ index=index,
+ )
+
+ # Make the request
+ operation = client.create_index(request=request)
+
+ print("Waiting for operation to complete...")
+
+ response = operation.result()
+
+ # Handle the response
+ print(response)
+
Args:
request (Union[google.cloud.aiplatform_v1beta1.types.CreateIndexRequest, dict]):
The request object. Request message for
@@ -527,6 +555,26 @@ def get_index(
) -> index.Index:
r"""Gets an Index.
+
+ .. code-block::
+
+ from google.cloud import aiplatform_v1beta1
+
+ def sample_get_index():
+ # Create a client
+ client = aiplatform_v1beta1.IndexServiceClient()
+
+ # Initialize request argument(s)
+ request = aiplatform_v1beta1.GetIndexRequest(
+ name="name_value",
+ )
+
+ # Make the request
+ response = client.get_index(request=request)
+
+ # Handle the response
+ print(response)
+
Args:
request (Union[google.cloud.aiplatform_v1beta1.types.GetIndexRequest, dict]):
The request object. Request message for
@@ -600,6 +648,27 @@ def list_indexes(
) -> pagers.ListIndexesPager:
r"""Lists Indexes in a Location.
+
+ .. code-block::
+
+ from google.cloud import aiplatform_v1beta1
+
+ def sample_list_indexes():
+ # Create a client
+ client = aiplatform_v1beta1.IndexServiceClient()
+
+ # Initialize request argument(s)
+ request = aiplatform_v1beta1.ListIndexesRequest(
+ parent="parent_value",
+ )
+
+ # Make the request
+ page_result = client.list_indexes(request=request)
+
+ # Handle the response
+ for response in page_result:
+ print(response)
+
Args:
request (Union[google.cloud.aiplatform_v1beta1.types.ListIndexesRequest, dict]):
The request object. Request message for
@@ -682,6 +751,33 @@ def update_index(
) -> gac_operation.Operation:
r"""Updates an Index.
+
+ .. code-block::
+
+ from google.cloud import aiplatform_v1beta1
+
+ def sample_update_index():
+ # Create a client
+ client = aiplatform_v1beta1.IndexServiceClient()
+
+ # Initialize request argument(s)
+ index = aiplatform_v1beta1.Index()
+ index.display_name = "display_name_value"
+
+ request = aiplatform_v1beta1.UpdateIndexRequest(
+ index=index,
+ )
+
+ # Make the request
+ operation = client.update_index(request=request)
+
+ print("Waiting for operation to complete...")
+
+ response = operation.result()
+
+ # Handle the response
+ print(response)
+
Args:
request (Union[google.cloud.aiplatform_v1beta1.types.UpdateIndexRequest, dict]):
The request object. Request message for
@@ -778,6 +874,31 @@ def delete_index(
[DeployedIndexes][google.cloud.aiplatform.v1beta1.Index.deployed_indexes]
had been undeployed.
+
+
+ .. code-block::
+
+ from google.cloud import aiplatform_v1beta1
+
+ def sample_delete_index():
+ # Create a client
+ client = aiplatform_v1beta1.IndexServiceClient()
+
+ # Initialize request argument(s)
+ request = aiplatform_v1beta1.DeleteIndexRequest(
+ name="name_value",
+ )
+
+ # Make the request
+ operation = client.delete_index(request=request)
+
+ print("Waiting for operation to complete...")
+
+ response = operation.result()
+
+ # Handle the response
+ print(response)
+
Args:
request (Union[google.cloud.aiplatform_v1beta1.types.DeleteIndexRequest, dict]):
The request object. Request message for
diff --git a/google/cloud/aiplatform_v1beta1/services/job_service/async_client.py b/google/cloud/aiplatform_v1beta1/services/job_service/async_client.py
index ae2989cefe..c79495e4cc 100644
--- a/google/cloud/aiplatform_v1beta1/services/job_service/async_client.py
+++ b/google/cloud/aiplatform_v1beta1/services/job_service/async_client.py
@@ -279,6 +279,31 @@ async def create_custom_job(
r"""Creates a CustomJob. A created CustomJob right away
will be attempted to be run.
+
+ .. code-block::
+
+ from google.cloud import aiplatform_v1beta1
+
+ def sample_create_custom_job():
+ # Create a client
+ client = aiplatform_v1beta1.JobServiceClient()
+
+ # Initialize request argument(s)
+ custom_job = aiplatform_v1beta1.CustomJob()
+ custom_job.display_name = "display_name_value"
+ custom_job.job_spec.worker_pool_specs.container_spec.image_uri = "image_uri_value"
+
+ request = aiplatform_v1beta1.CreateCustomJobRequest(
+ parent="parent_value",
+ custom_job=custom_job,
+ )
+
+ # Make the request
+ response = client.create_custom_job(request=request)
+
+ # Handle the response
+ print(response)
+
Args:
request (Union[google.cloud.aiplatform_v1beta1.types.CreateCustomJobRequest, dict]):
The request object. Request message for
@@ -364,6 +389,25 @@ async def get_custom_job(
) -> custom_job.CustomJob:
r"""Gets a CustomJob.
+ .. code-block::
+
+ from google.cloud import aiplatform_v1beta1
+
+ def sample_get_custom_job():
+ # Create a client
+ client = aiplatform_v1beta1.JobServiceClient()
+
+ # Initialize request argument(s)
+ request = aiplatform_v1beta1.GetCustomJobRequest(
+ name="name_value",
+ )
+
+ # Make the request
+ response = client.get_custom_job(request=request)
+
+ # Handle the response
+ print(response)
+
Args:
request (Union[google.cloud.aiplatform_v1beta1.types.GetCustomJobRequest, dict]):
The request object. Request message for
@@ -441,6 +485,26 @@ async def list_custom_jobs(
) -> pagers.ListCustomJobsAsyncPager:
r"""Lists CustomJobs in a Location.
+ .. code-block::
+
+ from google.cloud import aiplatform_v1beta1
+
+ def sample_list_custom_jobs():
+ # Create a client
+ client = aiplatform_v1beta1.JobServiceClient()
+
+ # Initialize request argument(s)
+ request = aiplatform_v1beta1.ListCustomJobsRequest(
+ parent="parent_value",
+ )
+
+ # Make the request
+ page_result = client.list_custom_jobs(request=request)
+
+ # Handle the response
+ for response in page_result:
+ print(response)
+
Args:
request (Union[google.cloud.aiplatform_v1beta1.types.ListCustomJobsRequest, dict]):
The request object. Request message for
@@ -522,6 +586,29 @@ async def delete_custom_job(
) -> operation_async.AsyncOperation:
r"""Deletes a CustomJob.
+ .. code-block::
+
+ from google.cloud import aiplatform_v1beta1
+
+ def sample_delete_custom_job():
+ # Create a client
+ client = aiplatform_v1beta1.JobServiceClient()
+
+ # Initialize request argument(s)
+ request = aiplatform_v1beta1.DeleteCustomJobRequest(
+ name="name_value",
+ )
+
+ # Make the request
+ operation = client.delete_custom_job(request=request)
+
+ print("Waiting for operation to complete...")
+
+ response = operation.result()
+
+ # Handle the response
+ print(response)
+
Args:
request (Union[google.cloud.aiplatform_v1beta1.types.DeleteCustomJobRequest, dict]):
The request object. Request message for
@@ -627,6 +714,23 @@ async def cancel_custom_job(
[CustomJob.state][google.cloud.aiplatform.v1beta1.CustomJob.state]
is set to ``CANCELLED``.
+
+ .. code-block::
+
+ from google.cloud import aiplatform_v1beta1
+
+ def sample_cancel_custom_job():
+ # Create a client
+ client = aiplatform_v1beta1.JobServiceClient()
+
+ # Initialize request argument(s)
+ request = aiplatform_v1beta1.CancelCustomJobRequest(
+ name="name_value",
+ )
+
+ # Make the request
+ client.cancel_custom_job(request=request)
+
Args:
request (Union[google.cloud.aiplatform_v1beta1.types.CancelCustomJobRequest, dict]):
The request object. Request message for
@@ -692,6 +796,34 @@ async def create_data_labeling_job(
) -> gca_data_labeling_job.DataLabelingJob:
r"""Creates a DataLabelingJob.
+ .. code-block::
+
+ from google.cloud import aiplatform_v1beta1
+
+ def sample_create_data_labeling_job():
+ # Create a client
+ client = aiplatform_v1beta1.JobServiceClient()
+
+ # Initialize request argument(s)
+ data_labeling_job = aiplatform_v1beta1.DataLabelingJob()
+ data_labeling_job.display_name = "display_name_value"
+ data_labeling_job.datasets = ['datasets_value_1', 'datasets_value_2']
+ data_labeling_job.labeler_count = 1375
+ data_labeling_job.instruction_uri = "instruction_uri_value"
+ data_labeling_job.inputs_schema_uri = "inputs_schema_uri_value"
+ data_labeling_job.inputs.null_value = "NULL_VALUE"
+
+ request = aiplatform_v1beta1.CreateDataLabelingJobRequest(
+ parent="parent_value",
+ data_labeling_job=data_labeling_job,
+ )
+
+ # Make the request
+ response = client.create_data_labeling_job(request=request)
+
+ # Handle the response
+ print(response)
+
Args:
request (Union[google.cloud.aiplatform_v1beta1.types.CreateDataLabelingJobRequest, dict]):
The request object. Request message for
@@ -773,6 +905,25 @@ async def get_data_labeling_job(
) -> data_labeling_job.DataLabelingJob:
r"""Gets a DataLabelingJob.
+ .. code-block::
+
+ from google.cloud import aiplatform_v1beta1
+
+ def sample_get_data_labeling_job():
+ # Create a client
+ client = aiplatform_v1beta1.JobServiceClient()
+
+ # Initialize request argument(s)
+ request = aiplatform_v1beta1.GetDataLabelingJobRequest(
+ name="name_value",
+ )
+
+ # Make the request
+ response = client.get_data_labeling_job(request=request)
+
+ # Handle the response
+ print(response)
+
Args:
request (Union[google.cloud.aiplatform_v1beta1.types.GetDataLabelingJobRequest, dict]):
The request object. Request message for
@@ -845,6 +996,26 @@ async def list_data_labeling_jobs(
) -> pagers.ListDataLabelingJobsAsyncPager:
r"""Lists DataLabelingJobs in a Location.
+ .. code-block::
+
+ from google.cloud import aiplatform_v1beta1
+
+ def sample_list_data_labeling_jobs():
+ # Create a client
+ client = aiplatform_v1beta1.JobServiceClient()
+
+ # Initialize request argument(s)
+ request = aiplatform_v1beta1.ListDataLabelingJobsRequest(
+ parent="parent_value",
+ )
+
+ # Make the request
+ page_result = client.list_data_labeling_jobs(request=request)
+
+ # Handle the response
+ for response in page_result:
+ print(response)
+
Args:
request (Union[google.cloud.aiplatform_v1beta1.types.ListDataLabelingJobsRequest, dict]):
The request object. Request message for
@@ -925,6 +1096,29 @@ async def delete_data_labeling_job(
) -> operation_async.AsyncOperation:
r"""Deletes a DataLabelingJob.
+ .. code-block::
+
+ from google.cloud import aiplatform_v1beta1
+
+ def sample_delete_data_labeling_job():
+ # Create a client
+ client = aiplatform_v1beta1.JobServiceClient()
+
+ # Initialize request argument(s)
+ request = aiplatform_v1beta1.DeleteDataLabelingJobRequest(
+ name="name_value",
+ )
+
+ # Make the request
+ operation = client.delete_data_labeling_job(request=request)
+
+ print("Waiting for operation to complete...")
+
+ response = operation.result()
+
+ # Handle the response
+ print(response)
+
Args:
request (Union[google.cloud.aiplatform_v1beta1.types.DeleteDataLabelingJobRequest, dict]):
The request object. Request message for
@@ -1019,6 +1213,23 @@ async def cancel_data_labeling_job(
r"""Cancels a DataLabelingJob. Success of cancellation is
not guaranteed.
+
+ .. code-block::
+
+ from google.cloud import aiplatform_v1beta1
+
+ def sample_cancel_data_labeling_job():
+ # Create a client
+ client = aiplatform_v1beta1.JobServiceClient()
+
+ # Initialize request argument(s)
+ request = aiplatform_v1beta1.CancelDataLabelingJobRequest(
+ name="name_value",
+ )
+
+ # Make the request
+ client.cancel_data_labeling_job(request=request)
+
Args:
request (Union[google.cloud.aiplatform_v1beta1.types.CancelDataLabelingJobRequest, dict]):
The request object. Request message for
@@ -1084,6 +1295,37 @@ async def create_hyperparameter_tuning_job(
) -> gca_hyperparameter_tuning_job.HyperparameterTuningJob:
r"""Creates a HyperparameterTuningJob
+ .. code-block::
+
+ from google.cloud import aiplatform_v1beta1
+
+ def sample_create_hyperparameter_tuning_job():
+ # Create a client
+ client = aiplatform_v1beta1.JobServiceClient()
+
+ # Initialize request argument(s)
+ hyperparameter_tuning_job = aiplatform_v1beta1.HyperparameterTuningJob()
+ hyperparameter_tuning_job.display_name = "display_name_value"
+ hyperparameter_tuning_job.study_spec.metrics.metric_id = "metric_id_value"
+ hyperparameter_tuning_job.study_spec.metrics.goal = "MINIMIZE"
+ hyperparameter_tuning_job.study_spec.parameters.double_value_spec.min_value = 0.96
+ hyperparameter_tuning_job.study_spec.parameters.double_value_spec.max_value = 0.962
+ hyperparameter_tuning_job.study_spec.parameters.parameter_id = "parameter_id_value"
+ hyperparameter_tuning_job.max_trial_count = 1609
+ hyperparameter_tuning_job.parallel_trial_count = 2128
+ hyperparameter_tuning_job.trial_job_spec.worker_pool_specs.container_spec.image_uri = "image_uri_value"
+
+ request = aiplatform_v1beta1.CreateHyperparameterTuningJobRequest(
+ parent="parent_value",
+ hyperparameter_tuning_job=hyperparameter_tuning_job,
+ )
+
+ # Make the request
+ response = client.create_hyperparameter_tuning_job(request=request)
+
+ # Handle the response
+ print(response)
+
Args:
request (Union[google.cloud.aiplatform_v1beta1.types.CreateHyperparameterTuningJobRequest, dict]):
The request object. Request message for
@@ -1167,6 +1409,25 @@ async def get_hyperparameter_tuning_job(
) -> hyperparameter_tuning_job.HyperparameterTuningJob:
r"""Gets a HyperparameterTuningJob
+ .. code-block::
+
+ from google.cloud import aiplatform_v1beta1
+
+ def sample_get_hyperparameter_tuning_job():
+ # Create a client
+ client = aiplatform_v1beta1.JobServiceClient()
+
+ # Initialize request argument(s)
+ request = aiplatform_v1beta1.GetHyperparameterTuningJobRequest(
+ name="name_value",
+ )
+
+ # Make the request
+ response = client.get_hyperparameter_tuning_job(request=request)
+
+ # Handle the response
+ print(response)
+
Args:
request (Union[google.cloud.aiplatform_v1beta1.types.GetHyperparameterTuningJobRequest, dict]):
The request object. Request message for
@@ -1241,6 +1502,26 @@ async def list_hyperparameter_tuning_jobs(
) -> pagers.ListHyperparameterTuningJobsAsyncPager:
r"""Lists HyperparameterTuningJobs in a Location.
+ .. code-block::
+
+ from google.cloud import aiplatform_v1beta1
+
+ def sample_list_hyperparameter_tuning_jobs():
+ # Create a client
+ client = aiplatform_v1beta1.JobServiceClient()
+
+ # Initialize request argument(s)
+ request = aiplatform_v1beta1.ListHyperparameterTuningJobsRequest(
+ parent="parent_value",
+ )
+
+ # Make the request
+ page_result = client.list_hyperparameter_tuning_jobs(request=request)
+
+ # Handle the response
+ for response in page_result:
+ print(response)
+
Args:
request (Union[google.cloud.aiplatform_v1beta1.types.ListHyperparameterTuningJobsRequest, dict]):
The request object. Request message for
@@ -1322,6 +1603,29 @@ async def delete_hyperparameter_tuning_job(
) -> operation_async.AsyncOperation:
r"""Deletes a HyperparameterTuningJob.
+ .. code-block::
+
+ from google.cloud import aiplatform_v1beta1
+
+ def sample_delete_hyperparameter_tuning_job():
+ # Create a client
+ client = aiplatform_v1beta1.JobServiceClient()
+
+ # Initialize request argument(s)
+ request = aiplatform_v1beta1.DeleteHyperparameterTuningJobRequest(
+ name="name_value",
+ )
+
+ # Make the request
+ operation = client.delete_hyperparameter_tuning_job(request=request)
+
+ print("Waiting for operation to complete...")
+
+ response = operation.result()
+
+ # Handle the response
+ print(response)
+
Args:
request (Union[google.cloud.aiplatform_v1beta1.types.DeleteHyperparameterTuningJobRequest, dict]):
The request object. Request message for
@@ -1428,6 +1732,23 @@ async def cancel_hyperparameter_tuning_job(
[HyperparameterTuningJob.state][google.cloud.aiplatform.v1beta1.HyperparameterTuningJob.state]
is set to ``CANCELLED``.
+
+ .. code-block::
+
+ from google.cloud import aiplatform_v1beta1
+
+ def sample_cancel_hyperparameter_tuning_job():
+ # Create a client
+ client = aiplatform_v1beta1.JobServiceClient()
+
+ # Initialize request argument(s)
+ request = aiplatform_v1beta1.CancelHyperparameterTuningJobRequest(
+ name="name_value",
+ )
+
+ # Make the request
+ client.cancel_hyperparameter_tuning_job(request=request)
+
Args:
request (Union[google.cloud.aiplatform_v1beta1.types.CancelHyperparameterTuningJobRequest, dict]):
The request object. Request message for
@@ -1495,6 +1816,34 @@ async def create_batch_prediction_job(
r"""Creates a BatchPredictionJob. A BatchPredictionJob
once created will right away be attempted to start.
+
+ .. code-block::
+
+ from google.cloud import aiplatform_v1beta1
+
+ def sample_create_batch_prediction_job():
+ # Create a client
+ client = aiplatform_v1beta1.JobServiceClient()
+
+ # Initialize request argument(s)
+ batch_prediction_job = aiplatform_v1beta1.BatchPredictionJob()
+ batch_prediction_job.display_name = "display_name_value"
+ batch_prediction_job.input_config.gcs_source.uris = ['uris_value_1', 'uris_value_2']
+ batch_prediction_job.input_config.instances_format = "instances_format_value"
+ batch_prediction_job.output_config.gcs_destination.output_uri_prefix = "output_uri_prefix_value"
+ batch_prediction_job.output_config.predictions_format = "predictions_format_value"
+
+ request = aiplatform_v1beta1.CreateBatchPredictionJobRequest(
+ parent="parent_value",
+ batch_prediction_job=batch_prediction_job,
+ )
+
+ # Make the request
+ response = client.create_batch_prediction_job(request=request)
+
+ # Handle the response
+ print(response)
+
Args:
request (Union[google.cloud.aiplatform_v1beta1.types.CreateBatchPredictionJobRequest, dict]):
The request object. Request message for
@@ -1580,6 +1929,25 @@ async def get_batch_prediction_job(
) -> batch_prediction_job.BatchPredictionJob:
r"""Gets a BatchPredictionJob
+ .. code-block::
+
+ from google.cloud import aiplatform_v1beta1
+
+ def sample_get_batch_prediction_job():
+ # Create a client
+ client = aiplatform_v1beta1.JobServiceClient()
+
+ # Initialize request argument(s)
+ request = aiplatform_v1beta1.GetBatchPredictionJobRequest(
+ name="name_value",
+ )
+
+ # Make the request
+ response = client.get_batch_prediction_job(request=request)
+
+ # Handle the response
+ print(response)
+
Args:
request (Union[google.cloud.aiplatform_v1beta1.types.GetBatchPredictionJobRequest, dict]):
The request object. Request message for
@@ -1656,6 +2024,26 @@ async def list_batch_prediction_jobs(
) -> pagers.ListBatchPredictionJobsAsyncPager:
r"""Lists BatchPredictionJobs in a Location.
+ .. code-block::
+
+ from google.cloud import aiplatform_v1beta1
+
+ def sample_list_batch_prediction_jobs():
+ # Create a client
+ client = aiplatform_v1beta1.JobServiceClient()
+
+ # Initialize request argument(s)
+ request = aiplatform_v1beta1.ListBatchPredictionJobsRequest(
+ parent="parent_value",
+ )
+
+ # Make the request
+ page_result = client.list_batch_prediction_jobs(request=request)
+
+ # Handle the response
+ for response in page_result:
+ print(response)
+
Args:
request (Union[google.cloud.aiplatform_v1beta1.types.ListBatchPredictionJobsRequest, dict]):
The request object. Request message for
@@ -1738,6 +2126,30 @@ async def delete_batch_prediction_job(
r"""Deletes a BatchPredictionJob. Can only be called on
jobs that already finished.
+
+ .. code-block::
+
+ from google.cloud import aiplatform_v1beta1
+
+ def sample_delete_batch_prediction_job():
+ # Create a client
+ client = aiplatform_v1beta1.JobServiceClient()
+
+ # Initialize request argument(s)
+ request = aiplatform_v1beta1.DeleteBatchPredictionJobRequest(
+ name="name_value",
+ )
+
+ # Make the request
+ operation = client.delete_batch_prediction_job(request=request)
+
+ print("Waiting for operation to complete...")
+
+ response = operation.result()
+
+ # Handle the response
+ print(response)
+
Args:
request (Union[google.cloud.aiplatform_v1beta1.types.DeleteBatchPredictionJobRequest, dict]):
The request object. Request message for
@@ -1842,6 +2254,23 @@ async def cancel_batch_prediction_job(
is set to ``CANCELLED``. Any files already outputted by the job
are not deleted.
+
+ .. code-block::
+
+ from google.cloud import aiplatform_v1beta1
+
+ def sample_cancel_batch_prediction_job():
+ # Create a client
+ client = aiplatform_v1beta1.JobServiceClient()
+
+ # Initialize request argument(s)
+ request = aiplatform_v1beta1.CancelBatchPredictionJobRequest(
+ name="name_value",
+ )
+
+ # Make the request
+ client.cancel_batch_prediction_job(request=request)
+
Args:
request (Union[google.cloud.aiplatform_v1beta1.types.CancelBatchPredictionJobRequest, dict]):
The request object. Request message for
@@ -1911,6 +2340,31 @@ async def create_model_deployment_monitoring_job(
r"""Creates a ModelDeploymentMonitoringJob. It will run
periodically on a configured interval.
+
+ .. code-block::
+
+ from google.cloud import aiplatform_v1beta1
+
+ def sample_create_model_deployment_monitoring_job():
+ # Create a client
+ client = aiplatform_v1beta1.JobServiceClient()
+
+ # Initialize request argument(s)
+ model_deployment_monitoring_job = aiplatform_v1beta1.ModelDeploymentMonitoringJob()
+ model_deployment_monitoring_job.display_name = "display_name_value"
+ model_deployment_monitoring_job.endpoint = "endpoint_value"
+
+ request = aiplatform_v1beta1.CreateModelDeploymentMonitoringJobRequest(
+ parent="parent_value",
+ model_deployment_monitoring_job=model_deployment_monitoring_job,
+ )
+
+ # Make the request
+ response = client.create_model_deployment_monitoring_job(request=request)
+
+ # Handle the response
+ print(response)
+
Args:
request (Union[google.cloud.aiplatform_v1beta1.types.CreateModelDeploymentMonitoringJobRequest, dict]):
The request object. Request message for
@@ -1999,6 +2453,28 @@ async def search_model_deployment_monitoring_stats_anomalies(
r"""Searches Model Monitoring Statistics generated within
a given time window.
+
+ .. code-block::
+
+ from google.cloud import aiplatform_v1beta1
+
+ def sample_search_model_deployment_monitoring_stats_anomalies():
+ # Create a client
+ client = aiplatform_v1beta1.JobServiceClient()
+
+ # Initialize request argument(s)
+ request = aiplatform_v1beta1.SearchModelDeploymentMonitoringStatsAnomaliesRequest(
+ model_deployment_monitoring_job="model_deployment_monitoring_job_value",
+ deployed_model_id="deployed_model_id_value",
+ )
+
+ # Make the request
+ page_result = client.search_model_deployment_monitoring_stats_anomalies(request=request)
+
+ # Handle the response
+ for response in page_result:
+ print(response)
+
Args:
request (Union[google.cloud.aiplatform_v1beta1.types.SearchModelDeploymentMonitoringStatsAnomaliesRequest, dict]):
The request object. Request message for
@@ -2006,7 +2482,7 @@ async def search_model_deployment_monitoring_stats_anomalies(
model_deployment_monitoring_job (:class:`str`):
Required. ModelDeploymentMonitoring Job resource name.
Format:
- \`projects/{project}/locations/{location}/modelDeploymentMonitoringJobs/{model_deployment_monitoring_job}
+ ``projects/{project}/locations/{location}/modelDeploymentMonitoringJobs/{model_deployment_monitoring_job}``
This corresponds to the ``model_deployment_monitoring_job`` field
on the ``request`` instance; if ``request`` is provided, this
@@ -2098,6 +2574,25 @@ async def get_model_deployment_monitoring_job(
) -> model_deployment_monitoring_job.ModelDeploymentMonitoringJob:
r"""Gets a ModelDeploymentMonitoringJob.
+ .. code-block::
+
+ from google.cloud import aiplatform_v1beta1
+
+ def sample_get_model_deployment_monitoring_job():
+ # Create a client
+ client = aiplatform_v1beta1.JobServiceClient()
+
+ # Initialize request argument(s)
+ request = aiplatform_v1beta1.GetModelDeploymentMonitoringJobRequest(
+ name="name_value",
+ )
+
+ # Make the request
+ response = client.get_model_deployment_monitoring_job(request=request)
+
+ # Handle the response
+ print(response)
+
Args:
request (Union[google.cloud.aiplatform_v1beta1.types.GetModelDeploymentMonitoringJobRequest, dict]):
The request object. Request message for
@@ -2175,6 +2670,26 @@ async def list_model_deployment_monitoring_jobs(
) -> pagers.ListModelDeploymentMonitoringJobsAsyncPager:
r"""Lists ModelDeploymentMonitoringJobs in a Location.
+ .. code-block::
+
+ from google.cloud import aiplatform_v1beta1
+
+ def sample_list_model_deployment_monitoring_jobs():
+ # Create a client
+ client = aiplatform_v1beta1.JobServiceClient()
+
+ # Initialize request argument(s)
+ request = aiplatform_v1beta1.ListModelDeploymentMonitoringJobsRequest(
+ parent="parent_value",
+ )
+
+ # Make the request
+ page_result = client.list_model_deployment_monitoring_jobs(request=request)
+
+ # Handle the response
+ for response in page_result:
+ print(response)
+
Args:
request (Union[google.cloud.aiplatform_v1beta1.types.ListModelDeploymentMonitoringJobsRequest, dict]):
The request object. Request message for
@@ -2259,6 +2774,33 @@ async def update_model_deployment_monitoring_job(
) -> operation_async.AsyncOperation:
r"""Updates a ModelDeploymentMonitoringJob.
+ .. code-block::
+
+ from google.cloud import aiplatform_v1beta1
+
+ def sample_update_model_deployment_monitoring_job():
+ # Create a client
+ client = aiplatform_v1beta1.JobServiceClient()
+
+ # Initialize request argument(s)
+ model_deployment_monitoring_job = aiplatform_v1beta1.ModelDeploymentMonitoringJob()
+ model_deployment_monitoring_job.display_name = "display_name_value"
+ model_deployment_monitoring_job.endpoint = "endpoint_value"
+
+ request = aiplatform_v1beta1.UpdateModelDeploymentMonitoringJobRequest(
+ model_deployment_monitoring_job=model_deployment_monitoring_job,
+ )
+
+ # Make the request
+ operation = client.update_model_deployment_monitoring_job(request=request)
+
+ print("Waiting for operation to complete...")
+
+ response = operation.result()
+
+ # Handle the response
+ print(response)
+
Args:
request (Union[google.cloud.aiplatform_v1beta1.types.UpdateModelDeploymentMonitoringJobRequest, dict]):
The request object. Request message for
@@ -2386,6 +2928,29 @@ async def delete_model_deployment_monitoring_job(
) -> operation_async.AsyncOperation:
r"""Deletes a ModelDeploymentMonitoringJob.
+ .. code-block::
+
+ from google.cloud import aiplatform_v1beta1
+
+ def sample_delete_model_deployment_monitoring_job():
+ # Create a client
+ client = aiplatform_v1beta1.JobServiceClient()
+
+ # Initialize request argument(s)
+ request = aiplatform_v1beta1.DeleteModelDeploymentMonitoringJobRequest(
+ name="name_value",
+ )
+
+ # Make the request
+ operation = client.delete_model_deployment_monitoring_job(request=request)
+
+ print("Waiting for operation to complete...")
+
+ response = operation.result()
+
+ # Handle the response
+ print(response)
+
Args:
request (Union[google.cloud.aiplatform_v1beta1.types.DeleteModelDeploymentMonitoringJobRequest, dict]):
The request object. Request message for
@@ -2484,6 +3049,23 @@ async def pause_model_deployment_monitoring_job(
[ModelDeploymentMonitoringJob.state][google.cloud.aiplatform.v1beta1.ModelDeploymentMonitoringJob.state]
to 'PAUSED'.
+
+ .. code-block::
+
+ from google.cloud import aiplatform_v1beta1
+
+ def sample_pause_model_deployment_monitoring_job():
+ # Create a client
+ client = aiplatform_v1beta1.JobServiceClient()
+
+ # Initialize request argument(s)
+ request = aiplatform_v1beta1.PauseModelDeploymentMonitoringJobRequest(
+ name="name_value",
+ )
+
+ # Make the request
+ client.pause_model_deployment_monitoring_job(request=request)
+
Args:
request (Union[google.cloud.aiplatform_v1beta1.types.PauseModelDeploymentMonitoringJobRequest, dict]):
The request object. Request message for
@@ -2553,6 +3135,23 @@ async def resume_model_deployment_monitoring_job(
will start to run from next scheduled time. A deleted
ModelDeploymentMonitoringJob can't be resumed.
+
+ .. code-block::
+
+ from google.cloud import aiplatform_v1beta1
+
+ def sample_resume_model_deployment_monitoring_job():
+ # Create a client
+ client = aiplatform_v1beta1.JobServiceClient()
+
+ # Initialize request argument(s)
+ request = aiplatform_v1beta1.ResumeModelDeploymentMonitoringJobRequest(
+ name="name_value",
+ )
+
+ # Make the request
+ client.resume_model_deployment_monitoring_job(request=request)
+
Args:
request (Union[google.cloud.aiplatform_v1beta1.types.ResumeModelDeploymentMonitoringJobRequest, dict]):
The request object. Request message for
diff --git a/google/cloud/aiplatform_v1beta1/services/job_service/client.py b/google/cloud/aiplatform_v1beta1/services/job_service/client.py
index 85e571b49e..262c87baf5 100644
--- a/google/cloud/aiplatform_v1beta1/services/job_service/client.py
+++ b/google/cloud/aiplatform_v1beta1/services/job_service/client.py
@@ -621,6 +621,32 @@ def create_custom_job(
r"""Creates a CustomJob. A created CustomJob right away
will be attempted to be run.
+
+
+ .. code-block::
+
+ from google.cloud import aiplatform_v1beta1
+
+ def sample_create_custom_job():
+ # Create a client
+ client = aiplatform_v1beta1.JobServiceClient()
+
+ # Initialize request argument(s)
+ custom_job = aiplatform_v1beta1.CustomJob()
+ custom_job.display_name = "display_name_value"
+ custom_job.job_spec.worker_pool_specs.container_spec.image_uri = "image_uri_value"
+
+ request = aiplatform_v1beta1.CreateCustomJobRequest(
+ parent="parent_value",
+ custom_job=custom_job,
+ )
+
+ # Make the request
+ response = client.create_custom_job(request=request)
+
+ # Handle the response
+ print(response)
+
Args:
request (Union[google.cloud.aiplatform_v1beta1.types.CreateCustomJobRequest, dict]):
The request object. Request message for
@@ -706,6 +732,26 @@ def get_custom_job(
) -> custom_job.CustomJob:
r"""Gets a CustomJob.
+
+ .. code-block::
+
+ from google.cloud import aiplatform_v1beta1
+
+ def sample_get_custom_job():
+ # Create a client
+ client = aiplatform_v1beta1.JobServiceClient()
+
+ # Initialize request argument(s)
+ request = aiplatform_v1beta1.GetCustomJobRequest(
+ name="name_value",
+ )
+
+ # Make the request
+ response = client.get_custom_job(request=request)
+
+ # Handle the response
+ print(response)
+
Args:
request (Union[google.cloud.aiplatform_v1beta1.types.GetCustomJobRequest, dict]):
The request object. Request message for
@@ -783,6 +829,27 @@ def list_custom_jobs(
) -> pagers.ListCustomJobsPager:
r"""Lists CustomJobs in a Location.
+
+ .. code-block::
+
+ from google.cloud import aiplatform_v1beta1
+
+ def sample_list_custom_jobs():
+ # Create a client
+ client = aiplatform_v1beta1.JobServiceClient()
+
+ # Initialize request argument(s)
+ request = aiplatform_v1beta1.ListCustomJobsRequest(
+ parent="parent_value",
+ )
+
+ # Make the request
+ page_result = client.list_custom_jobs(request=request)
+
+ # Handle the response
+ for response in page_result:
+ print(response)
+
Args:
request (Union[google.cloud.aiplatform_v1beta1.types.ListCustomJobsRequest, dict]):
The request object. Request message for
@@ -864,6 +931,30 @@ def delete_custom_job(
) -> gac_operation.Operation:
r"""Deletes a CustomJob.
+
+ .. code-block::
+
+ from google.cloud import aiplatform_v1beta1
+
+ def sample_delete_custom_job():
+ # Create a client
+ client = aiplatform_v1beta1.JobServiceClient()
+
+ # Initialize request argument(s)
+ request = aiplatform_v1beta1.DeleteCustomJobRequest(
+ name="name_value",
+ )
+
+ # Make the request
+ operation = client.delete_custom_job(request=request)
+
+ print("Waiting for operation to complete...")
+
+ response = operation.result()
+
+ # Handle the response
+ print(response)
+
Args:
request (Union[google.cloud.aiplatform_v1beta1.types.DeleteCustomJobRequest, dict]):
The request object. Request message for
@@ -969,6 +1060,24 @@ def cancel_custom_job(
[CustomJob.state][google.cloud.aiplatform.v1beta1.CustomJob.state]
is set to ``CANCELLED``.
+
+
+ .. code-block::
+
+ from google.cloud import aiplatform_v1beta1
+
+ def sample_cancel_custom_job():
+ # Create a client
+ client = aiplatform_v1beta1.JobServiceClient()
+
+ # Initialize request argument(s)
+ request = aiplatform_v1beta1.CancelCustomJobRequest(
+ name="name_value",
+ )
+
+ # Make the request
+ client.cancel_custom_job(request=request)
+
Args:
request (Union[google.cloud.aiplatform_v1beta1.types.CancelCustomJobRequest, dict]):
The request object. Request message for
@@ -1034,6 +1143,35 @@ def create_data_labeling_job(
) -> gca_data_labeling_job.DataLabelingJob:
r"""Creates a DataLabelingJob.
+
+ .. code-block::
+
+ from google.cloud import aiplatform_v1beta1
+
+ def sample_create_data_labeling_job():
+ # Create a client
+ client = aiplatform_v1beta1.JobServiceClient()
+
+ # Initialize request argument(s)
+ data_labeling_job = aiplatform_v1beta1.DataLabelingJob()
+ data_labeling_job.display_name = "display_name_value"
+ data_labeling_job.datasets = ['datasets_value_1', 'datasets_value_2']
+ data_labeling_job.labeler_count = 1375
+ data_labeling_job.instruction_uri = "instruction_uri_value"
+ data_labeling_job.inputs_schema_uri = "inputs_schema_uri_value"
+ data_labeling_job.inputs.null_value = "NULL_VALUE"
+
+ request = aiplatform_v1beta1.CreateDataLabelingJobRequest(
+ parent="parent_value",
+ data_labeling_job=data_labeling_job,
+ )
+
+ # Make the request
+ response = client.create_data_labeling_job(request=request)
+
+ # Handle the response
+ print(response)
+
Args:
request (Union[google.cloud.aiplatform_v1beta1.types.CreateDataLabelingJobRequest, dict]):
The request object. Request message for
@@ -1115,6 +1253,26 @@ def get_data_labeling_job(
) -> data_labeling_job.DataLabelingJob:
r"""Gets a DataLabelingJob.
+
+ .. code-block::
+
+ from google.cloud import aiplatform_v1beta1
+
+ def sample_get_data_labeling_job():
+ # Create a client
+ client = aiplatform_v1beta1.JobServiceClient()
+
+ # Initialize request argument(s)
+ request = aiplatform_v1beta1.GetDataLabelingJobRequest(
+ name="name_value",
+ )
+
+ # Make the request
+ response = client.get_data_labeling_job(request=request)
+
+ # Handle the response
+ print(response)
+
Args:
request (Union[google.cloud.aiplatform_v1beta1.types.GetDataLabelingJobRequest, dict]):
The request object. Request message for
@@ -1187,6 +1345,27 @@ def list_data_labeling_jobs(
) -> pagers.ListDataLabelingJobsPager:
r"""Lists DataLabelingJobs in a Location.
+
+ .. code-block::
+
+ from google.cloud import aiplatform_v1beta1
+
+ def sample_list_data_labeling_jobs():
+ # Create a client
+ client = aiplatform_v1beta1.JobServiceClient()
+
+ # Initialize request argument(s)
+ request = aiplatform_v1beta1.ListDataLabelingJobsRequest(
+ parent="parent_value",
+ )
+
+ # Make the request
+ page_result = client.list_data_labeling_jobs(request=request)
+
+ # Handle the response
+ for response in page_result:
+ print(response)
+
Args:
request (Union[google.cloud.aiplatform_v1beta1.types.ListDataLabelingJobsRequest, dict]):
The request object. Request message for
@@ -1267,6 +1446,30 @@ def delete_data_labeling_job(
) -> gac_operation.Operation:
r"""Deletes a DataLabelingJob.
+
+ .. code-block::
+
+ from google.cloud import aiplatform_v1beta1
+
+ def sample_delete_data_labeling_job():
+ # Create a client
+ client = aiplatform_v1beta1.JobServiceClient()
+
+ # Initialize request argument(s)
+ request = aiplatform_v1beta1.DeleteDataLabelingJobRequest(
+ name="name_value",
+ )
+
+ # Make the request
+ operation = client.delete_data_labeling_job(request=request)
+
+ print("Waiting for operation to complete...")
+
+ response = operation.result()
+
+ # Handle the response
+ print(response)
+
Args:
request (Union[google.cloud.aiplatform_v1beta1.types.DeleteDataLabelingJobRequest, dict]):
The request object. Request message for
@@ -1361,6 +1564,24 @@ def cancel_data_labeling_job(
r"""Cancels a DataLabelingJob. Success of cancellation is
not guaranteed.
+
+
+ .. code-block::
+
+ from google.cloud import aiplatform_v1beta1
+
+ def sample_cancel_data_labeling_job():
+ # Create a client
+ client = aiplatform_v1beta1.JobServiceClient()
+
+ # Initialize request argument(s)
+ request = aiplatform_v1beta1.CancelDataLabelingJobRequest(
+ name="name_value",
+ )
+
+ # Make the request
+ client.cancel_data_labeling_job(request=request)
+
Args:
request (Union[google.cloud.aiplatform_v1beta1.types.CancelDataLabelingJobRequest, dict]):
The request object. Request message for
@@ -1426,6 +1647,38 @@ def create_hyperparameter_tuning_job(
) -> gca_hyperparameter_tuning_job.HyperparameterTuningJob:
r"""Creates a HyperparameterTuningJob
+
+ .. code-block::
+
+ from google.cloud import aiplatform_v1beta1
+
+ def sample_create_hyperparameter_tuning_job():
+ # Create a client
+ client = aiplatform_v1beta1.JobServiceClient()
+
+ # Initialize request argument(s)
+ hyperparameter_tuning_job = aiplatform_v1beta1.HyperparameterTuningJob()
+ hyperparameter_tuning_job.display_name = "display_name_value"
+ hyperparameter_tuning_job.study_spec.metrics.metric_id = "metric_id_value"
+ hyperparameter_tuning_job.study_spec.metrics.goal = "MINIMIZE"
+ hyperparameter_tuning_job.study_spec.parameters.double_value_spec.min_value = 0.96
+ hyperparameter_tuning_job.study_spec.parameters.double_value_spec.max_value = 0.962
+ hyperparameter_tuning_job.study_spec.parameters.parameter_id = "parameter_id_value"
+ hyperparameter_tuning_job.max_trial_count = 1609
+ hyperparameter_tuning_job.parallel_trial_count = 2128
+ hyperparameter_tuning_job.trial_job_spec.worker_pool_specs.container_spec.image_uri = "image_uri_value"
+
+ request = aiplatform_v1beta1.CreateHyperparameterTuningJobRequest(
+ parent="parent_value",
+ hyperparameter_tuning_job=hyperparameter_tuning_job,
+ )
+
+ # Make the request
+ response = client.create_hyperparameter_tuning_job(request=request)
+
+ # Handle the response
+ print(response)
+
Args:
request (Union[google.cloud.aiplatform_v1beta1.types.CreateHyperparameterTuningJobRequest, dict]):
The request object. Request message for
@@ -1511,6 +1764,26 @@ def get_hyperparameter_tuning_job(
) -> hyperparameter_tuning_job.HyperparameterTuningJob:
r"""Gets a HyperparameterTuningJob
+
+ .. code-block::
+
+ from google.cloud import aiplatform_v1beta1
+
+ def sample_get_hyperparameter_tuning_job():
+ # Create a client
+ client = aiplatform_v1beta1.JobServiceClient()
+
+ # Initialize request argument(s)
+ request = aiplatform_v1beta1.GetHyperparameterTuningJobRequest(
+ name="name_value",
+ )
+
+ # Make the request
+ response = client.get_hyperparameter_tuning_job(request=request)
+
+ # Handle the response
+ print(response)
+
Args:
request (Union[google.cloud.aiplatform_v1beta1.types.GetHyperparameterTuningJobRequest, dict]):
The request object. Request message for
@@ -1587,6 +1860,27 @@ def list_hyperparameter_tuning_jobs(
) -> pagers.ListHyperparameterTuningJobsPager:
r"""Lists HyperparameterTuningJobs in a Location.
+
+ .. code-block::
+
+ from google.cloud import aiplatform_v1beta1
+
+ def sample_list_hyperparameter_tuning_jobs():
+ # Create a client
+ client = aiplatform_v1beta1.JobServiceClient()
+
+ # Initialize request argument(s)
+ request = aiplatform_v1beta1.ListHyperparameterTuningJobsRequest(
+ parent="parent_value",
+ )
+
+ # Make the request
+ page_result = client.list_hyperparameter_tuning_jobs(request=request)
+
+ # Handle the response
+ for response in page_result:
+ print(response)
+
Args:
request (Union[google.cloud.aiplatform_v1beta1.types.ListHyperparameterTuningJobsRequest, dict]):
The request object. Request message for
@@ -1670,6 +1964,30 @@ def delete_hyperparameter_tuning_job(
) -> gac_operation.Operation:
r"""Deletes a HyperparameterTuningJob.
+
+ .. code-block::
+
+ from google.cloud import aiplatform_v1beta1
+
+ def sample_delete_hyperparameter_tuning_job():
+ # Create a client
+ client = aiplatform_v1beta1.JobServiceClient()
+
+ # Initialize request argument(s)
+ request = aiplatform_v1beta1.DeleteHyperparameterTuningJobRequest(
+ name="name_value",
+ )
+
+ # Make the request
+ operation = client.delete_hyperparameter_tuning_job(request=request)
+
+ print("Waiting for operation to complete...")
+
+ response = operation.result()
+
+ # Handle the response
+ print(response)
+
Args:
request (Union[google.cloud.aiplatform_v1beta1.types.DeleteHyperparameterTuningJobRequest, dict]):
The request object. Request message for
@@ -1778,6 +2096,24 @@ def cancel_hyperparameter_tuning_job(
[HyperparameterTuningJob.state][google.cloud.aiplatform.v1beta1.HyperparameterTuningJob.state]
is set to ``CANCELLED``.
+
+
+ .. code-block::
+
+ from google.cloud import aiplatform_v1beta1
+
+ def sample_cancel_hyperparameter_tuning_job():
+ # Create a client
+ client = aiplatform_v1beta1.JobServiceClient()
+
+ # Initialize request argument(s)
+ request = aiplatform_v1beta1.CancelHyperparameterTuningJobRequest(
+ name="name_value",
+ )
+
+ # Make the request
+ client.cancel_hyperparameter_tuning_job(request=request)
+
Args:
request (Union[google.cloud.aiplatform_v1beta1.types.CancelHyperparameterTuningJobRequest, dict]):
The request object. Request message for
@@ -1847,6 +2183,35 @@ def create_batch_prediction_job(
r"""Creates a BatchPredictionJob. A BatchPredictionJob
once created will right away be attempted to start.
+
+
+ .. code-block::
+
+ from google.cloud import aiplatform_v1beta1
+
+ def sample_create_batch_prediction_job():
+ # Create a client
+ client = aiplatform_v1beta1.JobServiceClient()
+
+ # Initialize request argument(s)
+ batch_prediction_job = aiplatform_v1beta1.BatchPredictionJob()
+ batch_prediction_job.display_name = "display_name_value"
+ batch_prediction_job.input_config.gcs_source.uris = ['uris_value_1', 'uris_value_2']
+ batch_prediction_job.input_config.instances_format = "instances_format_value"
+ batch_prediction_job.output_config.gcs_destination.output_uri_prefix = "output_uri_prefix_value"
+ batch_prediction_job.output_config.predictions_format = "predictions_format_value"
+
+ request = aiplatform_v1beta1.CreateBatchPredictionJobRequest(
+ parent="parent_value",
+ batch_prediction_job=batch_prediction_job,
+ )
+
+ # Make the request
+ response = client.create_batch_prediction_job(request=request)
+
+ # Handle the response
+ print(response)
+
Args:
request (Union[google.cloud.aiplatform_v1beta1.types.CreateBatchPredictionJobRequest, dict]):
The request object. Request message for
@@ -1934,6 +2299,26 @@ def get_batch_prediction_job(
) -> batch_prediction_job.BatchPredictionJob:
r"""Gets a BatchPredictionJob
+
+ .. code-block::
+
+ from google.cloud import aiplatform_v1beta1
+
+ def sample_get_batch_prediction_job():
+ # Create a client
+ client = aiplatform_v1beta1.JobServiceClient()
+
+ # Initialize request argument(s)
+ request = aiplatform_v1beta1.GetBatchPredictionJobRequest(
+ name="name_value",
+ )
+
+ # Make the request
+ response = client.get_batch_prediction_job(request=request)
+
+ # Handle the response
+ print(response)
+
Args:
request (Union[google.cloud.aiplatform_v1beta1.types.GetBatchPredictionJobRequest, dict]):
The request object. Request message for
@@ -2010,6 +2395,27 @@ def list_batch_prediction_jobs(
) -> pagers.ListBatchPredictionJobsPager:
r"""Lists BatchPredictionJobs in a Location.
+
+ .. code-block::
+
+ from google.cloud import aiplatform_v1beta1
+
+ def sample_list_batch_prediction_jobs():
+ # Create a client
+ client = aiplatform_v1beta1.JobServiceClient()
+
+ # Initialize request argument(s)
+ request = aiplatform_v1beta1.ListBatchPredictionJobsRequest(
+ parent="parent_value",
+ )
+
+ # Make the request
+ page_result = client.list_batch_prediction_jobs(request=request)
+
+ # Handle the response
+ for response in page_result:
+ print(response)
+
Args:
request (Union[google.cloud.aiplatform_v1beta1.types.ListBatchPredictionJobsRequest, dict]):
The request object. Request message for
@@ -2094,6 +2500,31 @@ def delete_batch_prediction_job(
r"""Deletes a BatchPredictionJob. Can only be called on
jobs that already finished.
+
+
+ .. code-block::
+
+ from google.cloud import aiplatform_v1beta1
+
+ def sample_delete_batch_prediction_job():
+ # Create a client
+ client = aiplatform_v1beta1.JobServiceClient()
+
+ # Initialize request argument(s)
+ request = aiplatform_v1beta1.DeleteBatchPredictionJobRequest(
+ name="name_value",
+ )
+
+ # Make the request
+ operation = client.delete_batch_prediction_job(request=request)
+
+ print("Waiting for operation to complete...")
+
+ response = operation.result()
+
+ # Handle the response
+ print(response)
+
Args:
request (Union[google.cloud.aiplatform_v1beta1.types.DeleteBatchPredictionJobRequest, dict]):
The request object. Request message for
@@ -2200,6 +2631,24 @@ def cancel_batch_prediction_job(
is set to ``CANCELLED``. Any files already outputted by the job
are not deleted.
+
+
+ .. code-block::
+
+ from google.cloud import aiplatform_v1beta1
+
+ def sample_cancel_batch_prediction_job():
+ # Create a client
+ client = aiplatform_v1beta1.JobServiceClient()
+
+ # Initialize request argument(s)
+ request = aiplatform_v1beta1.CancelBatchPredictionJobRequest(
+ name="name_value",
+ )
+
+ # Make the request
+ client.cancel_batch_prediction_job(request=request)
+
Args:
request (Union[google.cloud.aiplatform_v1beta1.types.CancelBatchPredictionJobRequest, dict]):
The request object. Request message for
@@ -2271,6 +2720,32 @@ def create_model_deployment_monitoring_job(
r"""Creates a ModelDeploymentMonitoringJob. It will run
periodically on a configured interval.
+
+
+ .. code-block::
+
+ from google.cloud import aiplatform_v1beta1
+
+ def sample_create_model_deployment_monitoring_job():
+ # Create a client
+ client = aiplatform_v1beta1.JobServiceClient()
+
+ # Initialize request argument(s)
+ model_deployment_monitoring_job = aiplatform_v1beta1.ModelDeploymentMonitoringJob()
+ model_deployment_monitoring_job.display_name = "display_name_value"
+ model_deployment_monitoring_job.endpoint = "endpoint_value"
+
+ request = aiplatform_v1beta1.CreateModelDeploymentMonitoringJobRequest(
+ parent="parent_value",
+ model_deployment_monitoring_job=model_deployment_monitoring_job,
+ )
+
+ # Make the request
+ response = client.create_model_deployment_monitoring_job(request=request)
+
+ # Handle the response
+ print(response)
+
Args:
request (Union[google.cloud.aiplatform_v1beta1.types.CreateModelDeploymentMonitoringJobRequest, dict]):
The request object. Request message for
@@ -2365,6 +2840,29 @@ def search_model_deployment_monitoring_stats_anomalies(
r"""Searches Model Monitoring Statistics generated within
a given time window.
+
+
+ .. code-block::
+
+ from google.cloud import aiplatform_v1beta1
+
+ def sample_search_model_deployment_monitoring_stats_anomalies():
+ # Create a client
+ client = aiplatform_v1beta1.JobServiceClient()
+
+ # Initialize request argument(s)
+ request = aiplatform_v1beta1.SearchModelDeploymentMonitoringStatsAnomaliesRequest(
+ model_deployment_monitoring_job="model_deployment_monitoring_job_value",
+ deployed_model_id="deployed_model_id_value",
+ )
+
+ # Make the request
+ page_result = client.search_model_deployment_monitoring_stats_anomalies(request=request)
+
+ # Handle the response
+ for response in page_result:
+ print(response)
+
Args:
request (Union[google.cloud.aiplatform_v1beta1.types.SearchModelDeploymentMonitoringStatsAnomaliesRequest, dict]):
The request object. Request message for
@@ -2372,7 +2870,7 @@ def search_model_deployment_monitoring_stats_anomalies(
model_deployment_monitoring_job (str):
Required. ModelDeploymentMonitoring Job resource name.
Format:
- \`projects/{project}/locations/{location}/modelDeploymentMonitoringJobs/{model_deployment_monitoring_job}
+ ``projects/{project}/locations/{location}/modelDeploymentMonitoringJobs/{model_deployment_monitoring_job}``
This corresponds to the ``model_deployment_monitoring_job`` field
on the ``request`` instance; if ``request`` is provided, this
@@ -2470,6 +2968,26 @@ def get_model_deployment_monitoring_job(
) -> model_deployment_monitoring_job.ModelDeploymentMonitoringJob:
r"""Gets a ModelDeploymentMonitoringJob.
+
+ .. code-block::
+
+ from google.cloud import aiplatform_v1beta1
+
+ def sample_get_model_deployment_monitoring_job():
+ # Create a client
+ client = aiplatform_v1beta1.JobServiceClient()
+
+ # Initialize request argument(s)
+ request = aiplatform_v1beta1.GetModelDeploymentMonitoringJobRequest(
+ name="name_value",
+ )
+
+ # Make the request
+ response = client.get_model_deployment_monitoring_job(request=request)
+
+ # Handle the response
+ print(response)
+
Args:
request (Union[google.cloud.aiplatform_v1beta1.types.GetModelDeploymentMonitoringJobRequest, dict]):
The request object. Request message for
@@ -2549,6 +3067,27 @@ def list_model_deployment_monitoring_jobs(
) -> pagers.ListModelDeploymentMonitoringJobsPager:
r"""Lists ModelDeploymentMonitoringJobs in a Location.
+
+ .. code-block::
+
+ from google.cloud import aiplatform_v1beta1
+
+ def sample_list_model_deployment_monitoring_jobs():
+ # Create a client
+ client = aiplatform_v1beta1.JobServiceClient()
+
+ # Initialize request argument(s)
+ request = aiplatform_v1beta1.ListModelDeploymentMonitoringJobsRequest(
+ parent="parent_value",
+ )
+
+ # Make the request
+ page_result = client.list_model_deployment_monitoring_jobs(request=request)
+
+ # Handle the response
+ for response in page_result:
+ print(response)
+
Args:
request (Union[google.cloud.aiplatform_v1beta1.types.ListModelDeploymentMonitoringJobsRequest, dict]):
The request object. Request message for
@@ -2637,6 +3176,34 @@ def update_model_deployment_monitoring_job(
) -> gac_operation.Operation:
r"""Updates a ModelDeploymentMonitoringJob.
+
+ .. code-block::
+
+ from google.cloud import aiplatform_v1beta1
+
+ def sample_update_model_deployment_monitoring_job():
+ # Create a client
+ client = aiplatform_v1beta1.JobServiceClient()
+
+ # Initialize request argument(s)
+ model_deployment_monitoring_job = aiplatform_v1beta1.ModelDeploymentMonitoringJob()
+ model_deployment_monitoring_job.display_name = "display_name_value"
+ model_deployment_monitoring_job.endpoint = "endpoint_value"
+
+ request = aiplatform_v1beta1.UpdateModelDeploymentMonitoringJobRequest(
+ model_deployment_monitoring_job=model_deployment_monitoring_job,
+ )
+
+ # Make the request
+ operation = client.update_model_deployment_monitoring_job(request=request)
+
+ print("Waiting for operation to complete...")
+
+ response = operation.result()
+
+ # Handle the response
+ print(response)
+
Args:
request (Union[google.cloud.aiplatform_v1beta1.types.UpdateModelDeploymentMonitoringJobRequest, dict]):
The request object. Request message for
@@ -2770,6 +3337,30 @@ def delete_model_deployment_monitoring_job(
) -> gac_operation.Operation:
r"""Deletes a ModelDeploymentMonitoringJob.
+
+ .. code-block::
+
+ from google.cloud import aiplatform_v1beta1
+
+ def sample_delete_model_deployment_monitoring_job():
+ # Create a client
+ client = aiplatform_v1beta1.JobServiceClient()
+
+ # Initialize request argument(s)
+ request = aiplatform_v1beta1.DeleteModelDeploymentMonitoringJobRequest(
+ name="name_value",
+ )
+
+ # Make the request
+ operation = client.delete_model_deployment_monitoring_job(request=request)
+
+ print("Waiting for operation to complete...")
+
+ response = operation.result()
+
+ # Handle the response
+ print(response)
+
Args:
request (Union[google.cloud.aiplatform_v1beta1.types.DeleteModelDeploymentMonitoringJobRequest, dict]):
The request object. Request message for
@@ -2872,6 +3463,24 @@ def pause_model_deployment_monitoring_job(
[ModelDeploymentMonitoringJob.state][google.cloud.aiplatform.v1beta1.ModelDeploymentMonitoringJob.state]
to 'PAUSED'.
+
+
+ .. code-block::
+
+ from google.cloud import aiplatform_v1beta1
+
+ def sample_pause_model_deployment_monitoring_job():
+ # Create a client
+ client = aiplatform_v1beta1.JobServiceClient()
+
+ # Initialize request argument(s)
+ request = aiplatform_v1beta1.PauseModelDeploymentMonitoringJobRequest(
+ name="name_value",
+ )
+
+ # Make the request
+ client.pause_model_deployment_monitoring_job(request=request)
+
Args:
request (Union[google.cloud.aiplatform_v1beta1.types.PauseModelDeploymentMonitoringJobRequest, dict]):
The request object. Request message for
@@ -2945,6 +3554,24 @@ def resume_model_deployment_monitoring_job(
will start to run from next scheduled time. A deleted
ModelDeploymentMonitoringJob can't be resumed.
+
+
+ .. code-block::
+
+ from google.cloud import aiplatform_v1beta1
+
+ def sample_resume_model_deployment_monitoring_job():
+ # Create a client
+ client = aiplatform_v1beta1.JobServiceClient()
+
+ # Initialize request argument(s)
+ request = aiplatform_v1beta1.ResumeModelDeploymentMonitoringJobRequest(
+ name="name_value",
+ )
+
+ # Make the request
+ client.resume_model_deployment_monitoring_job(request=request)
+
Args:
request (Union[google.cloud.aiplatform_v1beta1.types.ResumeModelDeploymentMonitoringJobRequest, dict]):
The request object. Request message for
diff --git a/google/cloud/aiplatform_v1beta1/services/metadata_service/async_client.py b/google/cloud/aiplatform_v1beta1/services/metadata_service/async_client.py
index 6602a9beca..5b8bb93a39 100644
--- a/google/cloud/aiplatform_v1beta1/services/metadata_service/async_client.py
+++ b/google/cloud/aiplatform_v1beta1/services/metadata_service/async_client.py
@@ -248,6 +248,30 @@ async def create_metadata_store(
r"""Initializes a MetadataStore, including allocation of
resources.
+
+ .. code-block::
+
+ from google.cloud import aiplatform_v1beta1
+
+ def sample_create_metadata_store():
+ # Create a client
+ client = aiplatform_v1beta1.MetadataServiceClient()
+
+ # Initialize request argument(s)
+ request = aiplatform_v1beta1.CreateMetadataStoreRequest(
+ parent="parent_value",
+ )
+
+ # Make the request
+ operation = client.create_metadata_store(request=request)
+
+ print("Waiting for operation to complete...")
+
+ response = operation.result()
+
+ # Handle the response
+ print(response)
+
Args:
request (Union[google.cloud.aiplatform_v1beta1.types.CreateMetadataStoreRequest, dict]):
The request object. Request message for
@@ -356,6 +380,25 @@ async def get_metadata_store(
) -> metadata_store.MetadataStore:
r"""Retrieves a specific MetadataStore.
+ .. code-block::
+
+ from google.cloud import aiplatform_v1beta1
+
+ def sample_get_metadata_store():
+ # Create a client
+ client = aiplatform_v1beta1.MetadataServiceClient()
+
+ # Initialize request argument(s)
+ request = aiplatform_v1beta1.GetMetadataStoreRequest(
+ name="name_value",
+ )
+
+ # Make the request
+ response = client.get_metadata_store(request=request)
+
+ # Handle the response
+ print(response)
+
Args:
request (Union[google.cloud.aiplatform_v1beta1.types.GetMetadataStoreRequest, dict]):
The request object. Request message for
@@ -429,6 +472,26 @@ async def list_metadata_stores(
) -> pagers.ListMetadataStoresAsyncPager:
r"""Lists MetadataStores for a Location.
+ .. code-block::
+
+ from google.cloud import aiplatform_v1beta1
+
+ def sample_list_metadata_stores():
+ # Create a client
+ client = aiplatform_v1beta1.MetadataServiceClient()
+
+ # Initialize request argument(s)
+ request = aiplatform_v1beta1.ListMetadataStoresRequest(
+ parent="parent_value",
+ )
+
+ # Make the request
+ page_result = client.list_metadata_stores(request=request)
+
+ # Handle the response
+ for response in page_result:
+ print(response)
+
Args:
request (Union[google.cloud.aiplatform_v1beta1.types.ListMetadataStoresRequest, dict]):
The request object. Request message for
@@ -511,6 +574,30 @@ async def delete_metadata_store(
r"""Deletes a single MetadataStore and all its child
resources (Artifacts, Executions, and Contexts).
+
+ .. code-block::
+
+ from google.cloud import aiplatform_v1beta1
+
+ def sample_delete_metadata_store():
+ # Create a client
+ client = aiplatform_v1beta1.MetadataServiceClient()
+
+ # Initialize request argument(s)
+ request = aiplatform_v1beta1.DeleteMetadataStoreRequest(
+ name="name_value",
+ )
+
+ # Make the request
+ operation = client.delete_metadata_store(request=request)
+
+ print("Waiting for operation to complete...")
+
+ response = operation.result()
+
+ # Handle the response
+ print(response)
+
Args:
request (Union[google.cloud.aiplatform_v1beta1.types.DeleteMetadataStoreRequest, dict]):
The request object. Request message for
@@ -606,6 +693,25 @@ async def create_artifact(
) -> gca_artifact.Artifact:
r"""Creates an Artifact associated with a MetadataStore.
+ .. code-block::
+
+ from google.cloud import aiplatform_v1beta1
+
+ def sample_create_artifact():
+ # Create a client
+ client = aiplatform_v1beta1.MetadataServiceClient()
+
+ # Initialize request argument(s)
+ request = aiplatform_v1beta1.CreateArtifactRequest(
+ parent="parent_value",
+ )
+
+ # Make the request
+ response = client.create_artifact(request=request)
+
+ # Handle the response
+ print(response)
+
Args:
request (Union[google.cloud.aiplatform_v1beta1.types.CreateArtifactRequest, dict]):
The request object. Request message for
@@ -700,6 +806,25 @@ async def get_artifact(
) -> artifact.Artifact:
r"""Retrieves a specific Artifact.
+ .. code-block::
+
+ from google.cloud import aiplatform_v1beta1
+
+ def sample_get_artifact():
+ # Create a client
+ client = aiplatform_v1beta1.MetadataServiceClient()
+
+ # Initialize request argument(s)
+ request = aiplatform_v1beta1.GetArtifactRequest(
+ name="name_value",
+ )
+
+ # Make the request
+ response = client.get_artifact(request=request)
+
+ # Handle the response
+ print(response)
+
Args:
request (Union[google.cloud.aiplatform_v1beta1.types.GetArtifactRequest, dict]):
The request object. Request message for
@@ -770,6 +895,26 @@ async def list_artifacts(
) -> pagers.ListArtifactsAsyncPager:
r"""Lists Artifacts in the MetadataStore.
+ .. code-block::
+
+ from google.cloud import aiplatform_v1beta1
+
+ def sample_list_artifacts():
+ # Create a client
+ client = aiplatform_v1beta1.MetadataServiceClient()
+
+ # Initialize request argument(s)
+ request = aiplatform_v1beta1.ListArtifactsRequest(
+ parent="parent_value",
+ )
+
+ # Make the request
+ page_result = client.list_artifacts(request=request)
+
+ # Handle the response
+ for response in page_result:
+ print(response)
+
Args:
request (Union[google.cloud.aiplatform_v1beta1.types.ListArtifactsRequest, dict]):
The request object. Request message for
@@ -852,6 +997,24 @@ async def update_artifact(
) -> gca_artifact.Artifact:
r"""Updates a stored Artifact.
+ .. code-block::
+
+ from google.cloud import aiplatform_v1beta1
+
+ def sample_update_artifact():
+ # Create a client
+ client = aiplatform_v1beta1.MetadataServiceClient()
+
+ # Initialize request argument(s)
+ request = aiplatform_v1beta1.UpdateArtifactRequest(
+ )
+
+ # Make the request
+ response = client.update_artifact(request=request)
+
+ # Handle the response
+ print(response)
+
Args:
request (Union[google.cloud.aiplatform_v1beta1.types.UpdateArtifactRequest, dict]):
The request object. Request message for
@@ -938,6 +1101,29 @@ async def delete_artifact(
) -> operation_async.AsyncOperation:
r"""Deletes an Artifact.
+ .. code-block::
+
+ from google.cloud import aiplatform_v1beta1
+
+ def sample_delete_artifact():
+ # Create a client
+ client = aiplatform_v1beta1.MetadataServiceClient()
+
+ # Initialize request argument(s)
+ request = aiplatform_v1beta1.DeleteArtifactRequest(
+ name="name_value",
+ )
+
+ # Make the request
+ operation = client.delete_artifact(request=request)
+
+ print("Waiting for operation to complete...")
+
+ response = operation.result()
+
+ # Handle the response
+ print(response)
+
Args:
request (Union[google.cloud.aiplatform_v1beta1.types.DeleteArtifactRequest, dict]):
The request object. Request message for
@@ -1031,6 +1217,30 @@ async def purge_artifacts(
) -> operation_async.AsyncOperation:
r"""Purges Artifacts.
+ .. code-block::
+
+ from google.cloud import aiplatform_v1beta1
+
+ def sample_purge_artifacts():
+ # Create a client
+ client = aiplatform_v1beta1.MetadataServiceClient()
+
+ # Initialize request argument(s)
+ request = aiplatform_v1beta1.PurgeArtifactsRequest(
+ parent="parent_value",
+ filter="filter_value",
+ )
+
+ # Make the request
+ operation = client.purge_artifacts(request=request)
+
+ print("Waiting for operation to complete...")
+
+ response = operation.result()
+
+ # Handle the response
+ print(response)
+
Args:
request (Union[google.cloud.aiplatform_v1beta1.types.PurgeArtifactsRequest, dict]):
The request object. Request message for
@@ -1117,6 +1327,25 @@ async def create_context(
) -> gca_context.Context:
r"""Creates a Context associated with a MetadataStore.
+ .. code-block::
+
+ from google.cloud import aiplatform_v1beta1
+
+ def sample_create_context():
+ # Create a client
+ client = aiplatform_v1beta1.MetadataServiceClient()
+
+ # Initialize request argument(s)
+ request = aiplatform_v1beta1.CreateContextRequest(
+ parent="parent_value",
+ )
+
+ # Make the request
+ response = client.create_context(request=request)
+
+ # Handle the response
+ print(response)
+
Args:
request (Union[google.cloud.aiplatform_v1beta1.types.CreateContextRequest, dict]):
The request object. Request message for
@@ -1211,6 +1440,25 @@ async def get_context(
) -> context.Context:
r"""Retrieves a specific Context.
+ .. code-block::
+
+ from google.cloud import aiplatform_v1beta1
+
+ def sample_get_context():
+ # Create a client
+ client = aiplatform_v1beta1.MetadataServiceClient()
+
+ # Initialize request argument(s)
+ request = aiplatform_v1beta1.GetContextRequest(
+ name="name_value",
+ )
+
+ # Make the request
+ response = client.get_context(request=request)
+
+ # Handle the response
+ print(response)
+
Args:
request (Union[google.cloud.aiplatform_v1beta1.types.GetContextRequest, dict]):
The request object. Request message for
@@ -1281,6 +1529,26 @@ async def list_contexts(
) -> pagers.ListContextsAsyncPager:
r"""Lists Contexts on the MetadataStore.
+ .. code-block::
+
+ from google.cloud import aiplatform_v1beta1
+
+ def sample_list_contexts():
+ # Create a client
+ client = aiplatform_v1beta1.MetadataServiceClient()
+
+ # Initialize request argument(s)
+ request = aiplatform_v1beta1.ListContextsRequest(
+ parent="parent_value",
+ )
+
+ # Make the request
+ page_result = client.list_contexts(request=request)
+
+ # Handle the response
+ for response in page_result:
+ print(response)
+
Args:
request (Union[google.cloud.aiplatform_v1beta1.types.ListContextsRequest, dict]):
The request object. Request message for
@@ -1363,6 +1631,24 @@ async def update_context(
) -> gca_context.Context:
r"""Updates a stored Context.
+ .. code-block::
+
+ from google.cloud import aiplatform_v1beta1
+
+ def sample_update_context():
+ # Create a client
+ client = aiplatform_v1beta1.MetadataServiceClient()
+
+ # Initialize request argument(s)
+ request = aiplatform_v1beta1.UpdateContextRequest(
+ )
+
+ # Make the request
+ response = client.update_context(request=request)
+
+ # Handle the response
+ print(response)
+
Args:
request (Union[google.cloud.aiplatform_v1beta1.types.UpdateContextRequest, dict]):
The request object. Request message for
@@ -1448,6 +1734,29 @@ async def delete_context(
) -> operation_async.AsyncOperation:
r"""Deletes a stored Context.
+ .. code-block::
+
+ from google.cloud import aiplatform_v1beta1
+
+ def sample_delete_context():
+ # Create a client
+ client = aiplatform_v1beta1.MetadataServiceClient()
+
+ # Initialize request argument(s)
+ request = aiplatform_v1beta1.DeleteContextRequest(
+ name="name_value",
+ )
+
+ # Make the request
+ operation = client.delete_context(request=request)
+
+ print("Waiting for operation to complete...")
+
+ response = operation.result()
+
+ # Handle the response
+ print(response)
+
Args:
request (Union[google.cloud.aiplatform_v1beta1.types.DeleteContextRequest, dict]):
The request object. Request message for
@@ -1541,6 +1850,30 @@ async def purge_contexts(
) -> operation_async.AsyncOperation:
r"""Purges Contexts.
+ .. code-block::
+
+ from google.cloud import aiplatform_v1beta1
+
+ def sample_purge_contexts():
+ # Create a client
+ client = aiplatform_v1beta1.MetadataServiceClient()
+
+ # Initialize request argument(s)
+ request = aiplatform_v1beta1.PurgeContextsRequest(
+ parent="parent_value",
+ filter="filter_value",
+ )
+
+ # Make the request
+ operation = client.purge_contexts(request=request)
+
+ print("Waiting for operation to complete...")
+
+ response = operation.result()
+
+ # Handle the response
+ print(response)
+
Args:
request (Union[google.cloud.aiplatform_v1beta1.types.PurgeContextsRequest, dict]):
The request object. Request message for
@@ -1631,6 +1964,26 @@ async def add_context_artifacts_and_executions(
If any of the Artifacts or Executions have already been
added to a Context, they are simply skipped.
+
+ .. code-block::
+
+ from google.cloud import aiplatform_v1beta1
+
+ def sample_add_context_artifacts_and_executions():
+ # Create a client
+ client = aiplatform_v1beta1.MetadataServiceClient()
+
+ # Initialize request argument(s)
+ request = aiplatform_v1beta1.AddContextArtifactsAndExecutionsRequest(
+ context="context_value",
+ )
+
+ # Make the request
+ response = client.add_context_artifacts_and_executions(request=request)
+
+ # Handle the response
+ print(response)
+
Args:
request (Union[google.cloud.aiplatform_v1beta1.types.AddContextArtifactsAndExecutionsRequest, dict]):
The request object. Request message for
@@ -1732,6 +2085,26 @@ async def add_context_children(
cycle or cause any Context to have more than 10 parents, the
request will fail with an INVALID_ARGUMENT error.
+
+ .. code-block::
+
+ from google.cloud import aiplatform_v1beta1
+
+ def sample_add_context_children():
+ # Create a client
+ client = aiplatform_v1beta1.MetadataServiceClient()
+
+ # Initialize request argument(s)
+ request = aiplatform_v1beta1.AddContextChildrenRequest(
+ context="context_value",
+ )
+
+ # Make the request
+ response = client.add_context_children(request=request)
+
+ # Handle the response
+ print(response)
+
Args:
request (Union[google.cloud.aiplatform_v1beta1.types.AddContextChildrenRequest, dict]):
The request object. Request message for
@@ -1818,6 +2191,26 @@ async def query_context_lineage_subgraph(
specified Context, connected by Event edges and returned
as a LineageSubgraph.
+
+ .. code-block::
+
+ from google.cloud import aiplatform_v1beta1
+
+ def sample_query_context_lineage_subgraph():
+ # Create a client
+ client = aiplatform_v1beta1.MetadataServiceClient()
+
+ # Initialize request argument(s)
+ request = aiplatform_v1beta1.QueryContextLineageSubgraphRequest(
+ context="context_value",
+ )
+
+ # Make the request
+ response = client.query_context_lineage_subgraph(request=request)
+
+ # Handle the response
+ print(response)
+
Args:
request (Union[google.cloud.aiplatform_v1beta1.types.QueryContextLineageSubgraphRequest, dict]):
The request object. Request message for
@@ -1899,6 +2292,25 @@ async def create_execution(
) -> gca_execution.Execution:
r"""Creates an Execution associated with a MetadataStore.
+ .. code-block::
+
+ from google.cloud import aiplatform_v1beta1
+
+ def sample_create_execution():
+ # Create a client
+ client = aiplatform_v1beta1.MetadataServiceClient()
+
+ # Initialize request argument(s)
+ request = aiplatform_v1beta1.CreateExecutionRequest(
+ parent="parent_value",
+ )
+
+ # Make the request
+ response = client.create_execution(request=request)
+
+ # Handle the response
+ print(response)
+
Args:
request (Union[google.cloud.aiplatform_v1beta1.types.CreateExecutionRequest, dict]):
The request object. Request message for
@@ -1993,6 +2405,25 @@ async def get_execution(
) -> execution.Execution:
r"""Retrieves a specific Execution.
+ .. code-block::
+
+ from google.cloud import aiplatform_v1beta1
+
+ def sample_get_execution():
+ # Create a client
+ client = aiplatform_v1beta1.MetadataServiceClient()
+
+ # Initialize request argument(s)
+ request = aiplatform_v1beta1.GetExecutionRequest(
+ name="name_value",
+ )
+
+ # Make the request
+ response = client.get_execution(request=request)
+
+ # Handle the response
+ print(response)
+
Args:
request (Union[google.cloud.aiplatform_v1beta1.types.GetExecutionRequest, dict]):
The request object. Request message for
@@ -2063,6 +2494,26 @@ async def list_executions(
) -> pagers.ListExecutionsAsyncPager:
r"""Lists Executions in the MetadataStore.
+ .. code-block::
+
+ from google.cloud import aiplatform_v1beta1
+
+ def sample_list_executions():
+ # Create a client
+ client = aiplatform_v1beta1.MetadataServiceClient()
+
+ # Initialize request argument(s)
+ request = aiplatform_v1beta1.ListExecutionsRequest(
+ parent="parent_value",
+ )
+
+ # Make the request
+ page_result = client.list_executions(request=request)
+
+ # Handle the response
+ for response in page_result:
+ print(response)
+
Args:
request (Union[google.cloud.aiplatform_v1beta1.types.ListExecutionsRequest, dict]):
The request object. Request message for
@@ -2145,6 +2596,24 @@ async def update_execution(
) -> gca_execution.Execution:
r"""Updates a stored Execution.
+ .. code-block::
+
+ from google.cloud import aiplatform_v1beta1
+
+ def sample_update_execution():
+ # Create a client
+ client = aiplatform_v1beta1.MetadataServiceClient()
+
+ # Initialize request argument(s)
+ request = aiplatform_v1beta1.UpdateExecutionRequest(
+ )
+
+ # Make the request
+ response = client.update_execution(request=request)
+
+ # Handle the response
+ print(response)
+
Args:
request (Union[google.cloud.aiplatform_v1beta1.types.UpdateExecutionRequest, dict]):
The request object. Request message for
@@ -2231,6 +2700,29 @@ async def delete_execution(
) -> operation_async.AsyncOperation:
r"""Deletes an Execution.
+ .. code-block::
+
+ from google.cloud import aiplatform_v1beta1
+
+ def sample_delete_execution():
+ # Create a client
+ client = aiplatform_v1beta1.MetadataServiceClient()
+
+ # Initialize request argument(s)
+ request = aiplatform_v1beta1.DeleteExecutionRequest(
+ name="name_value",
+ )
+
+ # Make the request
+ operation = client.delete_execution(request=request)
+
+ print("Waiting for operation to complete...")
+
+ response = operation.result()
+
+ # Handle the response
+ print(response)
+
Args:
request (Union[google.cloud.aiplatform_v1beta1.types.DeleteExecutionRequest, dict]):
The request object. Request message for
@@ -2324,6 +2816,30 @@ async def purge_executions(
) -> operation_async.AsyncOperation:
r"""Purges Executions.
+ .. code-block::
+
+ from google.cloud import aiplatform_v1beta1
+
+ def sample_purge_executions():
+ # Create a client
+ client = aiplatform_v1beta1.MetadataServiceClient()
+
+ # Initialize request argument(s)
+ request = aiplatform_v1beta1.PurgeExecutionsRequest(
+ parent="parent_value",
+ filter="filter_value",
+ )
+
+ # Make the request
+ operation = client.purge_executions(request=request)
+
+ print("Waiting for operation to complete...")
+
+ response = operation.result()
+
+ # Handle the response
+ print(response)
+
Args:
request (Union[google.cloud.aiplatform_v1beta1.types.PurgeExecutionsRequest, dict]):
The request object. Request message for
@@ -2413,6 +2929,26 @@ async def add_execution_events(
between the Execution and the Artifact, the Event is
skipped.
+
+ .. code-block::
+
+ from google.cloud import aiplatform_v1beta1
+
+ def sample_add_execution_events():
+ # Create a client
+ client = aiplatform_v1beta1.MetadataServiceClient()
+
+ # Initialize request argument(s)
+ request = aiplatform_v1beta1.AddExecutionEventsRequest(
+ execution="execution_value",
+ )
+
+ # Make the request
+ response = client.add_execution_events(request=request)
+
+ # Handle the response
+ print(response)
+
Args:
request (Union[google.cloud.aiplatform_v1beta1.types.AddExecutionEventsRequest, dict]):
The request object. Request message for
@@ -2498,6 +3034,26 @@ async def query_execution_inputs_and_outputs(
this Execution, in the form of LineageSubgraph that also
contains the Execution and connecting Events.
+
+ .. code-block::
+
+ from google.cloud import aiplatform_v1beta1
+
+ def sample_query_execution_inputs_and_outputs():
+ # Create a client
+ client = aiplatform_v1beta1.MetadataServiceClient()
+
+ # Initialize request argument(s)
+ request = aiplatform_v1beta1.QueryExecutionInputsAndOutputsRequest(
+ execution="execution_value",
+ )
+
+ # Make the request
+ response = client.query_execution_inputs_and_outputs(request=request)
+
+ # Handle the response
+ print(response)
+
Args:
request (Union[google.cloud.aiplatform_v1beta1.types.QueryExecutionInputsAndOutputsRequest, dict]):
The request object. Request message for
@@ -2576,6 +3132,29 @@ async def create_metadata_schema(
) -> gca_metadata_schema.MetadataSchema:
r"""Creates a MetadataSchema.
+ .. code-block::
+
+ from google.cloud import aiplatform_v1beta1
+
+ def sample_create_metadata_schema():
+ # Create a client
+ client = aiplatform_v1beta1.MetadataServiceClient()
+
+ # Initialize request argument(s)
+ metadata_schema = aiplatform_v1beta1.MetadataSchema()
+ metadata_schema.schema = "schema_value"
+
+ request = aiplatform_v1beta1.CreateMetadataSchemaRequest(
+ parent="parent_value",
+ metadata_schema=metadata_schema,
+ )
+
+ # Make the request
+ response = client.create_metadata_schema(request=request)
+
+ # Handle the response
+ print(response)
+
Args:
request (Union[google.cloud.aiplatform_v1beta1.types.CreateMetadataSchemaRequest, dict]):
The request object. Request message for
@@ -2672,6 +3251,25 @@ async def get_metadata_schema(
) -> metadata_schema.MetadataSchema:
r"""Retrieves a specific MetadataSchema.
+ .. code-block::
+
+ from google.cloud import aiplatform_v1beta1
+
+ def sample_get_metadata_schema():
+ # Create a client
+ client = aiplatform_v1beta1.MetadataServiceClient()
+
+ # Initialize request argument(s)
+ request = aiplatform_v1beta1.GetMetadataSchemaRequest(
+ name="name_value",
+ )
+
+ # Make the request
+ response = client.get_metadata_schema(request=request)
+
+ # Handle the response
+ print(response)
+
Args:
request (Union[google.cloud.aiplatform_v1beta1.types.GetMetadataSchemaRequest, dict]):
The request object. Request message for
@@ -2742,6 +3340,26 @@ async def list_metadata_schemas(
) -> pagers.ListMetadataSchemasAsyncPager:
r"""Lists MetadataSchemas.
+ .. code-block::
+
+ from google.cloud import aiplatform_v1beta1
+
+ def sample_list_metadata_schemas():
+ # Create a client
+ client = aiplatform_v1beta1.MetadataServiceClient()
+
+ # Initialize request argument(s)
+ request = aiplatform_v1beta1.ListMetadataSchemasRequest(
+ parent="parent_value",
+ )
+
+ # Make the request
+ page_result = client.list_metadata_schemas(request=request)
+
+ # Handle the response
+ for response in page_result:
+ print(response)
+
Args:
request (Union[google.cloud.aiplatform_v1beta1.types.ListMetadataSchemasRequest, dict]):
The request object. Request message for
@@ -2827,6 +3445,26 @@ async def query_artifact_lineage_subgraph(
Artifacts and Executions connected by Event edges and
returned as a LineageSubgraph.
+
+ .. code-block::
+
+ from google.cloud import aiplatform_v1beta1
+
+ def sample_query_artifact_lineage_subgraph():
+ # Create a client
+ client = aiplatform_v1beta1.MetadataServiceClient()
+
+ # Initialize request argument(s)
+ request = aiplatform_v1beta1.QueryArtifactLineageSubgraphRequest(
+ artifact="artifact_value",
+ )
+
+ # Make the request
+ response = client.query_artifact_lineage_subgraph(request=request)
+
+ # Handle the response
+ print(response)
+
Args:
request (Union[google.cloud.aiplatform_v1beta1.types.QueryArtifactLineageSubgraphRequest, dict]):
The request object. Request message for
diff --git a/google/cloud/aiplatform_v1beta1/services/metadata_service/client.py b/google/cloud/aiplatform_v1beta1/services/metadata_service/client.py
index 01fc9bf5d7..36ee7f6b70 100644
--- a/google/cloud/aiplatform_v1beta1/services/metadata_service/client.py
+++ b/google/cloud/aiplatform_v1beta1/services/metadata_service/client.py
@@ -517,6 +517,31 @@ def create_metadata_store(
r"""Initializes a MetadataStore, including allocation of
resources.
+
+
+ .. code-block::
+
+ from google.cloud import aiplatform_v1beta1
+
+ def sample_create_metadata_store():
+ # Create a client
+ client = aiplatform_v1beta1.MetadataServiceClient()
+
+ # Initialize request argument(s)
+ request = aiplatform_v1beta1.CreateMetadataStoreRequest(
+ parent="parent_value",
+ )
+
+ # Make the request
+ operation = client.create_metadata_store(request=request)
+
+ print("Waiting for operation to complete...")
+
+ response = operation.result()
+
+ # Handle the response
+ print(response)
+
Args:
request (Union[google.cloud.aiplatform_v1beta1.types.CreateMetadataStoreRequest, dict]):
The request object. Request message for
@@ -625,6 +650,26 @@ def get_metadata_store(
) -> metadata_store.MetadataStore:
r"""Retrieves a specific MetadataStore.
+
+ .. code-block::
+
+ from google.cloud import aiplatform_v1beta1
+
+ def sample_get_metadata_store():
+ # Create a client
+ client = aiplatform_v1beta1.MetadataServiceClient()
+
+ # Initialize request argument(s)
+ request = aiplatform_v1beta1.GetMetadataStoreRequest(
+ name="name_value",
+ )
+
+ # Make the request
+ response = client.get_metadata_store(request=request)
+
+ # Handle the response
+ print(response)
+
Args:
request (Union[google.cloud.aiplatform_v1beta1.types.GetMetadataStoreRequest, dict]):
The request object. Request message for
@@ -698,6 +743,27 @@ def list_metadata_stores(
) -> pagers.ListMetadataStoresPager:
r"""Lists MetadataStores for a Location.
+
+ .. code-block::
+
+ from google.cloud import aiplatform_v1beta1
+
+ def sample_list_metadata_stores():
+ # Create a client
+ client = aiplatform_v1beta1.MetadataServiceClient()
+
+ # Initialize request argument(s)
+ request = aiplatform_v1beta1.ListMetadataStoresRequest(
+ parent="parent_value",
+ )
+
+ # Make the request
+ page_result = client.list_metadata_stores(request=request)
+
+ # Handle the response
+ for response in page_result:
+ print(response)
+
Args:
request (Union[google.cloud.aiplatform_v1beta1.types.ListMetadataStoresRequest, dict]):
The request object. Request message for
@@ -780,6 +846,31 @@ def delete_metadata_store(
r"""Deletes a single MetadataStore and all its child
resources (Artifacts, Executions, and Contexts).
+
+
+ .. code-block::
+
+ from google.cloud import aiplatform_v1beta1
+
+ def sample_delete_metadata_store():
+ # Create a client
+ client = aiplatform_v1beta1.MetadataServiceClient()
+
+ # Initialize request argument(s)
+ request = aiplatform_v1beta1.DeleteMetadataStoreRequest(
+ name="name_value",
+ )
+
+ # Make the request
+ operation = client.delete_metadata_store(request=request)
+
+ print("Waiting for operation to complete...")
+
+ response = operation.result()
+
+ # Handle the response
+ print(response)
+
Args:
request (Union[google.cloud.aiplatform_v1beta1.types.DeleteMetadataStoreRequest, dict]):
The request object. Request message for
@@ -875,6 +966,26 @@ def create_artifact(
) -> gca_artifact.Artifact:
r"""Creates an Artifact associated with a MetadataStore.
+
+ .. code-block::
+
+ from google.cloud import aiplatform_v1beta1
+
+ def sample_create_artifact():
+ # Create a client
+ client = aiplatform_v1beta1.MetadataServiceClient()
+
+ # Initialize request argument(s)
+ request = aiplatform_v1beta1.CreateArtifactRequest(
+ parent="parent_value",
+ )
+
+ # Make the request
+ response = client.create_artifact(request=request)
+
+ # Handle the response
+ print(response)
+
Args:
request (Union[google.cloud.aiplatform_v1beta1.types.CreateArtifactRequest, dict]):
The request object. Request message for
@@ -969,6 +1080,26 @@ def get_artifact(
) -> artifact.Artifact:
r"""Retrieves a specific Artifact.
+
+ .. code-block::
+
+ from google.cloud import aiplatform_v1beta1
+
+ def sample_get_artifact():
+ # Create a client
+ client = aiplatform_v1beta1.MetadataServiceClient()
+
+ # Initialize request argument(s)
+ request = aiplatform_v1beta1.GetArtifactRequest(
+ name="name_value",
+ )
+
+ # Make the request
+ response = client.get_artifact(request=request)
+
+ # Handle the response
+ print(response)
+
Args:
request (Union[google.cloud.aiplatform_v1beta1.types.GetArtifactRequest, dict]):
The request object. Request message for
@@ -1039,6 +1170,27 @@ def list_artifacts(
) -> pagers.ListArtifactsPager:
r"""Lists Artifacts in the MetadataStore.
+
+ .. code-block::
+
+ from google.cloud import aiplatform_v1beta1
+
+ def sample_list_artifacts():
+ # Create a client
+ client = aiplatform_v1beta1.MetadataServiceClient()
+
+ # Initialize request argument(s)
+ request = aiplatform_v1beta1.ListArtifactsRequest(
+ parent="parent_value",
+ )
+
+ # Make the request
+ page_result = client.list_artifacts(request=request)
+
+ # Handle the response
+ for response in page_result:
+ print(response)
+
Args:
request (Union[google.cloud.aiplatform_v1beta1.types.ListArtifactsRequest, dict]):
The request object. Request message for
@@ -1121,6 +1273,25 @@ def update_artifact(
) -> gca_artifact.Artifact:
r"""Updates a stored Artifact.
+
+ .. code-block::
+
+ from google.cloud import aiplatform_v1beta1
+
+ def sample_update_artifact():
+ # Create a client
+ client = aiplatform_v1beta1.MetadataServiceClient()
+
+ # Initialize request argument(s)
+ request = aiplatform_v1beta1.UpdateArtifactRequest(
+ )
+
+ # Make the request
+ response = client.update_artifact(request=request)
+
+ # Handle the response
+ print(response)
+
Args:
request (Union[google.cloud.aiplatform_v1beta1.types.UpdateArtifactRequest, dict]):
The request object. Request message for
@@ -1207,6 +1378,30 @@ def delete_artifact(
) -> gac_operation.Operation:
r"""Deletes an Artifact.
+
+ .. code-block::
+
+ from google.cloud import aiplatform_v1beta1
+
+ def sample_delete_artifact():
+ # Create a client
+ client = aiplatform_v1beta1.MetadataServiceClient()
+
+ # Initialize request argument(s)
+ request = aiplatform_v1beta1.DeleteArtifactRequest(
+ name="name_value",
+ )
+
+ # Make the request
+ operation = client.delete_artifact(request=request)
+
+ print("Waiting for operation to complete...")
+
+ response = operation.result()
+
+ # Handle the response
+ print(response)
+
Args:
request (Union[google.cloud.aiplatform_v1beta1.types.DeleteArtifactRequest, dict]):
The request object. Request message for
@@ -1300,6 +1495,31 @@ def purge_artifacts(
) -> gac_operation.Operation:
r"""Purges Artifacts.
+
+ .. code-block::
+
+ from google.cloud import aiplatform_v1beta1
+
+ def sample_purge_artifacts():
+ # Create a client
+ client = aiplatform_v1beta1.MetadataServiceClient()
+
+ # Initialize request argument(s)
+ request = aiplatform_v1beta1.PurgeArtifactsRequest(
+ parent="parent_value",
+ filter="filter_value",
+ )
+
+ # Make the request
+ operation = client.purge_artifacts(request=request)
+
+ print("Waiting for operation to complete...")
+
+ response = operation.result()
+
+ # Handle the response
+ print(response)
+
Args:
request (Union[google.cloud.aiplatform_v1beta1.types.PurgeArtifactsRequest, dict]):
The request object. Request message for
@@ -1386,6 +1606,26 @@ def create_context(
) -> gca_context.Context:
r"""Creates a Context associated with a MetadataStore.
+
+ .. code-block::
+
+ from google.cloud import aiplatform_v1beta1
+
+ def sample_create_context():
+ # Create a client
+ client = aiplatform_v1beta1.MetadataServiceClient()
+
+ # Initialize request argument(s)
+ request = aiplatform_v1beta1.CreateContextRequest(
+ parent="parent_value",
+ )
+
+ # Make the request
+ response = client.create_context(request=request)
+
+ # Handle the response
+ print(response)
+
Args:
request (Union[google.cloud.aiplatform_v1beta1.types.CreateContextRequest, dict]):
The request object. Request message for
@@ -1480,6 +1720,26 @@ def get_context(
) -> context.Context:
r"""Retrieves a specific Context.
+
+ .. code-block::
+
+ from google.cloud import aiplatform_v1beta1
+
+ def sample_get_context():
+ # Create a client
+ client = aiplatform_v1beta1.MetadataServiceClient()
+
+ # Initialize request argument(s)
+ request = aiplatform_v1beta1.GetContextRequest(
+ name="name_value",
+ )
+
+ # Make the request
+ response = client.get_context(request=request)
+
+ # Handle the response
+ print(response)
+
Args:
request (Union[google.cloud.aiplatform_v1beta1.types.GetContextRequest, dict]):
The request object. Request message for
@@ -1550,6 +1810,27 @@ def list_contexts(
) -> pagers.ListContextsPager:
r"""Lists Contexts on the MetadataStore.
+
+ .. code-block::
+
+ from google.cloud import aiplatform_v1beta1
+
+ def sample_list_contexts():
+ # Create a client
+ client = aiplatform_v1beta1.MetadataServiceClient()
+
+ # Initialize request argument(s)
+ request = aiplatform_v1beta1.ListContextsRequest(
+ parent="parent_value",
+ )
+
+ # Make the request
+ page_result = client.list_contexts(request=request)
+
+ # Handle the response
+ for response in page_result:
+ print(response)
+
Args:
request (Union[google.cloud.aiplatform_v1beta1.types.ListContextsRequest, dict]):
The request object. Request message for
@@ -1632,6 +1913,25 @@ def update_context(
) -> gca_context.Context:
r"""Updates a stored Context.
+
+ .. code-block::
+
+ from google.cloud import aiplatform_v1beta1
+
+ def sample_update_context():
+ # Create a client
+ client = aiplatform_v1beta1.MetadataServiceClient()
+
+ # Initialize request argument(s)
+ request = aiplatform_v1beta1.UpdateContextRequest(
+ )
+
+ # Make the request
+ response = client.update_context(request=request)
+
+ # Handle the response
+ print(response)
+
Args:
request (Union[google.cloud.aiplatform_v1beta1.types.UpdateContextRequest, dict]):
The request object. Request message for
@@ -1717,6 +2017,30 @@ def delete_context(
) -> gac_operation.Operation:
r"""Deletes a stored Context.
+
+ .. code-block::
+
+ from google.cloud import aiplatform_v1beta1
+
+ def sample_delete_context():
+ # Create a client
+ client = aiplatform_v1beta1.MetadataServiceClient()
+
+ # Initialize request argument(s)
+ request = aiplatform_v1beta1.DeleteContextRequest(
+ name="name_value",
+ )
+
+ # Make the request
+ operation = client.delete_context(request=request)
+
+ print("Waiting for operation to complete...")
+
+ response = operation.result()
+
+ # Handle the response
+ print(response)
+
Args:
request (Union[google.cloud.aiplatform_v1beta1.types.DeleteContextRequest, dict]):
The request object. Request message for
@@ -1810,6 +2134,31 @@ def purge_contexts(
) -> gac_operation.Operation:
r"""Purges Contexts.
+
+ .. code-block::
+
+ from google.cloud import aiplatform_v1beta1
+
+ def sample_purge_contexts():
+ # Create a client
+ client = aiplatform_v1beta1.MetadataServiceClient()
+
+ # Initialize request argument(s)
+ request = aiplatform_v1beta1.PurgeContextsRequest(
+ parent="parent_value",
+ filter="filter_value",
+ )
+
+ # Make the request
+ operation = client.purge_contexts(request=request)
+
+ print("Waiting for operation to complete...")
+
+ response = operation.result()
+
+ # Handle the response
+ print(response)
+
Args:
request (Union[google.cloud.aiplatform_v1beta1.types.PurgeContextsRequest, dict]):
The request object. Request message for
@@ -1900,6 +2249,27 @@ def add_context_artifacts_and_executions(
If any of the Artifacts or Executions have already been
added to a Context, they are simply skipped.
+
+
+ .. code-block::
+
+ from google.cloud import aiplatform_v1beta1
+
+ def sample_add_context_artifacts_and_executions():
+ # Create a client
+ client = aiplatform_v1beta1.MetadataServiceClient()
+
+ # Initialize request argument(s)
+ request = aiplatform_v1beta1.AddContextArtifactsAndExecutionsRequest(
+ context="context_value",
+ )
+
+ # Make the request
+ response = client.add_context_artifacts_and_executions(request=request)
+
+ # Handle the response
+ print(response)
+
Args:
request (Union[google.cloud.aiplatform_v1beta1.types.AddContextArtifactsAndExecutionsRequest, dict]):
The request object. Request message for
@@ -2005,6 +2375,27 @@ def add_context_children(
cycle or cause any Context to have more than 10 parents, the
request will fail with an INVALID_ARGUMENT error.
+
+
+ .. code-block::
+
+ from google.cloud import aiplatform_v1beta1
+
+ def sample_add_context_children():
+ # Create a client
+ client = aiplatform_v1beta1.MetadataServiceClient()
+
+ # Initialize request argument(s)
+ request = aiplatform_v1beta1.AddContextChildrenRequest(
+ context="context_value",
+ )
+
+ # Make the request
+ response = client.add_context_children(request=request)
+
+ # Handle the response
+ print(response)
+
Args:
request (Union[google.cloud.aiplatform_v1beta1.types.AddContextChildrenRequest, dict]):
The request object. Request message for
@@ -2091,6 +2482,27 @@ def query_context_lineage_subgraph(
specified Context, connected by Event edges and returned
as a LineageSubgraph.
+
+
+ .. code-block::
+
+ from google.cloud import aiplatform_v1beta1
+
+ def sample_query_context_lineage_subgraph():
+ # Create a client
+ client = aiplatform_v1beta1.MetadataServiceClient()
+
+ # Initialize request argument(s)
+ request = aiplatform_v1beta1.QueryContextLineageSubgraphRequest(
+ context="context_value",
+ )
+
+ # Make the request
+ response = client.query_context_lineage_subgraph(request=request)
+
+ # Handle the response
+ print(response)
+
Args:
request (Union[google.cloud.aiplatform_v1beta1.types.QueryContextLineageSubgraphRequest, dict]):
The request object. Request message for
@@ -2174,6 +2586,26 @@ def create_execution(
) -> gca_execution.Execution:
r"""Creates an Execution associated with a MetadataStore.
+
+ .. code-block::
+
+ from google.cloud import aiplatform_v1beta1
+
+ def sample_create_execution():
+ # Create a client
+ client = aiplatform_v1beta1.MetadataServiceClient()
+
+ # Initialize request argument(s)
+ request = aiplatform_v1beta1.CreateExecutionRequest(
+ parent="parent_value",
+ )
+
+ # Make the request
+ response = client.create_execution(request=request)
+
+ # Handle the response
+ print(response)
+
Args:
request (Union[google.cloud.aiplatform_v1beta1.types.CreateExecutionRequest, dict]):
The request object. Request message for
@@ -2268,6 +2700,26 @@ def get_execution(
) -> execution.Execution:
r"""Retrieves a specific Execution.
+
+ .. code-block::
+
+ from google.cloud import aiplatform_v1beta1
+
+ def sample_get_execution():
+ # Create a client
+ client = aiplatform_v1beta1.MetadataServiceClient()
+
+ # Initialize request argument(s)
+ request = aiplatform_v1beta1.GetExecutionRequest(
+ name="name_value",
+ )
+
+ # Make the request
+ response = client.get_execution(request=request)
+
+ # Handle the response
+ print(response)
+
Args:
request (Union[google.cloud.aiplatform_v1beta1.types.GetExecutionRequest, dict]):
The request object. Request message for
@@ -2338,6 +2790,27 @@ def list_executions(
) -> pagers.ListExecutionsPager:
r"""Lists Executions in the MetadataStore.
+
+ .. code-block::
+
+ from google.cloud import aiplatform_v1beta1
+
+ def sample_list_executions():
+ # Create a client
+ client = aiplatform_v1beta1.MetadataServiceClient()
+
+ # Initialize request argument(s)
+ request = aiplatform_v1beta1.ListExecutionsRequest(
+ parent="parent_value",
+ )
+
+ # Make the request
+ page_result = client.list_executions(request=request)
+
+ # Handle the response
+ for response in page_result:
+ print(response)
+
Args:
request (Union[google.cloud.aiplatform_v1beta1.types.ListExecutionsRequest, dict]):
The request object. Request message for
@@ -2420,6 +2893,25 @@ def update_execution(
) -> gca_execution.Execution:
r"""Updates a stored Execution.
+
+ .. code-block::
+
+ from google.cloud import aiplatform_v1beta1
+
+ def sample_update_execution():
+ # Create a client
+ client = aiplatform_v1beta1.MetadataServiceClient()
+
+ # Initialize request argument(s)
+ request = aiplatform_v1beta1.UpdateExecutionRequest(
+ )
+
+ # Make the request
+ response = client.update_execution(request=request)
+
+ # Handle the response
+ print(response)
+
Args:
request (Union[google.cloud.aiplatform_v1beta1.types.UpdateExecutionRequest, dict]):
The request object. Request message for
@@ -2506,6 +2998,30 @@ def delete_execution(
) -> gac_operation.Operation:
r"""Deletes an Execution.
+
+ .. code-block::
+
+ from google.cloud import aiplatform_v1beta1
+
+ def sample_delete_execution():
+ # Create a client
+ client = aiplatform_v1beta1.MetadataServiceClient()
+
+ # Initialize request argument(s)
+ request = aiplatform_v1beta1.DeleteExecutionRequest(
+ name="name_value",
+ )
+
+ # Make the request
+ operation = client.delete_execution(request=request)
+
+ print("Waiting for operation to complete...")
+
+ response = operation.result()
+
+ # Handle the response
+ print(response)
+
Args:
request (Union[google.cloud.aiplatform_v1beta1.types.DeleteExecutionRequest, dict]):
The request object. Request message for
@@ -2599,6 +3115,31 @@ def purge_executions(
) -> gac_operation.Operation:
r"""Purges Executions.
+
+ .. code-block::
+
+ from google.cloud import aiplatform_v1beta1
+
+ def sample_purge_executions():
+ # Create a client
+ client = aiplatform_v1beta1.MetadataServiceClient()
+
+ # Initialize request argument(s)
+ request = aiplatform_v1beta1.PurgeExecutionsRequest(
+ parent="parent_value",
+ filter="filter_value",
+ )
+
+ # Make the request
+ operation = client.purge_executions(request=request)
+
+ print("Waiting for operation to complete...")
+
+ response = operation.result()
+
+ # Handle the response
+ print(response)
+
Args:
request (Union[google.cloud.aiplatform_v1beta1.types.PurgeExecutionsRequest, dict]):
The request object. Request message for
@@ -2688,6 +3229,27 @@ def add_execution_events(
between the Execution and the Artifact, the Event is
skipped.
+
+
+ .. code-block::
+
+ from google.cloud import aiplatform_v1beta1
+
+ def sample_add_execution_events():
+ # Create a client
+ client = aiplatform_v1beta1.MetadataServiceClient()
+
+ # Initialize request argument(s)
+ request = aiplatform_v1beta1.AddExecutionEventsRequest(
+ execution="execution_value",
+ )
+
+ # Make the request
+ response = client.add_execution_events(request=request)
+
+ # Handle the response
+ print(response)
+
Args:
request (Union[google.cloud.aiplatform_v1beta1.types.AddExecutionEventsRequest, dict]):
The request object. Request message for
@@ -2773,6 +3335,27 @@ def query_execution_inputs_and_outputs(
this Execution, in the form of LineageSubgraph that also
contains the Execution and connecting Events.
+
+
+ .. code-block::
+
+ from google.cloud import aiplatform_v1beta1
+
+ def sample_query_execution_inputs_and_outputs():
+ # Create a client
+ client = aiplatform_v1beta1.MetadataServiceClient()
+
+ # Initialize request argument(s)
+ request = aiplatform_v1beta1.QueryExecutionInputsAndOutputsRequest(
+ execution="execution_value",
+ )
+
+ # Make the request
+ response = client.query_execution_inputs_and_outputs(request=request)
+
+ # Handle the response
+ print(response)
+
Args:
request (Union[google.cloud.aiplatform_v1beta1.types.QueryExecutionInputsAndOutputsRequest, dict]):
The request object. Request message for
@@ -2855,6 +3438,30 @@ def create_metadata_schema(
) -> gca_metadata_schema.MetadataSchema:
r"""Creates a MetadataSchema.
+
+ .. code-block::
+
+ from google.cloud import aiplatform_v1beta1
+
+ def sample_create_metadata_schema():
+ # Create a client
+ client = aiplatform_v1beta1.MetadataServiceClient()
+
+ # Initialize request argument(s)
+ metadata_schema = aiplatform_v1beta1.MetadataSchema()
+ metadata_schema.schema = "schema_value"
+
+ request = aiplatform_v1beta1.CreateMetadataSchemaRequest(
+ parent="parent_value",
+ metadata_schema=metadata_schema,
+ )
+
+ # Make the request
+ response = client.create_metadata_schema(request=request)
+
+ # Handle the response
+ print(response)
+
Args:
request (Union[google.cloud.aiplatform_v1beta1.types.CreateMetadataSchemaRequest, dict]):
The request object. Request message for
@@ -2951,6 +3558,26 @@ def get_metadata_schema(
) -> metadata_schema.MetadataSchema:
r"""Retrieves a specific MetadataSchema.
+
+ .. code-block::
+
+ from google.cloud import aiplatform_v1beta1
+
+ def sample_get_metadata_schema():
+ # Create a client
+ client = aiplatform_v1beta1.MetadataServiceClient()
+
+ # Initialize request argument(s)
+ request = aiplatform_v1beta1.GetMetadataSchemaRequest(
+ name="name_value",
+ )
+
+ # Make the request
+ response = client.get_metadata_schema(request=request)
+
+ # Handle the response
+ print(response)
+
Args:
request (Union[google.cloud.aiplatform_v1beta1.types.GetMetadataSchemaRequest, dict]):
The request object. Request message for
@@ -3021,6 +3648,27 @@ def list_metadata_schemas(
) -> pagers.ListMetadataSchemasPager:
r"""Lists MetadataSchemas.
+
+ .. code-block::
+
+ from google.cloud import aiplatform_v1beta1
+
+ def sample_list_metadata_schemas():
+ # Create a client
+ client = aiplatform_v1beta1.MetadataServiceClient()
+
+ # Initialize request argument(s)
+ request = aiplatform_v1beta1.ListMetadataSchemasRequest(
+ parent="parent_value",
+ )
+
+ # Make the request
+ page_result = client.list_metadata_schemas(request=request)
+
+ # Handle the response
+ for response in page_result:
+ print(response)
+
Args:
request (Union[google.cloud.aiplatform_v1beta1.types.ListMetadataSchemasRequest, dict]):
The request object. Request message for
@@ -3106,6 +3754,27 @@ def query_artifact_lineage_subgraph(
Artifacts and Executions connected by Event edges and
returned as a LineageSubgraph.
+
+
+ .. code-block::
+
+ from google.cloud import aiplatform_v1beta1
+
+ def sample_query_artifact_lineage_subgraph():
+ # Create a client
+ client = aiplatform_v1beta1.MetadataServiceClient()
+
+ # Initialize request argument(s)
+ request = aiplatform_v1beta1.QueryArtifactLineageSubgraphRequest(
+ artifact="artifact_value",
+ )
+
+ # Make the request
+ response = client.query_artifact_lineage_subgraph(request=request)
+
+ # Handle the response
+ print(response)
+
Args:
request (Union[google.cloud.aiplatform_v1beta1.types.QueryArtifactLineageSubgraphRequest, dict]):
The request object. Request message for
diff --git a/google/cloud/aiplatform_v1beta1/services/migration_service/async_client.py b/google/cloud/aiplatform_v1beta1/services/migration_service/async_client.py
index 6aaadc9020..de4b0c748b 100644
--- a/google/cloud/aiplatform_v1beta1/services/migration_service/async_client.py
+++ b/google/cloud/aiplatform_v1beta1/services/migration_service/async_client.py
@@ -235,6 +235,27 @@ async def search_migratable_resources(
ml.googleapis.com that can be migrated to Vertex AI's
given location.
+
+ .. code-block::
+
+ from google.cloud import aiplatform_v1beta1
+
+ def sample_search_migratable_resources():
+ # Create a client
+ client = aiplatform_v1beta1.MigrationServiceClient()
+
+ # Initialize request argument(s)
+ request = aiplatform_v1beta1.SearchMigratableResourcesRequest(
+ parent="parent_value",
+ )
+
+ # Make the request
+ page_result = client.search_migratable_resources(request=request)
+
+ # Handle the response
+ for response in page_result:
+ print(response)
+
Args:
request (Union[google.cloud.aiplatform_v1beta1.types.SearchMigratableResourcesRequest, dict]):
The request object. Request message for
@@ -323,6 +344,36 @@ async def batch_migrate_resources(
automl.googleapis.com, and datalabeling.googleapis.com
to Vertex AI.
+
+ .. code-block::
+
+ from google.cloud import aiplatform_v1beta1
+
+ def sample_batch_migrate_resources():
+ # Create a client
+ client = aiplatform_v1beta1.MigrationServiceClient()
+
+ # Initialize request argument(s)
+ migrate_resource_requests = aiplatform_v1beta1.MigrateResourceRequest()
+ migrate_resource_requests.migrate_ml_engine_model_version_config.endpoint = "endpoint_value"
+ migrate_resource_requests.migrate_ml_engine_model_version_config.model_version = "model_version_value"
+ migrate_resource_requests.migrate_ml_engine_model_version_config.model_display_name = "model_display_name_value"
+
+ request = aiplatform_v1beta1.BatchMigrateResourcesRequest(
+ parent="parent_value",
+ migrate_resource_requests=migrate_resource_requests,
+ )
+
+ # Make the request
+ operation = client.batch_migrate_resources(request=request)
+
+ print("Waiting for operation to complete...")
+
+ response = operation.result()
+
+ # Handle the response
+ print(response)
+
Args:
request (Union[google.cloud.aiplatform_v1beta1.types.BatchMigrateResourcesRequest, dict]):
The request object. Request message for
diff --git a/google/cloud/aiplatform_v1beta1/services/migration_service/client.py b/google/cloud/aiplatform_v1beta1/services/migration_service/client.py
index 24b0de3a98..4ef1fb73a4 100644
--- a/google/cloud/aiplatform_v1beta1/services/migration_service/client.py
+++ b/google/cloud/aiplatform_v1beta1/services/migration_service/client.py
@@ -513,6 +513,28 @@ def search_migratable_resources(
ml.googleapis.com that can be migrated to Vertex AI's
given location.
+
+
+ .. code-block::
+
+ from google.cloud import aiplatform_v1beta1
+
+ def sample_search_migratable_resources():
+ # Create a client
+ client = aiplatform_v1beta1.MigrationServiceClient()
+
+ # Initialize request argument(s)
+ request = aiplatform_v1beta1.SearchMigratableResourcesRequest(
+ parent="parent_value",
+ )
+
+ # Make the request
+ page_result = client.search_migratable_resources(request=request)
+
+ # Handle the response
+ for response in page_result:
+ print(response)
+
Args:
request (Union[google.cloud.aiplatform_v1beta1.types.SearchMigratableResourcesRequest, dict]):
The request object. Request message for
@@ -603,6 +625,37 @@ def batch_migrate_resources(
automl.googleapis.com, and datalabeling.googleapis.com
to Vertex AI.
+
+
+ .. code-block::
+
+ from google.cloud import aiplatform_v1beta1
+
+ def sample_batch_migrate_resources():
+ # Create a client
+ client = aiplatform_v1beta1.MigrationServiceClient()
+
+ # Initialize request argument(s)
+ migrate_resource_requests = aiplatform_v1beta1.MigrateResourceRequest()
+ migrate_resource_requests.migrate_ml_engine_model_version_config.endpoint = "endpoint_value"
+ migrate_resource_requests.migrate_ml_engine_model_version_config.model_version = "model_version_value"
+ migrate_resource_requests.migrate_ml_engine_model_version_config.model_display_name = "model_display_name_value"
+
+ request = aiplatform_v1beta1.BatchMigrateResourcesRequest(
+ parent="parent_value",
+ migrate_resource_requests=migrate_resource_requests,
+ )
+
+ # Make the request
+ operation = client.batch_migrate_resources(request=request)
+
+ print("Waiting for operation to complete...")
+
+ response = operation.result()
+
+ # Handle the response
+ print(response)
+
Args:
request (Union[google.cloud.aiplatform_v1beta1.types.BatchMigrateResourcesRequest, dict]):
The request object. Request message for
diff --git a/google/cloud/aiplatform_v1beta1/services/model_service/async_client.py b/google/cloud/aiplatform_v1beta1/services/model_service/async_client.py
index c7c086c876..1f908cf6d9 100644
--- a/google/cloud/aiplatform_v1beta1/services/model_service/async_client.py
+++ b/google/cloud/aiplatform_v1beta1/services/model_service/async_client.py
@@ -240,6 +240,33 @@ async def upload_model(
) -> operation_async.AsyncOperation:
r"""Uploads a Model artifact into Vertex AI.
+ .. code-block::
+
+ from google.cloud import aiplatform_v1beta1
+
+ def sample_upload_model():
+ # Create a client
+ client = aiplatform_v1beta1.ModelServiceClient()
+
+ # Initialize request argument(s)
+ model = aiplatform_v1beta1.Model()
+ model.display_name = "display_name_value"
+
+ request = aiplatform_v1beta1.UploadModelRequest(
+ parent="parent_value",
+ model=model,
+ )
+
+ # Make the request
+ operation = client.upload_model(request=request)
+
+ print("Waiting for operation to complete...")
+
+ response = operation.result()
+
+ # Handle the response
+ print(response)
+
Args:
request (Union[google.cloud.aiplatform_v1beta1.types.UploadModelRequest, dict]):
The request object. Request message for
@@ -332,6 +359,25 @@ async def get_model(
) -> model.Model:
r"""Gets a Model.
+ .. code-block::
+
+ from google.cloud import aiplatform_v1beta1
+
+ def sample_get_model():
+ # Create a client
+ client = aiplatform_v1beta1.ModelServiceClient()
+
+ # Initialize request argument(s)
+ request = aiplatform_v1beta1.GetModelRequest(
+ name="name_value",
+ )
+
+ # Make the request
+ response = client.get_model(request=request)
+
+ # Handle the response
+ print(response)
+
Args:
request (Union[google.cloud.aiplatform_v1beta1.types.GetModelRequest, dict]):
The request object. Request message for
@@ -401,6 +447,26 @@ async def list_models(
) -> pagers.ListModelsAsyncPager:
r"""Lists Models in a Location.
+ .. code-block::
+
+ from google.cloud import aiplatform_v1beta1
+
+ def sample_list_models():
+ # Create a client
+ client = aiplatform_v1beta1.ModelServiceClient()
+
+ # Initialize request argument(s)
+ request = aiplatform_v1beta1.ListModelsRequest(
+ parent="parent_value",
+ )
+
+ # Make the request
+ page_result = client.list_models(request=request)
+
+ # Handle the response
+ for response in page_result:
+ print(response)
+
Args:
request (Union[google.cloud.aiplatform_v1beta1.types.ListModelsRequest, dict]):
The request object. Request message for
@@ -483,6 +549,28 @@ async def update_model(
) -> gca_model.Model:
r"""Updates a Model.
+ .. code-block::
+
+ from google.cloud import aiplatform_v1beta1
+
+ def sample_update_model():
+ # Create a client
+ client = aiplatform_v1beta1.ModelServiceClient()
+
+ # Initialize request argument(s)
+ model = aiplatform_v1beta1.Model()
+ model.display_name = "display_name_value"
+
+ request = aiplatform_v1beta1.UpdateModelRequest(
+ model=model,
+ )
+
+ # Make the request
+ response = client.update_model(request=request)
+
+ # Handle the response
+ print(response)
+
Args:
request (Union[google.cloud.aiplatform_v1beta1.types.UpdateModelRequest, dict]):
The request object. Request message for
@@ -572,6 +660,30 @@ async def delete_model(
[deployed_models][google.cloud.aiplatform.v1beta1.Endpoint.deployed_models]
field.
+
+ .. code-block::
+
+ from google.cloud import aiplatform_v1beta1
+
+ def sample_delete_model():
+ # Create a client
+ client = aiplatform_v1beta1.ModelServiceClient()
+
+ # Initialize request argument(s)
+ request = aiplatform_v1beta1.DeleteModelRequest(
+ name="name_value",
+ )
+
+ # Make the request
+ operation = client.delete_model(request=request)
+
+ print("Waiting for operation to complete...")
+
+ response = operation.result()
+
+ # Handle the response
+ print(response)
+
Args:
request (Union[google.cloud.aiplatform_v1beta1.types.DeleteModelRequest, dict]):
The request object. Request message for
@@ -669,6 +781,30 @@ async def export_model(
least one [supported export
format][google.cloud.aiplatform.v1beta1.Model.supported_export_formats].
+
+ .. code-block::
+
+ from google.cloud import aiplatform_v1beta1
+
+ def sample_export_model():
+ # Create a client
+ client = aiplatform_v1beta1.ModelServiceClient()
+
+ # Initialize request argument(s)
+ request = aiplatform_v1beta1.ExportModelRequest(
+ name="name_value",
+ )
+
+ # Make the request
+ operation = client.export_model(request=request)
+
+ print("Waiting for operation to complete...")
+
+ response = operation.result()
+
+ # Handle the response
+ print(response)
+
Args:
request (Union[google.cloud.aiplatform_v1beta1.types.ExportModelRequest, dict]):
The request object. Request message for
@@ -762,6 +898,25 @@ async def get_model_evaluation(
) -> model_evaluation.ModelEvaluation:
r"""Gets a ModelEvaluation.
+ .. code-block::
+
+ from google.cloud import aiplatform_v1beta1
+
+ def sample_get_model_evaluation():
+ # Create a client
+ client = aiplatform_v1beta1.ModelServiceClient()
+
+ # Initialize request argument(s)
+ request = aiplatform_v1beta1.GetModelEvaluationRequest(
+ name="name_value",
+ )
+
+ # Make the request
+ response = client.get_model_evaluation(request=request)
+
+ # Handle the response
+ print(response)
+
Args:
request (Union[google.cloud.aiplatform_v1beta1.types.GetModelEvaluationRequest, dict]):
The request object. Request message for
@@ -836,6 +991,26 @@ async def list_model_evaluations(
) -> pagers.ListModelEvaluationsAsyncPager:
r"""Lists ModelEvaluations in a Model.
+ .. code-block::
+
+ from google.cloud import aiplatform_v1beta1
+
+ def sample_list_model_evaluations():
+ # Create a client
+ client = aiplatform_v1beta1.ModelServiceClient()
+
+ # Initialize request argument(s)
+ request = aiplatform_v1beta1.ListModelEvaluationsRequest(
+ parent="parent_value",
+ )
+
+ # Make the request
+ page_result = client.list_model_evaluations(request=request)
+
+ # Handle the response
+ for response in page_result:
+ print(response)
+
Args:
request (Union[google.cloud.aiplatform_v1beta1.types.ListModelEvaluationsRequest, dict]):
The request object. Request message for
@@ -917,6 +1092,25 @@ async def get_model_evaluation_slice(
) -> model_evaluation_slice.ModelEvaluationSlice:
r"""Gets a ModelEvaluationSlice.
+ .. code-block::
+
+ from google.cloud import aiplatform_v1beta1
+
+ def sample_get_model_evaluation_slice():
+ # Create a client
+ client = aiplatform_v1beta1.ModelServiceClient()
+
+ # Initialize request argument(s)
+ request = aiplatform_v1beta1.GetModelEvaluationSliceRequest(
+ name="name_value",
+ )
+
+ # Make the request
+ response = client.get_model_evaluation_slice(request=request)
+
+ # Handle the response
+ print(response)
+
Args:
request (Union[google.cloud.aiplatform_v1beta1.types.GetModelEvaluationSliceRequest, dict]):
The request object. Request message for
@@ -991,6 +1185,26 @@ async def list_model_evaluation_slices(
) -> pagers.ListModelEvaluationSlicesAsyncPager:
r"""Lists ModelEvaluationSlices in a ModelEvaluation.
+ .. code-block::
+
+ from google.cloud import aiplatform_v1beta1
+
+ def sample_list_model_evaluation_slices():
+ # Create a client
+ client = aiplatform_v1beta1.ModelServiceClient()
+
+ # Initialize request argument(s)
+ request = aiplatform_v1beta1.ListModelEvaluationSlicesRequest(
+ parent="parent_value",
+ )
+
+ # Make the request
+ page_result = client.list_model_evaluation_slices(request=request)
+
+ # Handle the response
+ for response in page_result:
+ print(response)
+
Args:
request (Union[google.cloud.aiplatform_v1beta1.types.ListModelEvaluationSlicesRequest, dict]):
The request object. Request message for
diff --git a/google/cloud/aiplatform_v1beta1/services/model_service/client.py b/google/cloud/aiplatform_v1beta1/services/model_service/client.py
index 086243f777..78c82ab8c5 100644
--- a/google/cloud/aiplatform_v1beta1/services/model_service/client.py
+++ b/google/cloud/aiplatform_v1beta1/services/model_service/client.py
@@ -497,6 +497,34 @@ def upload_model(
) -> gac_operation.Operation:
r"""Uploads a Model artifact into Vertex AI.
+
+ .. code-block::
+
+ from google.cloud import aiplatform_v1beta1
+
+ def sample_upload_model():
+ # Create a client
+ client = aiplatform_v1beta1.ModelServiceClient()
+
+ # Initialize request argument(s)
+ model = aiplatform_v1beta1.Model()
+ model.display_name = "display_name_value"
+
+ request = aiplatform_v1beta1.UploadModelRequest(
+ parent="parent_value",
+ model=model,
+ )
+
+ # Make the request
+ operation = client.upload_model(request=request)
+
+ print("Waiting for operation to complete...")
+
+ response = operation.result()
+
+ # Handle the response
+ print(response)
+
Args:
request (Union[google.cloud.aiplatform_v1beta1.types.UploadModelRequest, dict]):
The request object. Request message for
@@ -589,6 +617,26 @@ def get_model(
) -> model.Model:
r"""Gets a Model.
+
+ .. code-block::
+
+ from google.cloud import aiplatform_v1beta1
+
+ def sample_get_model():
+ # Create a client
+ client = aiplatform_v1beta1.ModelServiceClient()
+
+ # Initialize request argument(s)
+ request = aiplatform_v1beta1.GetModelRequest(
+ name="name_value",
+ )
+
+ # Make the request
+ response = client.get_model(request=request)
+
+ # Handle the response
+ print(response)
+
Args:
request (Union[google.cloud.aiplatform_v1beta1.types.GetModelRequest, dict]):
The request object. Request message for
@@ -658,6 +706,27 @@ def list_models(
) -> pagers.ListModelsPager:
r"""Lists Models in a Location.
+
+ .. code-block::
+
+ from google.cloud import aiplatform_v1beta1
+
+ def sample_list_models():
+ # Create a client
+ client = aiplatform_v1beta1.ModelServiceClient()
+
+ # Initialize request argument(s)
+ request = aiplatform_v1beta1.ListModelsRequest(
+ parent="parent_value",
+ )
+
+ # Make the request
+ page_result = client.list_models(request=request)
+
+ # Handle the response
+ for response in page_result:
+ print(response)
+
Args:
request (Union[google.cloud.aiplatform_v1beta1.types.ListModelsRequest, dict]):
The request object. Request message for
@@ -740,6 +809,29 @@ def update_model(
) -> gca_model.Model:
r"""Updates a Model.
+
+ .. code-block::
+
+ from google.cloud import aiplatform_v1beta1
+
+ def sample_update_model():
+ # Create a client
+ client = aiplatform_v1beta1.ModelServiceClient()
+
+ # Initialize request argument(s)
+ model = aiplatform_v1beta1.Model()
+ model.display_name = "display_name_value"
+
+ request = aiplatform_v1beta1.UpdateModelRequest(
+ model=model,
+ )
+
+ # Make the request
+ response = client.update_model(request=request)
+
+ # Handle the response
+ print(response)
+
Args:
request (Union[google.cloud.aiplatform_v1beta1.types.UpdateModelRequest, dict]):
The request object. Request message for
@@ -829,6 +921,31 @@ def delete_model(
[deployed_models][google.cloud.aiplatform.v1beta1.Endpoint.deployed_models]
field.
+
+
+ .. code-block::
+
+ from google.cloud import aiplatform_v1beta1
+
+ def sample_delete_model():
+ # Create a client
+ client = aiplatform_v1beta1.ModelServiceClient()
+
+ # Initialize request argument(s)
+ request = aiplatform_v1beta1.DeleteModelRequest(
+ name="name_value",
+ )
+
+ # Make the request
+ operation = client.delete_model(request=request)
+
+ print("Waiting for operation to complete...")
+
+ response = operation.result()
+
+ # Handle the response
+ print(response)
+
Args:
request (Union[google.cloud.aiplatform_v1beta1.types.DeleteModelRequest, dict]):
The request object. Request message for
@@ -926,6 +1043,31 @@ def export_model(
least one [supported export
format][google.cloud.aiplatform.v1beta1.Model.supported_export_formats].
+
+
+ .. code-block::
+
+ from google.cloud import aiplatform_v1beta1
+
+ def sample_export_model():
+ # Create a client
+ client = aiplatform_v1beta1.ModelServiceClient()
+
+ # Initialize request argument(s)
+ request = aiplatform_v1beta1.ExportModelRequest(
+ name="name_value",
+ )
+
+ # Make the request
+ operation = client.export_model(request=request)
+
+ print("Waiting for operation to complete...")
+
+ response = operation.result()
+
+ # Handle the response
+ print(response)
+
Args:
request (Union[google.cloud.aiplatform_v1beta1.types.ExportModelRequest, dict]):
The request object. Request message for
@@ -1019,6 +1161,26 @@ def get_model_evaluation(
) -> model_evaluation.ModelEvaluation:
r"""Gets a ModelEvaluation.
+
+ .. code-block::
+
+ from google.cloud import aiplatform_v1beta1
+
+ def sample_get_model_evaluation():
+ # Create a client
+ client = aiplatform_v1beta1.ModelServiceClient()
+
+ # Initialize request argument(s)
+ request = aiplatform_v1beta1.GetModelEvaluationRequest(
+ name="name_value",
+ )
+
+ # Make the request
+ response = client.get_model_evaluation(request=request)
+
+ # Handle the response
+ print(response)
+
Args:
request (Union[google.cloud.aiplatform_v1beta1.types.GetModelEvaluationRequest, dict]):
The request object. Request message for
@@ -1093,6 +1255,27 @@ def list_model_evaluations(
) -> pagers.ListModelEvaluationsPager:
r"""Lists ModelEvaluations in a Model.
+
+ .. code-block::
+
+ from google.cloud import aiplatform_v1beta1
+
+ def sample_list_model_evaluations():
+ # Create a client
+ client = aiplatform_v1beta1.ModelServiceClient()
+
+ # Initialize request argument(s)
+ request = aiplatform_v1beta1.ListModelEvaluationsRequest(
+ parent="parent_value",
+ )
+
+ # Make the request
+ page_result = client.list_model_evaluations(request=request)
+
+ # Handle the response
+ for response in page_result:
+ print(response)
+
Args:
request (Union[google.cloud.aiplatform_v1beta1.types.ListModelEvaluationsRequest, dict]):
The request object. Request message for
@@ -1174,6 +1357,26 @@ def get_model_evaluation_slice(
) -> model_evaluation_slice.ModelEvaluationSlice:
r"""Gets a ModelEvaluationSlice.
+
+ .. code-block::
+
+ from google.cloud import aiplatform_v1beta1
+
+ def sample_get_model_evaluation_slice():
+ # Create a client
+ client = aiplatform_v1beta1.ModelServiceClient()
+
+ # Initialize request argument(s)
+ request = aiplatform_v1beta1.GetModelEvaluationSliceRequest(
+ name="name_value",
+ )
+
+ # Make the request
+ response = client.get_model_evaluation_slice(request=request)
+
+ # Handle the response
+ print(response)
+
Args:
request (Union[google.cloud.aiplatform_v1beta1.types.GetModelEvaluationSliceRequest, dict]):
The request object. Request message for
@@ -1250,6 +1453,27 @@ def list_model_evaluation_slices(
) -> pagers.ListModelEvaluationSlicesPager:
r"""Lists ModelEvaluationSlices in a ModelEvaluation.
+
+ .. code-block::
+
+ from google.cloud import aiplatform_v1beta1
+
+ def sample_list_model_evaluation_slices():
+ # Create a client
+ client = aiplatform_v1beta1.ModelServiceClient()
+
+ # Initialize request argument(s)
+ request = aiplatform_v1beta1.ListModelEvaluationSlicesRequest(
+ parent="parent_value",
+ )
+
+ # Make the request
+ page_result = client.list_model_evaluation_slices(request=request)
+
+ # Handle the response
+ for response in page_result:
+ print(response)
+
Args:
request (Union[google.cloud.aiplatform_v1beta1.types.ListModelEvaluationSlicesRequest, dict]):
The request object. Request message for
diff --git a/google/cloud/aiplatform_v1beta1/services/pipeline_service/async_client.py b/google/cloud/aiplatform_v1beta1/services/pipeline_service/async_client.py
index 0f884c970b..4000d3defc 100644
--- a/google/cloud/aiplatform_v1beta1/services/pipeline_service/async_client.py
+++ b/google/cloud/aiplatform_v1beta1/services/pipeline_service/async_client.py
@@ -255,6 +255,32 @@ async def create_training_pipeline(
r"""Creates a TrainingPipeline. A created
TrainingPipeline right away will be attempted to be run.
+
+ .. code-block::
+
+ from google.cloud import aiplatform_v1beta1
+
+ async def sample_create_training_pipeline():
+ # Create a client
+ client = aiplatform_v1beta1.PipelineServiceAsyncClient()
+
+ # Initialize request argument(s)
+ training_pipeline = aiplatform_v1beta1.TrainingPipeline()
+ training_pipeline.display_name = "display_name_value"
+ training_pipeline.training_task_definition = "training_task_definition_value"
+ training_pipeline.training_task_inputs.null_value = "NULL_VALUE"
+
+ request = aiplatform_v1beta1.CreateTrainingPipelineRequest(
+ parent="parent_value",
+ training_pipeline=training_pipeline,
+ )
+
+ # Make the request
+ response = await client.create_training_pipeline(request=request)
+
+ # Handle the response
+ print(response)
+
Args:
request (Union[google.cloud.aiplatform_v1beta1.types.CreateTrainingPipelineRequest, dict]):
The request object. Request message for
@@ -340,6 +366,25 @@ async def get_training_pipeline(
) -> training_pipeline.TrainingPipeline:
r"""Gets a TrainingPipeline.
+ .. code-block::
+
+ from google.cloud import aiplatform_v1beta1
+
+ async def sample_get_training_pipeline():
+ # Create a client
+ client = aiplatform_v1beta1.PipelineServiceAsyncClient()
+
+ # Initialize request argument(s)
+ request = aiplatform_v1beta1.GetTrainingPipelineRequest(
+ name="name_value",
+ )
+
+ # Make the request
+ response = await client.get_training_pipeline(request=request)
+
+ # Handle the response
+ print(response)
+
Args:
request (Union[google.cloud.aiplatform_v1beta1.types.GetTrainingPipelineRequest, dict]):
The request object. Request message for
@@ -416,6 +461,26 @@ async def list_training_pipelines(
) -> pagers.ListTrainingPipelinesAsyncPager:
r"""Lists TrainingPipelines in a Location.
+ .. code-block::
+
+ from google.cloud import aiplatform_v1beta1
+
+ async def sample_list_training_pipelines():
+ # Create a client
+ client = aiplatform_v1beta1.PipelineServiceAsyncClient()
+
+ # Initialize request argument(s)
+ request = aiplatform_v1beta1.ListTrainingPipelinesRequest(
+ parent="parent_value",
+ )
+
+ # Make the request
+ page_result = await client.list_training_pipelines(request=request)
+
+ # Handle the response
+ async for response in page_result:
+ print(response)
+
Args:
request (Union[google.cloud.aiplatform_v1beta1.types.ListTrainingPipelinesRequest, dict]):
The request object. Request message for
@@ -497,6 +562,29 @@ async def delete_training_pipeline(
) -> operation_async.AsyncOperation:
r"""Deletes a TrainingPipeline.
+ .. code-block::
+
+ from google.cloud import aiplatform_v1beta1
+
+ async def sample_delete_training_pipeline():
+ # Create a client
+ client = aiplatform_v1beta1.PipelineServiceAsyncClient()
+
+ # Initialize request argument(s)
+ request = aiplatform_v1beta1.DeleteTrainingPipelineRequest(
+ name="name_value",
+ )
+
+ # Make the request
+ operation = client.delete_training_pipeline(request=request)
+
+ print("Waiting for operation to complete...")
+
+ response = (await operation).result()
+
+ # Handle the response
+ print(response)
+
Args:
request (Union[google.cloud.aiplatform_v1beta1.types.DeleteTrainingPipelineRequest, dict]):
The request object. Request message for
@@ -602,6 +690,23 @@ async def cancel_training_pipeline(
[TrainingPipeline.state][google.cloud.aiplatform.v1beta1.TrainingPipeline.state]
is set to ``CANCELLED``.
+
+ .. code-block::
+
+ from google.cloud import aiplatform_v1beta1
+
+ async def sample_cancel_training_pipeline():
+ # Create a client
+ client = aiplatform_v1beta1.PipelineServiceAsyncClient()
+
+ # Initialize request argument(s)
+ request = aiplatform_v1beta1.CancelTrainingPipelineRequest(
+ name="name_value",
+ )
+
+ # Make the request
+ await client.cancel_training_pipeline(request=request)
+
Args:
request (Union[google.cloud.aiplatform_v1beta1.types.CancelTrainingPipelineRequest, dict]):
The request object. Request message for
@@ -670,6 +775,26 @@ async def create_pipeline_job(
r"""Creates a PipelineJob. A PipelineJob will run
immediately when created.
+
+ .. code-block::
+
+ from google.cloud import aiplatform_v1beta1
+
+ async def sample_create_pipeline_job():
+ # Create a client
+ client = aiplatform_v1beta1.PipelineServiceAsyncClient()
+
+ # Initialize request argument(s)
+ request = aiplatform_v1beta1.CreatePipelineJobRequest(
+ parent="parent_value",
+ )
+
+ # Make the request
+ response = await client.create_pipeline_job(request=request)
+
+ # Handle the response
+ print(response)
+
Args:
request (Union[google.cloud.aiplatform_v1beta1.types.CreatePipelineJobRequest, dict]):
The request object. Request message for
@@ -762,6 +887,25 @@ async def get_pipeline_job(
) -> pipeline_job.PipelineJob:
r"""Gets a PipelineJob.
+ .. code-block::
+
+ from google.cloud import aiplatform_v1beta1
+
+ async def sample_get_pipeline_job():
+ # Create a client
+ client = aiplatform_v1beta1.PipelineServiceAsyncClient()
+
+ # Initialize request argument(s)
+ request = aiplatform_v1beta1.GetPipelineJobRequest(
+ name="name_value",
+ )
+
+ # Make the request
+ response = await client.get_pipeline_job(request=request)
+
+ # Handle the response
+ print(response)
+
Args:
request (Union[google.cloud.aiplatform_v1beta1.types.GetPipelineJobRequest, dict]):
The request object. Request message for
@@ -833,6 +977,26 @@ async def list_pipeline_jobs(
) -> pagers.ListPipelineJobsAsyncPager:
r"""Lists PipelineJobs in a Location.
+ .. code-block::
+
+ from google.cloud import aiplatform_v1beta1
+
+ async def sample_list_pipeline_jobs():
+ # Create a client
+ client = aiplatform_v1beta1.PipelineServiceAsyncClient()
+
+ # Initialize request argument(s)
+ request = aiplatform_v1beta1.ListPipelineJobsRequest(
+ parent="parent_value",
+ )
+
+ # Make the request
+ page_result = await client.list_pipeline_jobs(request=request)
+
+ # Handle the response
+ async for response in page_result:
+ print(response)
+
Args:
request (Union[google.cloud.aiplatform_v1beta1.types.ListPipelineJobsRequest, dict]):
The request object. Request message for
@@ -914,6 +1078,29 @@ async def delete_pipeline_job(
) -> operation_async.AsyncOperation:
r"""Deletes a PipelineJob.
+ .. code-block::
+
+ from google.cloud import aiplatform_v1beta1
+
+ async def sample_delete_pipeline_job():
+ # Create a client
+ client = aiplatform_v1beta1.PipelineServiceAsyncClient()
+
+ # Initialize request argument(s)
+ request = aiplatform_v1beta1.DeletePipelineJobRequest(
+ name="name_value",
+ )
+
+ # Make the request
+ operation = client.delete_pipeline_job(request=request)
+
+ print("Waiting for operation to complete...")
+
+ response = (await operation).result()
+
+ # Handle the response
+ print(response)
+
Args:
request (Union[google.cloud.aiplatform_v1beta1.types.DeletePipelineJobRequest, dict]):
The request object. Request message for
@@ -1019,6 +1206,23 @@ async def cancel_pipeline_job(
[PipelineJob.state][google.cloud.aiplatform.v1beta1.PipelineJob.state]
is set to ``CANCELLED``.
+
+ .. code-block::
+
+ from google.cloud import aiplatform_v1beta1
+
+ async def sample_cancel_pipeline_job():
+ # Create a client
+ client = aiplatform_v1beta1.PipelineServiceAsyncClient()
+
+ # Initialize request argument(s)
+ request = aiplatform_v1beta1.CancelPipelineJobRequest(
+ name="name_value",
+ )
+
+ # Make the request
+ await client.cancel_pipeline_job(request=request)
+
Args:
request (Union[google.cloud.aiplatform_v1beta1.types.CancelPipelineJobRequest, dict]):
The request object. Request message for
diff --git a/google/cloud/aiplatform_v1beta1/services/pipeline_service/client.py b/google/cloud/aiplatform_v1beta1/services/pipeline_service/client.py
index 4a87274ada..9e353b45d4 100644
--- a/google/cloud/aiplatform_v1beta1/services/pipeline_service/client.py
+++ b/google/cloud/aiplatform_v1beta1/services/pipeline_service/client.py
@@ -576,6 +576,33 @@ def create_training_pipeline(
r"""Creates a TrainingPipeline. A created
TrainingPipeline right away will be attempted to be run.
+
+
+ .. code-block::
+
+ from google.cloud import aiplatform_v1beta1
+
+ def sample_create_training_pipeline():
+ # Create a client
+ client = aiplatform_v1beta1.PipelineServiceClient()
+
+ # Initialize request argument(s)
+ training_pipeline = aiplatform_v1beta1.TrainingPipeline()
+ training_pipeline.display_name = "display_name_value"
+ training_pipeline.training_task_definition = "training_task_definition_value"
+ training_pipeline.training_task_inputs.null_value = "NULL_VALUE"
+
+ request = aiplatform_v1beta1.CreateTrainingPipelineRequest(
+ parent="parent_value",
+ training_pipeline=training_pipeline,
+ )
+
+ # Make the request
+ response = client.create_training_pipeline(request=request)
+
+ # Handle the response
+ print(response)
+
Args:
request (Union[google.cloud.aiplatform_v1beta1.types.CreateTrainingPipelineRequest, dict]):
The request object. Request message for
@@ -661,6 +688,26 @@ def get_training_pipeline(
) -> training_pipeline.TrainingPipeline:
r"""Gets a TrainingPipeline.
+
+ .. code-block::
+
+ from google.cloud import aiplatform_v1beta1
+
+ def sample_get_training_pipeline():
+ # Create a client
+ client = aiplatform_v1beta1.PipelineServiceClient()
+
+ # Initialize request argument(s)
+ request = aiplatform_v1beta1.GetTrainingPipelineRequest(
+ name="name_value",
+ )
+
+ # Make the request
+ response = client.get_training_pipeline(request=request)
+
+ # Handle the response
+ print(response)
+
Args:
request (Union[google.cloud.aiplatform_v1beta1.types.GetTrainingPipelineRequest, dict]):
The request object. Request message for
@@ -737,6 +784,27 @@ def list_training_pipelines(
) -> pagers.ListTrainingPipelinesPager:
r"""Lists TrainingPipelines in a Location.
+
+ .. code-block::
+
+ from google.cloud import aiplatform_v1beta1
+
+ def sample_list_training_pipelines():
+ # Create a client
+ client = aiplatform_v1beta1.PipelineServiceClient()
+
+ # Initialize request argument(s)
+ request = aiplatform_v1beta1.ListTrainingPipelinesRequest(
+ parent="parent_value",
+ )
+
+ # Make the request
+ page_result = client.list_training_pipelines(request=request)
+
+ # Handle the response
+ for response in page_result:
+ print(response)
+
Args:
request (Union[google.cloud.aiplatform_v1beta1.types.ListTrainingPipelinesRequest, dict]):
The request object. Request message for
@@ -818,6 +886,30 @@ def delete_training_pipeline(
) -> gac_operation.Operation:
r"""Deletes a TrainingPipeline.
+
+ .. code-block::
+
+ from google.cloud import aiplatform_v1beta1
+
+ def sample_delete_training_pipeline():
+ # Create a client
+ client = aiplatform_v1beta1.PipelineServiceClient()
+
+ # Initialize request argument(s)
+ request = aiplatform_v1beta1.DeleteTrainingPipelineRequest(
+ name="name_value",
+ )
+
+ # Make the request
+ operation = client.delete_training_pipeline(request=request)
+
+ print("Waiting for operation to complete...")
+
+ response = operation.result()
+
+ # Handle the response
+ print(response)
+
Args:
request (Union[google.cloud.aiplatform_v1beta1.types.DeleteTrainingPipelineRequest, dict]):
The request object. Request message for
@@ -923,6 +1015,24 @@ def cancel_training_pipeline(
[TrainingPipeline.state][google.cloud.aiplatform.v1beta1.TrainingPipeline.state]
is set to ``CANCELLED``.
+
+
+ .. code-block::
+
+ from google.cloud import aiplatform_v1beta1
+
+ def sample_cancel_training_pipeline():
+ # Create a client
+ client = aiplatform_v1beta1.PipelineServiceClient()
+
+ # Initialize request argument(s)
+ request = aiplatform_v1beta1.CancelTrainingPipelineRequest(
+ name="name_value",
+ )
+
+ # Make the request
+ client.cancel_training_pipeline(request=request)
+
Args:
request (Union[google.cloud.aiplatform_v1beta1.types.CancelTrainingPipelineRequest, dict]):
The request object. Request message for
@@ -991,6 +1101,27 @@ def create_pipeline_job(
r"""Creates a PipelineJob. A PipelineJob will run
immediately when created.
+
+
+ .. code-block::
+
+ from google.cloud import aiplatform_v1beta1
+
+ def sample_create_pipeline_job():
+ # Create a client
+ client = aiplatform_v1beta1.PipelineServiceClient()
+
+ # Initialize request argument(s)
+ request = aiplatform_v1beta1.CreatePipelineJobRequest(
+ parent="parent_value",
+ )
+
+ # Make the request
+ response = client.create_pipeline_job(request=request)
+
+ # Handle the response
+ print(response)
+
Args:
request (Union[google.cloud.aiplatform_v1beta1.types.CreatePipelineJobRequest, dict]):
The request object. Request message for
@@ -1083,6 +1214,26 @@ def get_pipeline_job(
) -> pipeline_job.PipelineJob:
r"""Gets a PipelineJob.
+
+ .. code-block::
+
+ from google.cloud import aiplatform_v1beta1
+
+ def sample_get_pipeline_job():
+ # Create a client
+ client = aiplatform_v1beta1.PipelineServiceClient()
+
+ # Initialize request argument(s)
+ request = aiplatform_v1beta1.GetPipelineJobRequest(
+ name="name_value",
+ )
+
+ # Make the request
+ response = client.get_pipeline_job(request=request)
+
+ # Handle the response
+ print(response)
+
Args:
request (Union[google.cloud.aiplatform_v1beta1.types.GetPipelineJobRequest, dict]):
The request object. Request message for
@@ -1154,6 +1305,27 @@ def list_pipeline_jobs(
) -> pagers.ListPipelineJobsPager:
r"""Lists PipelineJobs in a Location.
+
+ .. code-block::
+
+ from google.cloud import aiplatform_v1beta1
+
+ def sample_list_pipeline_jobs():
+ # Create a client
+ client = aiplatform_v1beta1.PipelineServiceClient()
+
+ # Initialize request argument(s)
+ request = aiplatform_v1beta1.ListPipelineJobsRequest(
+ parent="parent_value",
+ )
+
+ # Make the request
+ page_result = client.list_pipeline_jobs(request=request)
+
+ # Handle the response
+ for response in page_result:
+ print(response)
+
Args:
request (Union[google.cloud.aiplatform_v1beta1.types.ListPipelineJobsRequest, dict]):
The request object. Request message for
@@ -1235,6 +1407,30 @@ def delete_pipeline_job(
) -> gac_operation.Operation:
r"""Deletes a PipelineJob.
+
+ .. code-block::
+
+ from google.cloud import aiplatform_v1beta1
+
+ def sample_delete_pipeline_job():
+ # Create a client
+ client = aiplatform_v1beta1.PipelineServiceClient()
+
+ # Initialize request argument(s)
+ request = aiplatform_v1beta1.DeletePipelineJobRequest(
+ name="name_value",
+ )
+
+ # Make the request
+ operation = client.delete_pipeline_job(request=request)
+
+ print("Waiting for operation to complete...")
+
+ response = operation.result()
+
+ # Handle the response
+ print(response)
+
Args:
request (Union[google.cloud.aiplatform_v1beta1.types.DeletePipelineJobRequest, dict]):
The request object. Request message for
@@ -1340,6 +1536,24 @@ def cancel_pipeline_job(
[PipelineJob.state][google.cloud.aiplatform.v1beta1.PipelineJob.state]
is set to ``CANCELLED``.
+
+
+ .. code-block::
+
+ from google.cloud import aiplatform_v1beta1
+
+ def sample_cancel_pipeline_job():
+ # Create a client
+ client = aiplatform_v1beta1.PipelineServiceClient()
+
+ # Initialize request argument(s)
+ request = aiplatform_v1beta1.CancelPipelineJobRequest(
+ name="name_value",
+ )
+
+ # Make the request
+ client.cancel_pipeline_job(request=request)
+
Args:
request (Union[google.cloud.aiplatform_v1beta1.types.CancelPipelineJobRequest, dict]):
The request object. Request message for
diff --git a/google/cloud/aiplatform_v1beta1/services/prediction_service/async_client.py b/google/cloud/aiplatform_v1beta1/services/prediction_service/async_client.py
index c80296e658..33d57fd6d9 100644
--- a/google/cloud/aiplatform_v1beta1/services/prediction_service/async_client.py
+++ b/google/cloud/aiplatform_v1beta1/services/prediction_service/async_client.py
@@ -220,6 +220,29 @@ async def predict(
) -> prediction_service.PredictResponse:
r"""Perform an online prediction.
+ .. code-block::
+
+ from google.cloud import aiplatform_v1beta1
+
+ async def sample_predict():
+ # Create a client
+ client = aiplatform_v1beta1.PredictionServiceAsyncClient()
+
+ # Initialize request argument(s)
+ instances = aiplatform_v1beta1.Value()
+ instances.null_value = "NULL_VALUE"
+
+ request = aiplatform_v1beta1.PredictRequest(
+ endpoint="endpoint_value",
+ instances=instances,
+ )
+
+ # Make the request
+ response = await client.predict(request=request)
+
+ # Handle the response
+ print(response)
+
Args:
request (Union[google.cloud.aiplatform_v1beta1.types.PredictRequest, dict]):
The request object. Request message for
@@ -334,6 +357,26 @@ async def raw_predict(
[DeployedModel][google.cloud.aiplatform.v1beta1.DeployedModel]
that served this prediction.
+
+ .. code-block::
+
+ from google.cloud import aiplatform_v1beta1
+
+ async def sample_raw_predict():
+ # Create a client
+ client = aiplatform_v1beta1.PredictionServiceAsyncClient()
+
+ # Initialize request argument(s)
+ request = aiplatform_v1beta1.RawPredictRequest(
+ endpoint="endpoint_value",
+ )
+
+ # Make the request
+ response = await client.raw_predict(request=request)
+
+ # Handle the response
+ print(response)
+
Args:
request (Union[google.cloud.aiplatform_v1beta1.types.RawPredictRequest, dict]):
The request object. Request message for
@@ -492,6 +535,30 @@ async def explain(
populated. Only deployed AutoML tabular Models have
explanation_spec.
+
+ .. code-block::
+
+ from google.cloud import aiplatform_v1beta1
+
+ async def sample_explain():
+ # Create a client
+ client = aiplatform_v1beta1.PredictionServiceAsyncClient()
+
+ # Initialize request argument(s)
+ instances = aiplatform_v1beta1.Value()
+ instances.null_value = "NULL_VALUE"
+
+ request = aiplatform_v1beta1.ExplainRequest(
+ endpoint="endpoint_value",
+ instances=instances,
+ )
+
+ # Make the request
+ response = await client.explain(request=request)
+
+ # Handle the response
+ print(response)
+
Args:
request (Union[google.cloud.aiplatform_v1beta1.types.ExplainRequest, dict]):
The request object. Request message for
diff --git a/google/cloud/aiplatform_v1beta1/services/prediction_service/client.py b/google/cloud/aiplatform_v1beta1/services/prediction_service/client.py
index 4fa9fabd7a..421e5062d9 100644
--- a/google/cloud/aiplatform_v1beta1/services/prediction_service/client.py
+++ b/google/cloud/aiplatform_v1beta1/services/prediction_service/client.py
@@ -433,6 +433,30 @@ def predict(
) -> prediction_service.PredictResponse:
r"""Perform an online prediction.
+
+ .. code-block::
+
+ from google.cloud import aiplatform_v1beta1
+
+ def sample_predict():
+ # Create a client
+ client = aiplatform_v1beta1.PredictionServiceClient()
+
+ # Initialize request argument(s)
+ instances = aiplatform_v1beta1.Value()
+ instances.null_value = "NULL_VALUE"
+
+ request = aiplatform_v1beta1.PredictRequest(
+ endpoint="endpoint_value",
+ instances=instances,
+ )
+
+ # Make the request
+ response = client.predict(request=request)
+
+ # Handle the response
+ print(response)
+
Args:
request (Union[google.cloud.aiplatform_v1beta1.types.PredictRequest, dict]):
The request object. Request message for
@@ -547,6 +571,27 @@ def raw_predict(
[DeployedModel][google.cloud.aiplatform.v1beta1.DeployedModel]
that served this prediction.
+
+
+ .. code-block::
+
+ from google.cloud import aiplatform_v1beta1
+
+ def sample_raw_predict():
+ # Create a client
+ client = aiplatform_v1beta1.PredictionServiceClient()
+
+ # Initialize request argument(s)
+ request = aiplatform_v1beta1.RawPredictRequest(
+ endpoint="endpoint_value",
+ )
+
+ # Make the request
+ response = client.raw_predict(request=request)
+
+ # Handle the response
+ print(response)
+
Args:
request (Union[google.cloud.aiplatform_v1beta1.types.RawPredictRequest, dict]):
The request object. Request message for
@@ -705,6 +750,31 @@ def explain(
populated. Only deployed AutoML tabular Models have
explanation_spec.
+
+
+ .. code-block::
+
+ from google.cloud import aiplatform_v1beta1
+
+ def sample_explain():
+ # Create a client
+ client = aiplatform_v1beta1.PredictionServiceClient()
+
+ # Initialize request argument(s)
+ instances = aiplatform_v1beta1.Value()
+ instances.null_value = "NULL_VALUE"
+
+ request = aiplatform_v1beta1.ExplainRequest(
+ endpoint="endpoint_value",
+ instances=instances,
+ )
+
+ # Make the request
+ response = client.explain(request=request)
+
+ # Handle the response
+ print(response)
+
Args:
request (Union[google.cloud.aiplatform_v1beta1.types.ExplainRequest, dict]):
The request object. Request message for
diff --git a/google/cloud/aiplatform_v1beta1/services/specialist_pool_service/async_client.py b/google/cloud/aiplatform_v1beta1/services/specialist_pool_service/async_client.py
index cee1e51061..3a13a4d4ea 100644
--- a/google/cloud/aiplatform_v1beta1/services/specialist_pool_service/async_client.py
+++ b/google/cloud/aiplatform_v1beta1/services/specialist_pool_service/async_client.py
@@ -236,6 +236,34 @@ async def create_specialist_pool(
) -> operation_async.AsyncOperation:
r"""Creates a SpecialistPool.
+ .. code-block::
+
+ from google.cloud import aiplatform_v1beta1
+
+ async def sample_create_specialist_pool():
+ # Create a client
+ client = aiplatform_v1beta1.SpecialistPoolServiceAsyncClient()
+
+ # Initialize request argument(s)
+ specialist_pool = aiplatform_v1beta1.SpecialistPool()
+ specialist_pool.name = "name_value"
+ specialist_pool.display_name = "display_name_value"
+
+ request = aiplatform_v1beta1.CreateSpecialistPoolRequest(
+ parent="parent_value",
+ specialist_pool=specialist_pool,
+ )
+
+ # Make the request
+ operation = client.create_specialist_pool(request=request)
+
+ print("Waiting for operation to complete...")
+
+ response = (await operation).result()
+
+ # Handle the response
+ print(response)
+
Args:
request (Union[google.cloud.aiplatform_v1beta1.types.CreateSpecialistPoolRequest, dict]):
The request object. Request message for
@@ -333,6 +361,25 @@ async def get_specialist_pool(
) -> specialist_pool.SpecialistPool:
r"""Gets a SpecialistPool.
+ .. code-block::
+
+ from google.cloud import aiplatform_v1beta1
+
+ async def sample_get_specialist_pool():
+ # Create a client
+ client = aiplatform_v1beta1.SpecialistPoolServiceAsyncClient()
+
+ # Initialize request argument(s)
+ request = aiplatform_v1beta1.GetSpecialistPoolRequest(
+ name="name_value",
+ )
+
+ # Make the request
+ response = await client.get_specialist_pool(request=request)
+
+ # Handle the response
+ print(response)
+
Args:
request (Union[google.cloud.aiplatform_v1beta1.types.GetSpecialistPoolRequest, dict]):
The request object. Request message for
@@ -415,6 +462,26 @@ async def list_specialist_pools(
) -> pagers.ListSpecialistPoolsAsyncPager:
r"""Lists SpecialistPools in a Location.
+ .. code-block::
+
+ from google.cloud import aiplatform_v1beta1
+
+ async def sample_list_specialist_pools():
+ # Create a client
+ client = aiplatform_v1beta1.SpecialistPoolServiceAsyncClient()
+
+ # Initialize request argument(s)
+ request = aiplatform_v1beta1.ListSpecialistPoolsRequest(
+ parent="parent_value",
+ )
+
+ # Make the request
+ page_result = await client.list_specialist_pools(request=request)
+
+ # Handle the response
+ async for response in page_result:
+ print(response)
+
Args:
request (Union[google.cloud.aiplatform_v1beta1.types.ListSpecialistPoolsRequest, dict]):
The request object. Request message for
@@ -499,6 +566,30 @@ async def delete_specialist_pool(
r"""Deletes a SpecialistPool as well as all Specialists
in the pool.
+
+ .. code-block::
+
+ from google.cloud import aiplatform_v1beta1
+
+ async def sample_delete_specialist_pool():
+ # Create a client
+ client = aiplatform_v1beta1.SpecialistPoolServiceAsyncClient()
+
+ # Initialize request argument(s)
+ request = aiplatform_v1beta1.DeleteSpecialistPoolRequest(
+ name="name_value",
+ )
+
+ # Make the request
+ operation = client.delete_specialist_pool(request=request)
+
+ print("Waiting for operation to complete...")
+
+ response = (await operation).result()
+
+ # Handle the response
+ print(response)
+
Args:
request (Union[google.cloud.aiplatform_v1beta1.types.DeleteSpecialistPoolRequest, dict]):
The request object. Request message for
@@ -595,6 +686,33 @@ async def update_specialist_pool(
) -> operation_async.AsyncOperation:
r"""Updates a SpecialistPool.
+ .. code-block::
+
+ from google.cloud import aiplatform_v1beta1
+
+ async def sample_update_specialist_pool():
+ # Create a client
+ client = aiplatform_v1beta1.SpecialistPoolServiceAsyncClient()
+
+ # Initialize request argument(s)
+ specialist_pool = aiplatform_v1beta1.SpecialistPool()
+ specialist_pool.name = "name_value"
+ specialist_pool.display_name = "display_name_value"
+
+ request = aiplatform_v1beta1.UpdateSpecialistPoolRequest(
+ specialist_pool=specialist_pool,
+ )
+
+ # Make the request
+ operation = client.update_specialist_pool(request=request)
+
+ print("Waiting for operation to complete...")
+
+ response = (await operation).result()
+
+ # Handle the response
+ print(response)
+
Args:
request (Union[google.cloud.aiplatform_v1beta1.types.UpdateSpecialistPoolRequest, dict]):
The request object. Request message for
diff --git a/google/cloud/aiplatform_v1beta1/services/specialist_pool_service/client.py b/google/cloud/aiplatform_v1beta1/services/specialist_pool_service/client.py
index eb68ebde3b..ecc3c55b60 100644
--- a/google/cloud/aiplatform_v1beta1/services/specialist_pool_service/client.py
+++ b/google/cloud/aiplatform_v1beta1/services/specialist_pool_service/client.py
@@ -428,6 +428,35 @@ def create_specialist_pool(
) -> gac_operation.Operation:
r"""Creates a SpecialistPool.
+
+ .. code-block::
+
+ from google.cloud import aiplatform_v1beta1
+
+ def sample_create_specialist_pool():
+ # Create a client
+ client = aiplatform_v1beta1.SpecialistPoolServiceClient()
+
+ # Initialize request argument(s)
+ specialist_pool = aiplatform_v1beta1.SpecialistPool()
+ specialist_pool.name = "name_value"
+ specialist_pool.display_name = "display_name_value"
+
+ request = aiplatform_v1beta1.CreateSpecialistPoolRequest(
+ parent="parent_value",
+ specialist_pool=specialist_pool,
+ )
+
+ # Make the request
+ operation = client.create_specialist_pool(request=request)
+
+ print("Waiting for operation to complete...")
+
+ response = operation.result()
+
+ # Handle the response
+ print(response)
+
Args:
request (Union[google.cloud.aiplatform_v1beta1.types.CreateSpecialistPoolRequest, dict]):
The request object. Request message for
@@ -525,6 +554,26 @@ def get_specialist_pool(
) -> specialist_pool.SpecialistPool:
r"""Gets a SpecialistPool.
+
+ .. code-block::
+
+ from google.cloud import aiplatform_v1beta1
+
+ def sample_get_specialist_pool():
+ # Create a client
+ client = aiplatform_v1beta1.SpecialistPoolServiceClient()
+
+ # Initialize request argument(s)
+ request = aiplatform_v1beta1.GetSpecialistPoolRequest(
+ name="name_value",
+ )
+
+ # Make the request
+ response = client.get_specialist_pool(request=request)
+
+ # Handle the response
+ print(response)
+
Args:
request (Union[google.cloud.aiplatform_v1beta1.types.GetSpecialistPoolRequest, dict]):
The request object. Request message for
@@ -607,6 +656,27 @@ def list_specialist_pools(
) -> pagers.ListSpecialistPoolsPager:
r"""Lists SpecialistPools in a Location.
+
+ .. code-block::
+
+ from google.cloud import aiplatform_v1beta1
+
+ def sample_list_specialist_pools():
+ # Create a client
+ client = aiplatform_v1beta1.SpecialistPoolServiceClient()
+
+ # Initialize request argument(s)
+ request = aiplatform_v1beta1.ListSpecialistPoolsRequest(
+ parent="parent_value",
+ )
+
+ # Make the request
+ page_result = client.list_specialist_pools(request=request)
+
+ # Handle the response
+ for response in page_result:
+ print(response)
+
Args:
request (Union[google.cloud.aiplatform_v1beta1.types.ListSpecialistPoolsRequest, dict]):
The request object. Request message for
@@ -691,6 +761,31 @@ def delete_specialist_pool(
r"""Deletes a SpecialistPool as well as all Specialists
in the pool.
+
+
+ .. code-block::
+
+ from google.cloud import aiplatform_v1beta1
+
+ def sample_delete_specialist_pool():
+ # Create a client
+ client = aiplatform_v1beta1.SpecialistPoolServiceClient()
+
+ # Initialize request argument(s)
+ request = aiplatform_v1beta1.DeleteSpecialistPoolRequest(
+ name="name_value",
+ )
+
+ # Make the request
+ operation = client.delete_specialist_pool(request=request)
+
+ print("Waiting for operation to complete...")
+
+ response = operation.result()
+
+ # Handle the response
+ print(response)
+
Args:
request (Union[google.cloud.aiplatform_v1beta1.types.DeleteSpecialistPoolRequest, dict]):
The request object. Request message for
@@ -787,6 +882,34 @@ def update_specialist_pool(
) -> gac_operation.Operation:
r"""Updates a SpecialistPool.
+
+ .. code-block::
+
+ from google.cloud import aiplatform_v1beta1
+
+ def sample_update_specialist_pool():
+ # Create a client
+ client = aiplatform_v1beta1.SpecialistPoolServiceClient()
+
+ # Initialize request argument(s)
+ specialist_pool = aiplatform_v1beta1.SpecialistPool()
+ specialist_pool.name = "name_value"
+ specialist_pool.display_name = "display_name_value"
+
+ request = aiplatform_v1beta1.UpdateSpecialistPoolRequest(
+ specialist_pool=specialist_pool,
+ )
+
+ # Make the request
+ operation = client.update_specialist_pool(request=request)
+
+ print("Waiting for operation to complete...")
+
+ response = operation.result()
+
+ # Handle the response
+ print(response)
+
Args:
request (Union[google.cloud.aiplatform_v1beta1.types.UpdateSpecialistPoolRequest, dict]):
The request object. Request message for
diff --git a/google/cloud/aiplatform_v1beta1/services/tensorboard_service/async_client.py b/google/cloud/aiplatform_v1beta1/services/tensorboard_service/async_client.py
index 8e31a5017b..88c1556cbb 100644
--- a/google/cloud/aiplatform_v1beta1/services/tensorboard_service/async_client.py
+++ b/google/cloud/aiplatform_v1beta1/services/tensorboard_service/async_client.py
@@ -262,6 +262,33 @@ async def create_tensorboard(
) -> operation_async.AsyncOperation:
r"""Creates a Tensorboard.
+ .. code-block::
+
+ from google.cloud import aiplatform_v1beta1
+
+ def sample_create_tensorboard():
+ # Create a client
+ client = aiplatform_v1beta1.TensorboardServiceClient()
+
+ # Initialize request argument(s)
+ tensorboard = aiplatform_v1beta1.Tensorboard()
+ tensorboard.display_name = "display_name_value"
+
+ request = aiplatform_v1beta1.CreateTensorboardRequest(
+ parent="parent_value",
+ tensorboard=tensorboard,
+ )
+
+ # Make the request
+ operation = client.create_tensorboard(request=request)
+
+ print("Waiting for operation to complete...")
+
+ response = operation.result()
+
+ # Handle the response
+ print(response)
+
Args:
request (Union[google.cloud.aiplatform_v1beta1.types.CreateTensorboardRequest, dict]):
The request object. Request message for
@@ -353,6 +380,25 @@ async def get_tensorboard(
) -> tensorboard.Tensorboard:
r"""Gets a Tensorboard.
+ .. code-block::
+
+ from google.cloud import aiplatform_v1beta1
+
+ def sample_get_tensorboard():
+ # Create a client
+ client = aiplatform_v1beta1.TensorboardServiceClient()
+
+ # Initialize request argument(s)
+ request = aiplatform_v1beta1.GetTensorboardRequest(
+ name="name_value",
+ )
+
+ # Make the request
+ response = client.get_tensorboard(request=request)
+
+ # Handle the response
+ print(response)
+
Args:
request (Union[google.cloud.aiplatform_v1beta1.types.GetTensorboardRequest, dict]):
The request object. Request message for
@@ -429,6 +475,32 @@ async def update_tensorboard(
) -> operation_async.AsyncOperation:
r"""Updates a Tensorboard.
+ .. code-block::
+
+ from google.cloud import aiplatform_v1beta1
+
+ def sample_update_tensorboard():
+ # Create a client
+ client = aiplatform_v1beta1.TensorboardServiceClient()
+
+ # Initialize request argument(s)
+ tensorboard = aiplatform_v1beta1.Tensorboard()
+ tensorboard.display_name = "display_name_value"
+
+ request = aiplatform_v1beta1.UpdateTensorboardRequest(
+ tensorboard=tensorboard,
+ )
+
+ # Make the request
+ operation = client.update_tensorboard(request=request)
+
+ print("Waiting for operation to complete...")
+
+ response = operation.result()
+
+ # Handle the response
+ print(response)
+
Args:
request (Union[google.cloud.aiplatform_v1beta1.types.UpdateTensorboardRequest, dict]):
The request object. Request message for
@@ -529,6 +601,26 @@ async def list_tensorboards(
) -> pagers.ListTensorboardsAsyncPager:
r"""Lists Tensorboards in a Location.
+ .. code-block::
+
+ from google.cloud import aiplatform_v1beta1
+
+ def sample_list_tensorboards():
+ # Create a client
+ client = aiplatform_v1beta1.TensorboardServiceClient()
+
+ # Initialize request argument(s)
+ request = aiplatform_v1beta1.ListTensorboardsRequest(
+ parent="parent_value",
+ )
+
+ # Make the request
+ page_result = client.list_tensorboards(request=request)
+
+ # Handle the response
+ for response in page_result:
+ print(response)
+
Args:
request (Union[google.cloud.aiplatform_v1beta1.types.ListTensorboardsRequest, dict]):
The request object. Request message for
@@ -610,6 +702,29 @@ async def delete_tensorboard(
) -> operation_async.AsyncOperation:
r"""Deletes a Tensorboard.
+ .. code-block::
+
+ from google.cloud import aiplatform_v1beta1
+
+ def sample_delete_tensorboard():
+ # Create a client
+ client = aiplatform_v1beta1.TensorboardServiceClient()
+
+ # Initialize request argument(s)
+ request = aiplatform_v1beta1.DeleteTensorboardRequest(
+ name="name_value",
+ )
+
+ # Make the request
+ operation = client.delete_tensorboard(request=request)
+
+ print("Waiting for operation to complete...")
+
+ response = operation.result()
+
+ # Handle the response
+ print(response)
+
Args:
request (Union[google.cloud.aiplatform_v1beta1.types.DeleteTensorboardRequest, dict]):
The request object. Request message for
@@ -707,6 +822,26 @@ async def create_tensorboard_experiment(
) -> gca_tensorboard_experiment.TensorboardExperiment:
r"""Creates a TensorboardExperiment.
+ .. code-block::
+
+ from google.cloud import aiplatform_v1beta1
+
+ def sample_create_tensorboard_experiment():
+ # Create a client
+ client = aiplatform_v1beta1.TensorboardServiceClient()
+
+ # Initialize request argument(s)
+ request = aiplatform_v1beta1.CreateTensorboardExperimentRequest(
+ parent="parent_value",
+ tensorboard_experiment_id="tensorboard_experiment_id_value",
+ )
+
+ # Make the request
+ response = client.create_tensorboard_experiment(request=request)
+
+ # Handle the response
+ print(response)
+
Args:
request (Union[google.cloud.aiplatform_v1beta1.types.CreateTensorboardExperimentRequest, dict]):
The request object. Request message for
@@ -805,6 +940,25 @@ async def get_tensorboard_experiment(
) -> tensorboard_experiment.TensorboardExperiment:
r"""Gets a TensorboardExperiment.
+ .. code-block::
+
+ from google.cloud import aiplatform_v1beta1
+
+ def sample_get_tensorboard_experiment():
+ # Create a client
+ client = aiplatform_v1beta1.TensorboardServiceClient()
+
+ # Initialize request argument(s)
+ request = aiplatform_v1beta1.GetTensorboardExperimentRequest(
+ name="name_value",
+ )
+
+ # Make the request
+ response = client.get_tensorboard_experiment(request=request)
+
+ # Handle the response
+ print(response)
+
Args:
request (Union[google.cloud.aiplatform_v1beta1.types.GetTensorboardExperimentRequest, dict]):
The request object. Request message for
@@ -882,6 +1036,24 @@ async def update_tensorboard_experiment(
) -> gca_tensorboard_experiment.TensorboardExperiment:
r"""Updates a TensorboardExperiment.
+ .. code-block::
+
+ from google.cloud import aiplatform_v1beta1
+
+ def sample_update_tensorboard_experiment():
+ # Create a client
+ client = aiplatform_v1beta1.TensorboardServiceClient()
+
+ # Initialize request argument(s)
+ request = aiplatform_v1beta1.UpdateTensorboardExperimentRequest(
+ )
+
+ # Make the request
+ response = client.update_tensorboard_experiment(request=request)
+
+ # Handle the response
+ print(response)
+
Args:
request (Union[google.cloud.aiplatform_v1beta1.types.UpdateTensorboardExperimentRequest, dict]):
The request object. Request message for
@@ -975,6 +1147,26 @@ async def list_tensorboard_experiments(
) -> pagers.ListTensorboardExperimentsAsyncPager:
r"""Lists TensorboardExperiments in a Location.
+ .. code-block::
+
+ from google.cloud import aiplatform_v1beta1
+
+ def sample_list_tensorboard_experiments():
+ # Create a client
+ client = aiplatform_v1beta1.TensorboardServiceClient()
+
+ # Initialize request argument(s)
+ request = aiplatform_v1beta1.ListTensorboardExperimentsRequest(
+ parent="parent_value",
+ )
+
+ # Make the request
+ page_result = client.list_tensorboard_experiments(request=request)
+
+ # Handle the response
+ for response in page_result:
+ print(response)
+
Args:
request (Union[google.cloud.aiplatform_v1beta1.types.ListTensorboardExperimentsRequest, dict]):
The request object. Request message for
@@ -1059,6 +1251,29 @@ async def delete_tensorboard_experiment(
) -> operation_async.AsyncOperation:
r"""Deletes a TensorboardExperiment.
+ .. code-block::
+
+ from google.cloud import aiplatform_v1beta1
+
+ def sample_delete_tensorboard_experiment():
+ # Create a client
+ client = aiplatform_v1beta1.TensorboardServiceClient()
+
+ # Initialize request argument(s)
+ request = aiplatform_v1beta1.DeleteTensorboardExperimentRequest(
+ name="name_value",
+ )
+
+ # Make the request
+ operation = client.delete_tensorboard_experiment(request=request)
+
+ print("Waiting for operation to complete...")
+
+ response = operation.result()
+
+ # Handle the response
+ print(response)
+
Args:
request (Union[google.cloud.aiplatform_v1beta1.types.DeleteTensorboardExperimentRequest, dict]):
The request object. Request message for
@@ -1154,6 +1369,30 @@ async def create_tensorboard_run(
) -> gca_tensorboard_run.TensorboardRun:
r"""Creates a TensorboardRun.
+ .. code-block::
+
+ from google.cloud import aiplatform_v1beta1
+
+ def sample_create_tensorboard_run():
+ # Create a client
+ client = aiplatform_v1beta1.TensorboardServiceClient()
+
+ # Initialize request argument(s)
+ tensorboard_run = aiplatform_v1beta1.TensorboardRun()
+ tensorboard_run.display_name = "display_name_value"
+
+ request = aiplatform_v1beta1.CreateTensorboardRunRequest(
+ parent="parent_value",
+ tensorboard_run=tensorboard_run,
+ tensorboard_run_id="tensorboard_run_id_value",
+ )
+
+ # Make the request
+ response = client.create_tensorboard_run(request=request)
+
+ # Handle the response
+ print(response)
+
Args:
request (Union[google.cloud.aiplatform_v1beta1.types.CreateTensorboardRunRequest, dict]):
The request object. Request message for
@@ -1253,6 +1492,31 @@ async def batch_create_tensorboard_runs(
) -> tensorboard_service.BatchCreateTensorboardRunsResponse:
r"""Batch create TensorboardRuns.
+ .. code-block::
+
+ from google.cloud import aiplatform_v1beta1
+
+ def sample_batch_create_tensorboard_runs():
+ # Create a client
+ client = aiplatform_v1beta1.TensorboardServiceClient()
+
+ # Initialize request argument(s)
+ requests = aiplatform_v1beta1.CreateTensorboardRunRequest()
+ requests.parent = "parent_value"
+ requests.tensorboard_run.display_name = "display_name_value"
+ requests.tensorboard_run_id = "tensorboard_run_id_value"
+
+ request = aiplatform_v1beta1.BatchCreateTensorboardRunsRequest(
+ parent="parent_value",
+ requests=requests,
+ )
+
+ # Make the request
+ response = client.batch_create_tensorboard_runs(request=request)
+
+ # Handle the response
+ print(response)
+
Args:
request (Union[google.cloud.aiplatform_v1beta1.types.BatchCreateTensorboardRunsRequest, dict]):
The request object. Request message for
@@ -1339,6 +1603,25 @@ async def get_tensorboard_run(
) -> tensorboard_run.TensorboardRun:
r"""Gets a TensorboardRun.
+ .. code-block::
+
+ from google.cloud import aiplatform_v1beta1
+
+ def sample_get_tensorboard_run():
+ # Create a client
+ client = aiplatform_v1beta1.TensorboardServiceClient()
+
+ # Initialize request argument(s)
+ request = aiplatform_v1beta1.GetTensorboardRunRequest(
+ name="name_value",
+ )
+
+ # Make the request
+ response = client.get_tensorboard_run(request=request)
+
+ # Handle the response
+ print(response)
+
Args:
request (Union[google.cloud.aiplatform_v1beta1.types.GetTensorboardRunRequest, dict]):
The request object. Request message for
@@ -1414,6 +1697,28 @@ async def update_tensorboard_run(
) -> gca_tensorboard_run.TensorboardRun:
r"""Updates a TensorboardRun.
+ .. code-block::
+
+ from google.cloud import aiplatform_v1beta1
+
+ def sample_update_tensorboard_run():
+ # Create a client
+ client = aiplatform_v1beta1.TensorboardServiceClient()
+
+ # Initialize request argument(s)
+ tensorboard_run = aiplatform_v1beta1.TensorboardRun()
+ tensorboard_run.display_name = "display_name_value"
+
+ request = aiplatform_v1beta1.UpdateTensorboardRunRequest(
+ tensorboard_run=tensorboard_run,
+ )
+
+ # Make the request
+ response = client.update_tensorboard_run(request=request)
+
+ # Handle the response
+ print(response)
+
Args:
request (Union[google.cloud.aiplatform_v1beta1.types.UpdateTensorboardRunRequest, dict]):
The request object. Request message for
@@ -1504,6 +1809,26 @@ async def list_tensorboard_runs(
) -> pagers.ListTensorboardRunsAsyncPager:
r"""Lists TensorboardRuns in a Location.
+ .. code-block::
+
+ from google.cloud import aiplatform_v1beta1
+
+ def sample_list_tensorboard_runs():
+ # Create a client
+ client = aiplatform_v1beta1.TensorboardServiceClient()
+
+ # Initialize request argument(s)
+ request = aiplatform_v1beta1.ListTensorboardRunsRequest(
+ parent="parent_value",
+ )
+
+ # Make the request
+ page_result = client.list_tensorboard_runs(request=request)
+
+ # Handle the response
+ for response in page_result:
+ print(response)
+
Args:
request (Union[google.cloud.aiplatform_v1beta1.types.ListTensorboardRunsRequest, dict]):
The request object. Request message for
@@ -1586,6 +1911,29 @@ async def delete_tensorboard_run(
) -> operation_async.AsyncOperation:
r"""Deletes a TensorboardRun.
+ .. code-block::
+
+ from google.cloud import aiplatform_v1beta1
+
+ def sample_delete_tensorboard_run():
+ # Create a client
+ client = aiplatform_v1beta1.TensorboardServiceClient()
+
+ # Initialize request argument(s)
+ request = aiplatform_v1beta1.DeleteTensorboardRunRequest(
+ name="name_value",
+ )
+
+ # Make the request
+ operation = client.delete_tensorboard_run(request=request)
+
+ print("Waiting for operation to complete...")
+
+ response = operation.result()
+
+ # Handle the response
+ print(response)
+
Args:
request (Union[google.cloud.aiplatform_v1beta1.types.DeleteTensorboardRunRequest, dict]):
The request object. Request message for
@@ -1685,6 +2033,32 @@ async def batch_create_tensorboard_time_series(
r"""Batch create TensorboardTimeSeries that belong to a
TensorboardExperiment.
+
+ .. code-block::
+
+ from google.cloud import aiplatform_v1beta1
+
+ def sample_batch_create_tensorboard_time_series():
+ # Create a client
+ client = aiplatform_v1beta1.TensorboardServiceClient()
+
+ # Initialize request argument(s)
+ requests = aiplatform_v1beta1.CreateTensorboardTimeSeriesRequest()
+ requests.parent = "parent_value"
+ requests.tensorboard_time_series.display_name = "display_name_value"
+ requests.tensorboard_time_series.value_type = "BLOB_SEQUENCE"
+
+ request = aiplatform_v1beta1.BatchCreateTensorboardTimeSeriesRequest(
+ parent="parent_value",
+ requests=requests,
+ )
+
+ # Make the request
+ response = client.batch_create_tensorboard_time_series(request=request)
+
+ # Handle the response
+ print(response)
+
Args:
request (Union[google.cloud.aiplatform_v1beta1.types.BatchCreateTensorboardTimeSeriesRequest, dict]):
The request object. Request message for
@@ -1775,6 +2149,30 @@ async def create_tensorboard_time_series(
) -> gca_tensorboard_time_series.TensorboardTimeSeries:
r"""Creates a TensorboardTimeSeries.
+ .. code-block::
+
+ from google.cloud import aiplatform_v1beta1
+
+ def sample_create_tensorboard_time_series():
+ # Create a client
+ client = aiplatform_v1beta1.TensorboardServiceClient()
+
+ # Initialize request argument(s)
+ tensorboard_time_series = aiplatform_v1beta1.TensorboardTimeSeries()
+ tensorboard_time_series.display_name = "display_name_value"
+ tensorboard_time_series.value_type = "BLOB_SEQUENCE"
+
+ request = aiplatform_v1beta1.CreateTensorboardTimeSeriesRequest(
+ parent="parent_value",
+ tensorboard_time_series=tensorboard_time_series,
+ )
+
+ # Make the request
+ response = client.create_tensorboard_time_series(request=request)
+
+ # Handle the response
+ print(response)
+
Args:
request (Union[google.cloud.aiplatform_v1beta1.types.CreateTensorboardTimeSeriesRequest, dict]):
The request object. Request message for
@@ -1858,6 +2256,25 @@ async def get_tensorboard_time_series(
) -> tensorboard_time_series.TensorboardTimeSeries:
r"""Gets a TensorboardTimeSeries.
+ .. code-block::
+
+ from google.cloud import aiplatform_v1beta1
+
+ def sample_get_tensorboard_time_series():
+ # Create a client
+ client = aiplatform_v1beta1.TensorboardServiceClient()
+
+ # Initialize request argument(s)
+ request = aiplatform_v1beta1.GetTensorboardTimeSeriesRequest(
+ name="name_value",
+ )
+
+ # Make the request
+ response = client.get_tensorboard_time_series(request=request)
+
+ # Handle the response
+ print(response)
+
Args:
request (Union[google.cloud.aiplatform_v1beta1.types.GetTensorboardTimeSeriesRequest, dict]):
The request object. Request message for
@@ -1933,6 +2350,29 @@ async def update_tensorboard_time_series(
) -> gca_tensorboard_time_series.TensorboardTimeSeries:
r"""Updates a TensorboardTimeSeries.
+ .. code-block::
+
+ from google.cloud import aiplatform_v1beta1
+
+ def sample_update_tensorboard_time_series():
+ # Create a client
+ client = aiplatform_v1beta1.TensorboardServiceClient()
+
+ # Initialize request argument(s)
+ tensorboard_time_series = aiplatform_v1beta1.TensorboardTimeSeries()
+ tensorboard_time_series.display_name = "display_name_value"
+ tensorboard_time_series.value_type = "BLOB_SEQUENCE"
+
+ request = aiplatform_v1beta1.UpdateTensorboardTimeSeriesRequest(
+ tensorboard_time_series=tensorboard_time_series,
+ )
+
+ # Make the request
+ response = client.update_tensorboard_time_series(request=request)
+
+ # Handle the response
+ print(response)
+
Args:
request (Union[google.cloud.aiplatform_v1beta1.types.UpdateTensorboardTimeSeriesRequest, dict]):
The request object. Request message for
@@ -2029,6 +2469,26 @@ async def list_tensorboard_time_series(
) -> pagers.ListTensorboardTimeSeriesAsyncPager:
r"""Lists TensorboardTimeSeries in a Location.
+ .. code-block::
+
+ from google.cloud import aiplatform_v1beta1
+
+ def sample_list_tensorboard_time_series():
+ # Create a client
+ client = aiplatform_v1beta1.TensorboardServiceClient()
+
+ # Initialize request argument(s)
+ request = aiplatform_v1beta1.ListTensorboardTimeSeriesRequest(
+ parent="parent_value",
+ )
+
+ # Make the request
+ page_result = client.list_tensorboard_time_series(request=request)
+
+ # Handle the response
+ for response in page_result:
+ print(response)
+
Args:
request (Union[google.cloud.aiplatform_v1beta1.types.ListTensorboardTimeSeriesRequest, dict]):
The request object. Request message for
@@ -2113,6 +2573,29 @@ async def delete_tensorboard_time_series(
) -> operation_async.AsyncOperation:
r"""Deletes a TensorboardTimeSeries.
+ .. code-block::
+
+ from google.cloud import aiplatform_v1beta1
+
+ def sample_delete_tensorboard_time_series():
+ # Create a client
+ client = aiplatform_v1beta1.TensorboardServiceClient()
+
+ # Initialize request argument(s)
+ request = aiplatform_v1beta1.DeleteTensorboardTimeSeriesRequest(
+ name="name_value",
+ )
+
+ # Make the request
+ operation = client.delete_tensorboard_time_series(request=request)
+
+ print("Waiting for operation to complete...")
+
+ response = operation.result()
+
+ # Handle the response
+ print(response)
+
Args:
request (Union[google.cloud.aiplatform_v1beta1.types.DeleteTensorboardTimeSeriesRequest, dict]):
The request object. Request message for
@@ -2213,6 +2696,27 @@ async def batch_read_tensorboard_time_series_data(
Otherwise, that limit number of data points will be
randomly selected from this time series and returned.
+
+ .. code-block::
+
+ from google.cloud import aiplatform_v1beta1
+
+ def sample_batch_read_tensorboard_time_series_data():
+ # Create a client
+ client = aiplatform_v1beta1.TensorboardServiceClient()
+
+ # Initialize request argument(s)
+ request = aiplatform_v1beta1.BatchReadTensorboardTimeSeriesDataRequest(
+ tensorboard="tensorboard_value",
+ time_series=['time_series_value_1', 'time_series_value_2'],
+ )
+
+ # Make the request
+ response = client.batch_read_tensorboard_time_series_data(request=request)
+
+ # Handle the response
+ print(response)
+
Args:
request (Union[google.cloud.aiplatform_v1beta1.types.BatchReadTensorboardTimeSeriesDataRequest, dict]):
The request object. Request message for
@@ -2297,6 +2801,26 @@ async def read_tensorboard_time_series_data(
from this time series and returned. This value can be changed by
changing max_data_points, which can't be greater than 10k.
+
+ .. code-block::
+
+ from google.cloud import aiplatform_v1beta1
+
+ def sample_read_tensorboard_time_series_data():
+ # Create a client
+ client = aiplatform_v1beta1.TensorboardServiceClient()
+
+ # Initialize request argument(s)
+ request = aiplatform_v1beta1.ReadTensorboardTimeSeriesDataRequest(
+ tensorboard_time_series="tensorboard_time_series_value",
+ )
+
+ # Make the request
+ response = client.read_tensorboard_time_series_data(request=request)
+
+ # Handle the response
+ print(response)
+
Args:
request (Union[google.cloud.aiplatform_v1beta1.types.ReadTensorboardTimeSeriesDataRequest, dict]):
The request object. Request message for
@@ -2374,6 +2898,27 @@ def read_tensorboard_blob_data(
project's Cloud Storage bucket without users having to
obtain Cloud Storage access permission.
+
+ .. code-block::
+
+ from google.cloud import aiplatform_v1beta1
+
+ def sample_read_tensorboard_blob_data():
+ # Create a client
+ client = aiplatform_v1beta1.TensorboardServiceClient()
+
+ # Initialize request argument(s)
+ request = aiplatform_v1beta1.ReadTensorboardBlobDataRequest(
+ time_series="time_series_value",
+ )
+
+ # Make the request
+ stream = client.read_tensorboard_blob_data(request=request)
+
+ # Handle the response
+ for response in stream:
+ print(response)
+
Args:
request (Union[google.cloud.aiplatform_v1beta1.types.ReadTensorboardBlobDataRequest, dict]):
The request object. Request message for
@@ -2455,6 +3000,32 @@ async def write_tensorboard_experiment_data(
TensorboardTimeSeries in multiple TensorboardRun's. If
any data fail to be ingested, an error will be returned.
+
+ .. code-block::
+
+ from google.cloud import aiplatform_v1beta1
+
+ def sample_write_tensorboard_experiment_data():
+ # Create a client
+ client = aiplatform_v1beta1.TensorboardServiceClient()
+
+ # Initialize request argument(s)
+ write_run_data_requests = aiplatform_v1beta1.WriteTensorboardRunDataRequest()
+ write_run_data_requests.tensorboard_run = "tensorboard_run_value"
+ write_run_data_requests.time_series_data.tensorboard_time_series_id = "tensorboard_time_series_id_value"
+ write_run_data_requests.time_series_data.value_type = "BLOB_SEQUENCE"
+
+ request = aiplatform_v1beta1.WriteTensorboardExperimentDataRequest(
+ tensorboard_experiment="tensorboard_experiment_value",
+ write_run_data_requests=write_run_data_requests,
+ )
+
+ # Make the request
+ response = client.write_tensorboard_experiment_data(request=request)
+
+ # Handle the response
+ print(response)
+
Args:
request (Union[google.cloud.aiplatform_v1beta1.types.WriteTensorboardExperimentDataRequest, dict]):
The request object. Request message for
@@ -2541,6 +3112,31 @@ async def write_tensorboard_run_data(
TensorboardTimeSeries under a TensorboardRun. If any
data fail to be ingested, an error will be returned.
+
+ .. code-block::
+
+ from google.cloud import aiplatform_v1beta1
+
+ def sample_write_tensorboard_run_data():
+ # Create a client
+ client = aiplatform_v1beta1.TensorboardServiceClient()
+
+ # Initialize request argument(s)
+ time_series_data = aiplatform_v1beta1.TimeSeriesData()
+ time_series_data.tensorboard_time_series_id = "tensorboard_time_series_id_value"
+ time_series_data.value_type = "BLOB_SEQUENCE"
+
+ request = aiplatform_v1beta1.WriteTensorboardRunDataRequest(
+ tensorboard_run="tensorboard_run_value",
+ time_series_data=time_series_data,
+ )
+
+ # Make the request
+ response = client.write_tensorboard_run_data(request=request)
+
+ # Handle the response
+ print(response)
+
Args:
request (Union[google.cloud.aiplatform_v1beta1.types.WriteTensorboardRunDataRequest, dict]):
The request object. Request message for
@@ -2633,6 +3229,27 @@ async def export_tensorboard_time_series_data(
r"""Exports a TensorboardTimeSeries' data. Data is
returned in paginated responses.
+
+ .. code-block::
+
+ from google.cloud import aiplatform_v1beta1
+
+ def sample_export_tensorboard_time_series_data():
+ # Create a client
+ client = aiplatform_v1beta1.TensorboardServiceClient()
+
+ # Initialize request argument(s)
+ request = aiplatform_v1beta1.ExportTensorboardTimeSeriesDataRequest(
+ tensorboard_time_series="tensorboard_time_series_value",
+ )
+
+ # Make the request
+ page_result = client.export_tensorboard_time_series_data(request=request)
+
+ # Handle the response
+ for response in page_result:
+ print(response)
+
Args:
request (Union[google.cloud.aiplatform_v1beta1.types.ExportTensorboardTimeSeriesDataRequest, dict]):
The request object. Request message for
diff --git a/google/cloud/aiplatform_v1beta1/services/tensorboard_service/client.py b/google/cloud/aiplatform_v1beta1/services/tensorboard_service/client.py
index a513ce188e..5e4e60a358 100644
--- a/google/cloud/aiplatform_v1beta1/services/tensorboard_service/client.py
+++ b/google/cloud/aiplatform_v1beta1/services/tensorboard_service/client.py
@@ -504,6 +504,34 @@ def create_tensorboard(
) -> gac_operation.Operation:
r"""Creates a Tensorboard.
+
+ .. code-block::
+
+ from google.cloud import aiplatform_v1beta1
+
+ def sample_create_tensorboard():
+ # Create a client
+ client = aiplatform_v1beta1.TensorboardServiceClient()
+
+ # Initialize request argument(s)
+ tensorboard = aiplatform_v1beta1.Tensorboard()
+ tensorboard.display_name = "display_name_value"
+
+ request = aiplatform_v1beta1.CreateTensorboardRequest(
+ parent="parent_value",
+ tensorboard=tensorboard,
+ )
+
+ # Make the request
+ operation = client.create_tensorboard(request=request)
+
+ print("Waiting for operation to complete...")
+
+ response = operation.result()
+
+ # Handle the response
+ print(response)
+
Args:
request (Union[google.cloud.aiplatform_v1beta1.types.CreateTensorboardRequest, dict]):
The request object. Request message for
@@ -595,6 +623,26 @@ def get_tensorboard(
) -> tensorboard.Tensorboard:
r"""Gets a Tensorboard.
+
+ .. code-block::
+
+ from google.cloud import aiplatform_v1beta1
+
+ def sample_get_tensorboard():
+ # Create a client
+ client = aiplatform_v1beta1.TensorboardServiceClient()
+
+ # Initialize request argument(s)
+ request = aiplatform_v1beta1.GetTensorboardRequest(
+ name="name_value",
+ )
+
+ # Make the request
+ response = client.get_tensorboard(request=request)
+
+ # Handle the response
+ print(response)
+
Args:
request (Union[google.cloud.aiplatform_v1beta1.types.GetTensorboardRequest, dict]):
The request object. Request message for
@@ -671,6 +719,33 @@ def update_tensorboard(
) -> gac_operation.Operation:
r"""Updates a Tensorboard.
+
+ .. code-block::
+
+ from google.cloud import aiplatform_v1beta1
+
+ def sample_update_tensorboard():
+ # Create a client
+ client = aiplatform_v1beta1.TensorboardServiceClient()
+
+ # Initialize request argument(s)
+ tensorboard = aiplatform_v1beta1.Tensorboard()
+ tensorboard.display_name = "display_name_value"
+
+ request = aiplatform_v1beta1.UpdateTensorboardRequest(
+ tensorboard=tensorboard,
+ )
+
+ # Make the request
+ operation = client.update_tensorboard(request=request)
+
+ print("Waiting for operation to complete...")
+
+ response = operation.result()
+
+ # Handle the response
+ print(response)
+
Args:
request (Union[google.cloud.aiplatform_v1beta1.types.UpdateTensorboardRequest, dict]):
The request object. Request message for
@@ -771,6 +846,27 @@ def list_tensorboards(
) -> pagers.ListTensorboardsPager:
r"""Lists Tensorboards in a Location.
+
+ .. code-block::
+
+ from google.cloud import aiplatform_v1beta1
+
+ def sample_list_tensorboards():
+ # Create a client
+ client = aiplatform_v1beta1.TensorboardServiceClient()
+
+ # Initialize request argument(s)
+ request = aiplatform_v1beta1.ListTensorboardsRequest(
+ parent="parent_value",
+ )
+
+ # Make the request
+ page_result = client.list_tensorboards(request=request)
+
+ # Handle the response
+ for response in page_result:
+ print(response)
+
Args:
request (Union[google.cloud.aiplatform_v1beta1.types.ListTensorboardsRequest, dict]):
The request object. Request message for
@@ -852,6 +948,30 @@ def delete_tensorboard(
) -> gac_operation.Operation:
r"""Deletes a Tensorboard.
+
+ .. code-block::
+
+ from google.cloud import aiplatform_v1beta1
+
+ def sample_delete_tensorboard():
+ # Create a client
+ client = aiplatform_v1beta1.TensorboardServiceClient()
+
+ # Initialize request argument(s)
+ request = aiplatform_v1beta1.DeleteTensorboardRequest(
+ name="name_value",
+ )
+
+ # Make the request
+ operation = client.delete_tensorboard(request=request)
+
+ print("Waiting for operation to complete...")
+
+ response = operation.result()
+
+ # Handle the response
+ print(response)
+
Args:
request (Union[google.cloud.aiplatform_v1beta1.types.DeleteTensorboardRequest, dict]):
The request object. Request message for
@@ -949,6 +1069,27 @@ def create_tensorboard_experiment(
) -> gca_tensorboard_experiment.TensorboardExperiment:
r"""Creates a TensorboardExperiment.
+
+ .. code-block::
+
+ from google.cloud import aiplatform_v1beta1
+
+ def sample_create_tensorboard_experiment():
+ # Create a client
+ client = aiplatform_v1beta1.TensorboardServiceClient()
+
+ # Initialize request argument(s)
+ request = aiplatform_v1beta1.CreateTensorboardExperimentRequest(
+ parent="parent_value",
+ tensorboard_experiment_id="tensorboard_experiment_id_value",
+ )
+
+ # Make the request
+ response = client.create_tensorboard_experiment(request=request)
+
+ # Handle the response
+ print(response)
+
Args:
request (Union[google.cloud.aiplatform_v1beta1.types.CreateTensorboardExperimentRequest, dict]):
The request object. Request message for
@@ -1051,6 +1192,26 @@ def get_tensorboard_experiment(
) -> tensorboard_experiment.TensorboardExperiment:
r"""Gets a TensorboardExperiment.
+
+ .. code-block::
+
+ from google.cloud import aiplatform_v1beta1
+
+ def sample_get_tensorboard_experiment():
+ # Create a client
+ client = aiplatform_v1beta1.TensorboardServiceClient()
+
+ # Initialize request argument(s)
+ request = aiplatform_v1beta1.GetTensorboardExperimentRequest(
+ name="name_value",
+ )
+
+ # Make the request
+ response = client.get_tensorboard_experiment(request=request)
+
+ # Handle the response
+ print(response)
+
Args:
request (Union[google.cloud.aiplatform_v1beta1.types.GetTensorboardExperimentRequest, dict]):
The request object. Request message for
@@ -1130,6 +1291,25 @@ def update_tensorboard_experiment(
) -> gca_tensorboard_experiment.TensorboardExperiment:
r"""Updates a TensorboardExperiment.
+
+ .. code-block::
+
+ from google.cloud import aiplatform_v1beta1
+
+ def sample_update_tensorboard_experiment():
+ # Create a client
+ client = aiplatform_v1beta1.TensorboardServiceClient()
+
+ # Initialize request argument(s)
+ request = aiplatform_v1beta1.UpdateTensorboardExperimentRequest(
+ )
+
+ # Make the request
+ response = client.update_tensorboard_experiment(request=request)
+
+ # Handle the response
+ print(response)
+
Args:
request (Union[google.cloud.aiplatform_v1beta1.types.UpdateTensorboardExperimentRequest, dict]):
The request object. Request message for
@@ -1227,6 +1407,27 @@ def list_tensorboard_experiments(
) -> pagers.ListTensorboardExperimentsPager:
r"""Lists TensorboardExperiments in a Location.
+
+ .. code-block::
+
+ from google.cloud import aiplatform_v1beta1
+
+ def sample_list_tensorboard_experiments():
+ # Create a client
+ client = aiplatform_v1beta1.TensorboardServiceClient()
+
+ # Initialize request argument(s)
+ request = aiplatform_v1beta1.ListTensorboardExperimentsRequest(
+ parent="parent_value",
+ )
+
+ # Make the request
+ page_result = client.list_tensorboard_experiments(request=request)
+
+ # Handle the response
+ for response in page_result:
+ print(response)
+
Args:
request (Union[google.cloud.aiplatform_v1beta1.types.ListTensorboardExperimentsRequest, dict]):
The request object. Request message for
@@ -1315,6 +1516,30 @@ def delete_tensorboard_experiment(
) -> gac_operation.Operation:
r"""Deletes a TensorboardExperiment.
+
+ .. code-block::
+
+ from google.cloud import aiplatform_v1beta1
+
+ def sample_delete_tensorboard_experiment():
+ # Create a client
+ client = aiplatform_v1beta1.TensorboardServiceClient()
+
+ # Initialize request argument(s)
+ request = aiplatform_v1beta1.DeleteTensorboardExperimentRequest(
+ name="name_value",
+ )
+
+ # Make the request
+ operation = client.delete_tensorboard_experiment(request=request)
+
+ print("Waiting for operation to complete...")
+
+ response = operation.result()
+
+ # Handle the response
+ print(response)
+
Args:
request (Union[google.cloud.aiplatform_v1beta1.types.DeleteTensorboardExperimentRequest, dict]):
The request object. Request message for
@@ -1414,6 +1639,31 @@ def create_tensorboard_run(
) -> gca_tensorboard_run.TensorboardRun:
r"""Creates a TensorboardRun.
+
+ .. code-block::
+
+ from google.cloud import aiplatform_v1beta1
+
+ def sample_create_tensorboard_run():
+ # Create a client
+ client = aiplatform_v1beta1.TensorboardServiceClient()
+
+ # Initialize request argument(s)
+ tensorboard_run = aiplatform_v1beta1.TensorboardRun()
+ tensorboard_run.display_name = "display_name_value"
+
+ request = aiplatform_v1beta1.CreateTensorboardRunRequest(
+ parent="parent_value",
+ tensorboard_run=tensorboard_run,
+ tensorboard_run_id="tensorboard_run_id_value",
+ )
+
+ # Make the request
+ response = client.create_tensorboard_run(request=request)
+
+ # Handle the response
+ print(response)
+
Args:
request (Union[google.cloud.aiplatform_v1beta1.types.CreateTensorboardRunRequest, dict]):
The request object. Request message for
@@ -1513,6 +1763,32 @@ def batch_create_tensorboard_runs(
) -> tensorboard_service.BatchCreateTensorboardRunsResponse:
r"""Batch create TensorboardRuns.
+
+ .. code-block::
+
+ from google.cloud import aiplatform_v1beta1
+
+ def sample_batch_create_tensorboard_runs():
+ # Create a client
+ client = aiplatform_v1beta1.TensorboardServiceClient()
+
+ # Initialize request argument(s)
+ requests = aiplatform_v1beta1.CreateTensorboardRunRequest()
+ requests.parent = "parent_value"
+ requests.tensorboard_run.display_name = "display_name_value"
+ requests.tensorboard_run_id = "tensorboard_run_id_value"
+
+ request = aiplatform_v1beta1.BatchCreateTensorboardRunsRequest(
+ parent="parent_value",
+ requests=requests,
+ )
+
+ # Make the request
+ response = client.batch_create_tensorboard_runs(request=request)
+
+ # Handle the response
+ print(response)
+
Args:
request (Union[google.cloud.aiplatform_v1beta1.types.BatchCreateTensorboardRunsRequest, dict]):
The request object. Request message for
@@ -1603,6 +1879,26 @@ def get_tensorboard_run(
) -> tensorboard_run.TensorboardRun:
r"""Gets a TensorboardRun.
+
+ .. code-block::
+
+ from google.cloud import aiplatform_v1beta1
+
+ def sample_get_tensorboard_run():
+ # Create a client
+ client = aiplatform_v1beta1.TensorboardServiceClient()
+
+ # Initialize request argument(s)
+ request = aiplatform_v1beta1.GetTensorboardRunRequest(
+ name="name_value",
+ )
+
+ # Make the request
+ response = client.get_tensorboard_run(request=request)
+
+ # Handle the response
+ print(response)
+
Args:
request (Union[google.cloud.aiplatform_v1beta1.types.GetTensorboardRunRequest, dict]):
The request object. Request message for
@@ -1678,6 +1974,29 @@ def update_tensorboard_run(
) -> gca_tensorboard_run.TensorboardRun:
r"""Updates a TensorboardRun.
+
+ .. code-block::
+
+ from google.cloud import aiplatform_v1beta1
+
+ def sample_update_tensorboard_run():
+ # Create a client
+ client = aiplatform_v1beta1.TensorboardServiceClient()
+
+ # Initialize request argument(s)
+ tensorboard_run = aiplatform_v1beta1.TensorboardRun()
+ tensorboard_run.display_name = "display_name_value"
+
+ request = aiplatform_v1beta1.UpdateTensorboardRunRequest(
+ tensorboard_run=tensorboard_run,
+ )
+
+ # Make the request
+ response = client.update_tensorboard_run(request=request)
+
+ # Handle the response
+ print(response)
+
Args:
request (Union[google.cloud.aiplatform_v1beta1.types.UpdateTensorboardRunRequest, dict]):
The request object. Request message for
@@ -1768,6 +2087,27 @@ def list_tensorboard_runs(
) -> pagers.ListTensorboardRunsPager:
r"""Lists TensorboardRuns in a Location.
+
+ .. code-block::
+
+ from google.cloud import aiplatform_v1beta1
+
+ def sample_list_tensorboard_runs():
+ # Create a client
+ client = aiplatform_v1beta1.TensorboardServiceClient()
+
+ # Initialize request argument(s)
+ request = aiplatform_v1beta1.ListTensorboardRunsRequest(
+ parent="parent_value",
+ )
+
+ # Make the request
+ page_result = client.list_tensorboard_runs(request=request)
+
+ # Handle the response
+ for response in page_result:
+ print(response)
+
Args:
request (Union[google.cloud.aiplatform_v1beta1.types.ListTensorboardRunsRequest, dict]):
The request object. Request message for
@@ -1850,6 +2190,30 @@ def delete_tensorboard_run(
) -> gac_operation.Operation:
r"""Deletes a TensorboardRun.
+
+ .. code-block::
+
+ from google.cloud import aiplatform_v1beta1
+
+ def sample_delete_tensorboard_run():
+ # Create a client
+ client = aiplatform_v1beta1.TensorboardServiceClient()
+
+ # Initialize request argument(s)
+ request = aiplatform_v1beta1.DeleteTensorboardRunRequest(
+ name="name_value",
+ )
+
+ # Make the request
+ operation = client.delete_tensorboard_run(request=request)
+
+ print("Waiting for operation to complete...")
+
+ response = operation.result()
+
+ # Handle the response
+ print(response)
+
Args:
request (Union[google.cloud.aiplatform_v1beta1.types.DeleteTensorboardRunRequest, dict]):
The request object. Request message for
@@ -1949,6 +2313,33 @@ def batch_create_tensorboard_time_series(
r"""Batch create TensorboardTimeSeries that belong to a
TensorboardExperiment.
+
+
+ .. code-block::
+
+ from google.cloud import aiplatform_v1beta1
+
+ def sample_batch_create_tensorboard_time_series():
+ # Create a client
+ client = aiplatform_v1beta1.TensorboardServiceClient()
+
+ # Initialize request argument(s)
+ requests = aiplatform_v1beta1.CreateTensorboardTimeSeriesRequest()
+ requests.parent = "parent_value"
+ requests.tensorboard_time_series.display_name = "display_name_value"
+ requests.tensorboard_time_series.value_type = "BLOB_SEQUENCE"
+
+ request = aiplatform_v1beta1.BatchCreateTensorboardTimeSeriesRequest(
+ parent="parent_value",
+ requests=requests,
+ )
+
+ # Make the request
+ response = client.batch_create_tensorboard_time_series(request=request)
+
+ # Handle the response
+ print(response)
+
Args:
request (Union[google.cloud.aiplatform_v1beta1.types.BatchCreateTensorboardTimeSeriesRequest, dict]):
The request object. Request message for
@@ -2045,6 +2436,31 @@ def create_tensorboard_time_series(
) -> gca_tensorboard_time_series.TensorboardTimeSeries:
r"""Creates a TensorboardTimeSeries.
+
+ .. code-block::
+
+ from google.cloud import aiplatform_v1beta1
+
+ def sample_create_tensorboard_time_series():
+ # Create a client
+ client = aiplatform_v1beta1.TensorboardServiceClient()
+
+ # Initialize request argument(s)
+ tensorboard_time_series = aiplatform_v1beta1.TensorboardTimeSeries()
+ tensorboard_time_series.display_name = "display_name_value"
+ tensorboard_time_series.value_type = "BLOB_SEQUENCE"
+
+ request = aiplatform_v1beta1.CreateTensorboardTimeSeriesRequest(
+ parent="parent_value",
+ tensorboard_time_series=tensorboard_time_series,
+ )
+
+ # Make the request
+ response = client.create_tensorboard_time_series(request=request)
+
+ # Handle the response
+ print(response)
+
Args:
request (Union[google.cloud.aiplatform_v1beta1.types.CreateTensorboardTimeSeriesRequest, dict]):
The request object. Request message for
@@ -2132,6 +2548,26 @@ def get_tensorboard_time_series(
) -> tensorboard_time_series.TensorboardTimeSeries:
r"""Gets a TensorboardTimeSeries.
+
+ .. code-block::
+
+ from google.cloud import aiplatform_v1beta1
+
+ def sample_get_tensorboard_time_series():
+ # Create a client
+ client = aiplatform_v1beta1.TensorboardServiceClient()
+
+ # Initialize request argument(s)
+ request = aiplatform_v1beta1.GetTensorboardTimeSeriesRequest(
+ name="name_value",
+ )
+
+ # Make the request
+ response = client.get_tensorboard_time_series(request=request)
+
+ # Handle the response
+ print(response)
+
Args:
request (Union[google.cloud.aiplatform_v1beta1.types.GetTensorboardTimeSeriesRequest, dict]):
The request object. Request message for
@@ -2209,6 +2645,30 @@ def update_tensorboard_time_series(
) -> gca_tensorboard_time_series.TensorboardTimeSeries:
r"""Updates a TensorboardTimeSeries.
+
+ .. code-block::
+
+ from google.cloud import aiplatform_v1beta1
+
+ def sample_update_tensorboard_time_series():
+ # Create a client
+ client = aiplatform_v1beta1.TensorboardServiceClient()
+
+ # Initialize request argument(s)
+ tensorboard_time_series = aiplatform_v1beta1.TensorboardTimeSeries()
+ tensorboard_time_series.display_name = "display_name_value"
+ tensorboard_time_series.value_type = "BLOB_SEQUENCE"
+
+ request = aiplatform_v1beta1.UpdateTensorboardTimeSeriesRequest(
+ tensorboard_time_series=tensorboard_time_series,
+ )
+
+ # Make the request
+ response = client.update_tensorboard_time_series(request=request)
+
+ # Handle the response
+ print(response)
+
Args:
request (Union[google.cloud.aiplatform_v1beta1.types.UpdateTensorboardTimeSeriesRequest, dict]):
The request object. Request message for
@@ -2309,6 +2769,27 @@ def list_tensorboard_time_series(
) -> pagers.ListTensorboardTimeSeriesPager:
r"""Lists TensorboardTimeSeries in a Location.
+
+ .. code-block::
+
+ from google.cloud import aiplatform_v1beta1
+
+ def sample_list_tensorboard_time_series():
+ # Create a client
+ client = aiplatform_v1beta1.TensorboardServiceClient()
+
+ # Initialize request argument(s)
+ request = aiplatform_v1beta1.ListTensorboardTimeSeriesRequest(
+ parent="parent_value",
+ )
+
+ # Make the request
+ page_result = client.list_tensorboard_time_series(request=request)
+
+ # Handle the response
+ for response in page_result:
+ print(response)
+
Args:
request (Union[google.cloud.aiplatform_v1beta1.types.ListTensorboardTimeSeriesRequest, dict]):
The request object. Request message for
@@ -2397,6 +2878,30 @@ def delete_tensorboard_time_series(
) -> gac_operation.Operation:
r"""Deletes a TensorboardTimeSeries.
+
+ .. code-block::
+
+ from google.cloud import aiplatform_v1beta1
+
+ def sample_delete_tensorboard_time_series():
+ # Create a client
+ client = aiplatform_v1beta1.TensorboardServiceClient()
+
+ # Initialize request argument(s)
+ request = aiplatform_v1beta1.DeleteTensorboardTimeSeriesRequest(
+ name="name_value",
+ )
+
+ # Make the request
+ operation = client.delete_tensorboard_time_series(request=request)
+
+ print("Waiting for operation to complete...")
+
+ response = operation.result()
+
+ # Handle the response
+ print(response)
+
Args:
request (Union[google.cloud.aiplatform_v1beta1.types.DeleteTensorboardTimeSeriesRequest, dict]):
The request object. Request message for
@@ -2501,6 +3006,28 @@ def batch_read_tensorboard_time_series_data(
Otherwise, that limit number of data points will be
randomly selected from this time series and returned.
+
+
+ .. code-block::
+
+ from google.cloud import aiplatform_v1beta1
+
+ def sample_batch_read_tensorboard_time_series_data():
+ # Create a client
+ client = aiplatform_v1beta1.TensorboardServiceClient()
+
+ # Initialize request argument(s)
+ request = aiplatform_v1beta1.BatchReadTensorboardTimeSeriesDataRequest(
+ tensorboard="tensorboard_value",
+ time_series=['time_series_value_1', 'time_series_value_2'],
+ )
+
+ # Make the request
+ response = client.batch_read_tensorboard_time_series_data(request=request)
+
+ # Handle the response
+ print(response)
+
Args:
request (Union[google.cloud.aiplatform_v1beta1.types.BatchReadTensorboardTimeSeriesDataRequest, dict]):
The request object. Request message for
@@ -2591,6 +3118,27 @@ def read_tensorboard_time_series_data(
from this time series and returned. This value can be changed by
changing max_data_points, which can't be greater than 10k.
+
+
+ .. code-block::
+
+ from google.cloud import aiplatform_v1beta1
+
+ def sample_read_tensorboard_time_series_data():
+ # Create a client
+ client = aiplatform_v1beta1.TensorboardServiceClient()
+
+ # Initialize request argument(s)
+ request = aiplatform_v1beta1.ReadTensorboardTimeSeriesDataRequest(
+ tensorboard_time_series="tensorboard_time_series_value",
+ )
+
+ # Make the request
+ response = client.read_tensorboard_time_series_data(request=request)
+
+ # Handle the response
+ print(response)
+
Args:
request (Union[google.cloud.aiplatform_v1beta1.types.ReadTensorboardTimeSeriesDataRequest, dict]):
The request object. Request message for
@@ -2672,6 +3220,28 @@ def read_tensorboard_blob_data(
project's Cloud Storage bucket without users having to
obtain Cloud Storage access permission.
+
+
+ .. code-block::
+
+ from google.cloud import aiplatform_v1beta1
+
+ def sample_read_tensorboard_blob_data():
+ # Create a client
+ client = aiplatform_v1beta1.TensorboardServiceClient()
+
+ # Initialize request argument(s)
+ request = aiplatform_v1beta1.ReadTensorboardBlobDataRequest(
+ time_series="time_series_value",
+ )
+
+ # Make the request
+ stream = client.read_tensorboard_blob_data(request=request)
+
+ # Handle the response
+ for response in stream:
+ print(response)
+
Args:
request (Union[google.cloud.aiplatform_v1beta1.types.ReadTensorboardBlobDataRequest, dict]):
The request object. Request message for
@@ -2755,6 +3325,33 @@ def write_tensorboard_experiment_data(
TensorboardTimeSeries in multiple TensorboardRun's. If
any data fail to be ingested, an error will be returned.
+
+
+ .. code-block::
+
+ from google.cloud import aiplatform_v1beta1
+
+ def sample_write_tensorboard_experiment_data():
+ # Create a client
+ client = aiplatform_v1beta1.TensorboardServiceClient()
+
+ # Initialize request argument(s)
+ write_run_data_requests = aiplatform_v1beta1.WriteTensorboardRunDataRequest()
+ write_run_data_requests.tensorboard_run = "tensorboard_run_value"
+ write_run_data_requests.time_series_data.tensorboard_time_series_id = "tensorboard_time_series_id_value"
+ write_run_data_requests.time_series_data.value_type = "BLOB_SEQUENCE"
+
+ request = aiplatform_v1beta1.WriteTensorboardExperimentDataRequest(
+ tensorboard_experiment="tensorboard_experiment_value",
+ write_run_data_requests=write_run_data_requests,
+ )
+
+ # Make the request
+ response = client.write_tensorboard_experiment_data(request=request)
+
+ # Handle the response
+ print(response)
+
Args:
request (Union[google.cloud.aiplatform_v1beta1.types.WriteTensorboardExperimentDataRequest, dict]):
The request object. Request message for
@@ -2845,6 +3442,32 @@ def write_tensorboard_run_data(
TensorboardTimeSeries under a TensorboardRun. If any
data fail to be ingested, an error will be returned.
+
+
+ .. code-block::
+
+ from google.cloud import aiplatform_v1beta1
+
+ def sample_write_tensorboard_run_data():
+ # Create a client
+ client = aiplatform_v1beta1.TensorboardServiceClient()
+
+ # Initialize request argument(s)
+ time_series_data = aiplatform_v1beta1.TimeSeriesData()
+ time_series_data.tensorboard_time_series_id = "tensorboard_time_series_id_value"
+ time_series_data.value_type = "BLOB_SEQUENCE"
+
+ request = aiplatform_v1beta1.WriteTensorboardRunDataRequest(
+ tensorboard_run="tensorboard_run_value",
+ time_series_data=time_series_data,
+ )
+
+ # Make the request
+ response = client.write_tensorboard_run_data(request=request)
+
+ # Handle the response
+ print(response)
+
Args:
request (Union[google.cloud.aiplatform_v1beta1.types.WriteTensorboardRunDataRequest, dict]):
The request object. Request message for
@@ -2939,6 +3562,28 @@ def export_tensorboard_time_series_data(
r"""Exports a TensorboardTimeSeries' data. Data is
returned in paginated responses.
+
+
+ .. code-block::
+
+ from google.cloud import aiplatform_v1beta1
+
+ def sample_export_tensorboard_time_series_data():
+ # Create a client
+ client = aiplatform_v1beta1.TensorboardServiceClient()
+
+ # Initialize request argument(s)
+ request = aiplatform_v1beta1.ExportTensorboardTimeSeriesDataRequest(
+ tensorboard_time_series="tensorboard_time_series_value",
+ )
+
+ # Make the request
+ page_result = client.export_tensorboard_time_series_data(request=request)
+
+ # Handle the response
+ for response in page_result:
+ print(response)
+
Args:
request (Union[google.cloud.aiplatform_v1beta1.types.ExportTensorboardTimeSeriesDataRequest, dict]):
The request object. Request message for
diff --git a/google/cloud/aiplatform_v1beta1/services/vizier_service/async_client.py b/google/cloud/aiplatform_v1beta1/services/vizier_service/async_client.py
index c55267c36f..a4d01e2e2d 100644
--- a/google/cloud/aiplatform_v1beta1/services/vizier_service/async_client.py
+++ b/google/cloud/aiplatform_v1beta1/services/vizier_service/async_client.py
@@ -228,6 +228,35 @@ async def create_study(
r"""Creates a Study. A resource name will be generated
after creation of the Study.
+
+ .. code-block::
+
+ from google.cloud import aiplatform_v1beta1
+
+ def sample_create_study():
+ # Create a client
+ client = aiplatform_v1beta1.VizierServiceClient()
+
+ # Initialize request argument(s)
+ study = aiplatform_v1beta1.Study()
+ study.display_name = "display_name_value"
+ study.study_spec.metrics.metric_id = "metric_id_value"
+ study.study_spec.metrics.goal = "MINIMIZE"
+ study.study_spec.parameters.double_value_spec.min_value = 0.96
+ study.study_spec.parameters.double_value_spec.max_value = 0.962
+ study.study_spec.parameters.parameter_id = "parameter_id_value"
+
+ request = aiplatform_v1beta1.CreateStudyRequest(
+ parent="parent_value",
+ study=study,
+ )
+
+ # Make the request
+ response = client.create_study(request=request)
+
+ # Handle the response
+ print(response)
+
Args:
request (Union[google.cloud.aiplatform_v1beta1.types.CreateStudyRequest, dict]):
The request object. Request message for
@@ -307,6 +336,25 @@ async def get_study(
) -> study.Study:
r"""Gets a Study by name.
+ .. code-block::
+
+ from google.cloud import aiplatform_v1beta1
+
+ def sample_get_study():
+ # Create a client
+ client = aiplatform_v1beta1.VizierServiceClient()
+
+ # Initialize request argument(s)
+ request = aiplatform_v1beta1.GetStudyRequest(
+ name="name_value",
+ )
+
+ # Make the request
+ response = client.get_study(request=request)
+
+ # Handle the response
+ print(response)
+
Args:
request (Union[google.cloud.aiplatform_v1beta1.types.GetStudyRequest, dict]):
The request object. Request message for
@@ -377,6 +425,27 @@ async def list_studies(
r"""Lists all the studies in a region for an associated
project.
+
+ .. code-block::
+
+ from google.cloud import aiplatform_v1beta1
+
+ def sample_list_studies():
+ # Create a client
+ client = aiplatform_v1beta1.VizierServiceClient()
+
+ # Initialize request argument(s)
+ request = aiplatform_v1beta1.ListStudiesRequest(
+ parent="parent_value",
+ )
+
+ # Make the request
+ page_result = client.list_studies(request=request)
+
+ # Handle the response
+ for response in page_result:
+ print(response)
+
Args:
request (Union[google.cloud.aiplatform_v1beta1.types.ListStudiesRequest, dict]):
The request object. Request message for
@@ -458,6 +527,22 @@ async def delete_study(
) -> None:
r"""Deletes a Study.
+ .. code-block::
+
+ from google.cloud import aiplatform_v1beta1
+
+ def sample_delete_study():
+ # Create a client
+ client = aiplatform_v1beta1.VizierServiceClient()
+
+ # Initialize request argument(s)
+ request = aiplatform_v1beta1.DeleteStudyRequest(
+ name="name_value",
+ )
+
+ # Make the request
+ client.delete_study(request=request)
+
Args:
request (Union[google.cloud.aiplatform_v1beta1.types.DeleteStudyRequest, dict]):
The request object. Request message for
@@ -524,6 +609,27 @@ async def lookup_study(
r"""Looks a study up using the user-defined display_name field
instead of the fully qualified resource name.
+
+ .. code-block::
+
+ from google.cloud import aiplatform_v1beta1
+
+ def sample_lookup_study():
+ # Create a client
+ client = aiplatform_v1beta1.VizierServiceClient()
+
+ # Initialize request argument(s)
+ request = aiplatform_v1beta1.LookupStudyRequest(
+ parent="parent_value",
+ display_name="display_name_value",
+ )
+
+ # Make the request
+ response = client.lookup_study(request=request)
+
+ # Handle the response
+ print(response)
+
Args:
request (Union[google.cloud.aiplatform_v1beta1.types.LookupStudyRequest, dict]):
The request object. Request message for
@@ -597,6 +703,32 @@ async def suggest_trials(
long-running operation succeeds, it will contain a
[SuggestTrialsResponse][google.cloud.ml.v1.SuggestTrialsResponse].
+
+ .. code-block::
+
+ from google.cloud import aiplatform_v1beta1
+
+ def sample_suggest_trials():
+ # Create a client
+ client = aiplatform_v1beta1.VizierServiceClient()
+
+ # Initialize request argument(s)
+ request = aiplatform_v1beta1.SuggestTrialsRequest(
+ parent="parent_value",
+ suggestion_count=1744,
+ client_id="client_id_value",
+ )
+
+ # Make the request
+ operation = client.suggest_trials(request=request)
+
+ print("Waiting for operation to complete...")
+
+ response = operation.result()
+
+ # Handle the response
+ print(response)
+
Args:
request (Union[google.cloud.aiplatform_v1beta1.types.SuggestTrialsRequest, dict]):
The request object. Request message for
@@ -660,6 +792,25 @@ async def create_trial(
) -> study.Trial:
r"""Adds a user provided Trial to a Study.
+ .. code-block::
+
+ from google.cloud import aiplatform_v1beta1
+
+ def sample_create_trial():
+ # Create a client
+ client = aiplatform_v1beta1.VizierServiceClient()
+
+ # Initialize request argument(s)
+ request = aiplatform_v1beta1.CreateTrialRequest(
+ parent="parent_value",
+ )
+
+ # Make the request
+ response = client.create_trial(request=request)
+
+ # Handle the response
+ print(response)
+
Args:
request (Union[google.cloud.aiplatform_v1beta1.types.CreateTrialRequest, dict]):
The request object. Request message for
@@ -742,6 +893,25 @@ async def get_trial(
) -> study.Trial:
r"""Gets a Trial.
+ .. code-block::
+
+ from google.cloud import aiplatform_v1beta1
+
+ def sample_get_trial():
+ # Create a client
+ client = aiplatform_v1beta1.VizierServiceClient()
+
+ # Initialize request argument(s)
+ request = aiplatform_v1beta1.GetTrialRequest(
+ name="name_value",
+ )
+
+ # Make the request
+ response = client.get_trial(request=request)
+
+ # Handle the response
+ print(response)
+
Args:
request (Union[google.cloud.aiplatform_v1beta1.types.GetTrialRequest, dict]):
The request object. Request message for
@@ -816,6 +986,26 @@ async def list_trials(
) -> pagers.ListTrialsAsyncPager:
r"""Lists the Trials associated with a Study.
+ .. code-block::
+
+ from google.cloud import aiplatform_v1beta1
+
+ def sample_list_trials():
+ # Create a client
+ client = aiplatform_v1beta1.VizierServiceClient()
+
+ # Initialize request argument(s)
+ request = aiplatform_v1beta1.ListTrialsRequest(
+ parent="parent_value",
+ )
+
+ # Make the request
+ page_result = client.list_trials(request=request)
+
+ # Handle the response
+ for response in page_result:
+ print(response)
+
Args:
request (Union[google.cloud.aiplatform_v1beta1.types.ListTrialsRequest, dict]):
The request object. Request message for
@@ -898,6 +1088,26 @@ async def add_trial_measurement(
Trial. This measurement is assumed to have been taken
before the Trial is complete.
+
+ .. code-block::
+
+ from google.cloud import aiplatform_v1beta1
+
+ def sample_add_trial_measurement():
+ # Create a client
+ client = aiplatform_v1beta1.VizierServiceClient()
+
+ # Initialize request argument(s)
+ request = aiplatform_v1beta1.AddTrialMeasurementRequest(
+ trial_name="trial_name_value",
+ )
+
+ # Make the request
+ response = client.add_trial_measurement(request=request)
+
+ # Handle the response
+ print(response)
+
Args:
request (Union[google.cloud.aiplatform_v1beta1.types.AddTrialMeasurementRequest, dict]):
The request object. Request message for
@@ -952,6 +1162,25 @@ async def complete_trial(
) -> study.Trial:
r"""Marks a Trial as complete.
+ .. code-block::
+
+ from google.cloud import aiplatform_v1beta1
+
+ def sample_complete_trial():
+ # Create a client
+ client = aiplatform_v1beta1.VizierServiceClient()
+
+ # Initialize request argument(s)
+ request = aiplatform_v1beta1.CompleteTrialRequest(
+ name="name_value",
+ )
+
+ # Make the request
+ response = client.complete_trial(request=request)
+
+ # Handle the response
+ print(response)
+
Args:
request (Union[google.cloud.aiplatform_v1beta1.types.CompleteTrialRequest, dict]):
The request object. Request message for
@@ -1005,6 +1234,22 @@ async def delete_trial(
) -> None:
r"""Deletes a Trial.
+ .. code-block::
+
+ from google.cloud import aiplatform_v1beta1
+
+ def sample_delete_trial():
+ # Create a client
+ client = aiplatform_v1beta1.VizierServiceClient()
+
+ # Initialize request argument(s)
+ request = aiplatform_v1beta1.DeleteTrialRequest(
+ name="name_value",
+ )
+
+ # Make the request
+ client.delete_trial(request=request)
+
Args:
request (Union[google.cloud.aiplatform_v1beta1.types.DeleteTrialRequest, dict]):
The request object. Request message for
@@ -1071,6 +1316,30 @@ async def check_trial_early_stopping_state(
will contain a
[CheckTrialEarlyStoppingStateResponse][google.cloud.ml.v1.CheckTrialEarlyStoppingStateResponse].
+
+ .. code-block::
+
+ from google.cloud import aiplatform_v1beta1
+
+ def sample_check_trial_early_stopping_state():
+ # Create a client
+ client = aiplatform_v1beta1.VizierServiceClient()
+
+ # Initialize request argument(s)
+ request = aiplatform_v1beta1.CheckTrialEarlyStoppingStateRequest(
+ trial_name="trial_name_value",
+ )
+
+ # Make the request
+ operation = client.check_trial_early_stopping_state(request=request)
+
+ print("Waiting for operation to complete...")
+
+ response = operation.result()
+
+ # Handle the response
+ print(response)
+
Args:
request (Union[google.cloud.aiplatform_v1beta1.types.CheckTrialEarlyStoppingStateRequest, dict]):
The request object. Request message for
@@ -1134,6 +1403,25 @@ async def stop_trial(
) -> study.Trial:
r"""Stops a Trial.
+ .. code-block::
+
+ from google.cloud import aiplatform_v1beta1
+
+ def sample_stop_trial():
+ # Create a client
+ client = aiplatform_v1beta1.VizierServiceClient()
+
+ # Initialize request argument(s)
+ request = aiplatform_v1beta1.StopTrialRequest(
+ name="name_value",
+ )
+
+ # Make the request
+ response = client.stop_trial(request=request)
+
+ # Handle the response
+ print(response)
+
Args:
request (Union[google.cloud.aiplatform_v1beta1.types.StopTrialRequest, dict]):
The request object. Request message for
@@ -1190,6 +1478,26 @@ async def list_optimal_trials(
pareto-optimal can be checked in wiki page.
https://en.wikipedia.org/wiki/Pareto_efficiency
+
+ .. code-block::
+
+ from google.cloud import aiplatform_v1beta1
+
+ def sample_list_optimal_trials():
+ # Create a client
+ client = aiplatform_v1beta1.VizierServiceClient()
+
+ # Initialize request argument(s)
+ request = aiplatform_v1beta1.ListOptimalTrialsRequest(
+ parent="parent_value",
+ )
+
+ # Make the request
+ response = client.list_optimal_trials(request=request)
+
+ # Handle the response
+ print(response)
+
Args:
request (Union[google.cloud.aiplatform_v1beta1.types.ListOptimalTrialsRequest, dict]):
The request object. Request message for
diff --git a/google/cloud/aiplatform_v1beta1/services/vizier_service/client.py b/google/cloud/aiplatform_v1beta1/services/vizier_service/client.py
index 0d8d1b9700..721b9baf58 100644
--- a/google/cloud/aiplatform_v1beta1/services/vizier_service/client.py
+++ b/google/cloud/aiplatform_v1beta1/services/vizier_service/client.py
@@ -451,6 +451,36 @@ def create_study(
r"""Creates a Study. A resource name will be generated
after creation of the Study.
+
+
+ .. code-block::
+
+ from google.cloud import aiplatform_v1beta1
+
+ def sample_create_study():
+ # Create a client
+ client = aiplatform_v1beta1.VizierServiceClient()
+
+ # Initialize request argument(s)
+ study = aiplatform_v1beta1.Study()
+ study.display_name = "display_name_value"
+ study.study_spec.metrics.metric_id = "metric_id_value"
+ study.study_spec.metrics.goal = "MINIMIZE"
+ study.study_spec.parameters.double_value_spec.min_value = 0.96
+ study.study_spec.parameters.double_value_spec.max_value = 0.962
+ study.study_spec.parameters.parameter_id = "parameter_id_value"
+
+ request = aiplatform_v1beta1.CreateStudyRequest(
+ parent="parent_value",
+ study=study,
+ )
+
+ # Make the request
+ response = client.create_study(request=request)
+
+ # Handle the response
+ print(response)
+
Args:
request (Union[google.cloud.aiplatform_v1beta1.types.CreateStudyRequest, dict]):
The request object. Request message for
@@ -530,6 +560,26 @@ def get_study(
) -> study.Study:
r"""Gets a Study by name.
+
+ .. code-block::
+
+ from google.cloud import aiplatform_v1beta1
+
+ def sample_get_study():
+ # Create a client
+ client = aiplatform_v1beta1.VizierServiceClient()
+
+ # Initialize request argument(s)
+ request = aiplatform_v1beta1.GetStudyRequest(
+ name="name_value",
+ )
+
+ # Make the request
+ response = client.get_study(request=request)
+
+ # Handle the response
+ print(response)
+
Args:
request (Union[google.cloud.aiplatform_v1beta1.types.GetStudyRequest, dict]):
The request object. Request message for
@@ -600,6 +650,28 @@ def list_studies(
r"""Lists all the studies in a region for an associated
project.
+
+
+ .. code-block::
+
+ from google.cloud import aiplatform_v1beta1
+
+ def sample_list_studies():
+ # Create a client
+ client = aiplatform_v1beta1.VizierServiceClient()
+
+ # Initialize request argument(s)
+ request = aiplatform_v1beta1.ListStudiesRequest(
+ parent="parent_value",
+ )
+
+ # Make the request
+ page_result = client.list_studies(request=request)
+
+ # Handle the response
+ for response in page_result:
+ print(response)
+
Args:
request (Union[google.cloud.aiplatform_v1beta1.types.ListStudiesRequest, dict]):
The request object. Request message for
@@ -681,6 +753,23 @@ def delete_study(
) -> None:
r"""Deletes a Study.
+
+        .. code-block:: python
+
+ from google.cloud import aiplatform_v1beta1
+
+ def sample_delete_study():
+ # Create a client
+ client = aiplatform_v1beta1.VizierServiceClient()
+
+ # Initialize request argument(s)
+ request = aiplatform_v1beta1.DeleteStudyRequest(
+ name="name_value",
+ )
+
+ # Make the request
+ client.delete_study(request=request)
+
Args:
request (Union[google.cloud.aiplatform_v1beta1.types.DeleteStudyRequest, dict]):
The request object. Request message for
@@ -747,6 +836,28 @@ def lookup_study(
r"""Looks a study up using the user-defined display_name field
instead of the fully qualified resource name.
+
+
+        .. code-block:: python
+
+ from google.cloud import aiplatform_v1beta1
+
+ def sample_lookup_study():
+ # Create a client
+ client = aiplatform_v1beta1.VizierServiceClient()
+
+ # Initialize request argument(s)
+ request = aiplatform_v1beta1.LookupStudyRequest(
+ parent="parent_value",
+ display_name="display_name_value",
+ )
+
+ # Make the request
+ response = client.lookup_study(request=request)
+
+ # Handle the response
+ print(response)
+
Args:
request (Union[google.cloud.aiplatform_v1beta1.types.LookupStudyRequest, dict]):
The request object. Request message for
@@ -820,6 +931,33 @@ def suggest_trials(
long-running operation succeeds, it will contain a
[SuggestTrialsResponse][google.cloud.ml.v1.SuggestTrialsResponse].
+
+
+        .. code-block:: python
+
+ from google.cloud import aiplatform_v1beta1
+
+ def sample_suggest_trials():
+ # Create a client
+ client = aiplatform_v1beta1.VizierServiceClient()
+
+ # Initialize request argument(s)
+ request = aiplatform_v1beta1.SuggestTrialsRequest(
+ parent="parent_value",
+ suggestion_count=1744,
+ client_id="client_id_value",
+ )
+
+ # Make the request
+ operation = client.suggest_trials(request=request)
+
+ print("Waiting for operation to complete...")
+
+ response = operation.result()
+
+ # Handle the response
+ print(response)
+
Args:
request (Union[google.cloud.aiplatform_v1beta1.types.SuggestTrialsRequest, dict]):
The request object. Request message for
@@ -884,6 +1022,26 @@ def create_trial(
) -> study.Trial:
r"""Adds a user provided Trial to a Study.
+
+        .. code-block:: python
+
+ from google.cloud import aiplatform_v1beta1
+
+ def sample_create_trial():
+ # Create a client
+ client = aiplatform_v1beta1.VizierServiceClient()
+
+ # Initialize request argument(s)
+ request = aiplatform_v1beta1.CreateTrialRequest(
+ parent="parent_value",
+ )
+
+ # Make the request
+ response = client.create_trial(request=request)
+
+ # Handle the response
+ print(response)
+
Args:
request (Union[google.cloud.aiplatform_v1beta1.types.CreateTrialRequest, dict]):
The request object. Request message for
@@ -966,6 +1124,26 @@ def get_trial(
) -> study.Trial:
r"""Gets a Trial.
+
+        .. code-block:: python
+
+ from google.cloud import aiplatform_v1beta1
+
+ def sample_get_trial():
+ # Create a client
+ client = aiplatform_v1beta1.VizierServiceClient()
+
+ # Initialize request argument(s)
+ request = aiplatform_v1beta1.GetTrialRequest(
+ name="name_value",
+ )
+
+ # Make the request
+ response = client.get_trial(request=request)
+
+ # Handle the response
+ print(response)
+
Args:
request (Union[google.cloud.aiplatform_v1beta1.types.GetTrialRequest, dict]):
The request object. Request message for
@@ -1040,6 +1218,27 @@ def list_trials(
) -> pagers.ListTrialsPager:
r"""Lists the Trials associated with a Study.
+
+        .. code-block:: python
+
+ from google.cloud import aiplatform_v1beta1
+
+ def sample_list_trials():
+ # Create a client
+ client = aiplatform_v1beta1.VizierServiceClient()
+
+ # Initialize request argument(s)
+ request = aiplatform_v1beta1.ListTrialsRequest(
+ parent="parent_value",
+ )
+
+ # Make the request
+ page_result = client.list_trials(request=request)
+
+ # Handle the response
+ for response in page_result:
+ print(response)
+
Args:
request (Union[google.cloud.aiplatform_v1beta1.types.ListTrialsRequest, dict]):
The request object. Request message for
@@ -1122,6 +1321,27 @@ def add_trial_measurement(
Trial. This measurement is assumed to have been taken
before the Trial is complete.
+
+
+        .. code-block:: python
+
+ from google.cloud import aiplatform_v1beta1
+
+ def sample_add_trial_measurement():
+ # Create a client
+ client = aiplatform_v1beta1.VizierServiceClient()
+
+ # Initialize request argument(s)
+ request = aiplatform_v1beta1.AddTrialMeasurementRequest(
+ trial_name="trial_name_value",
+ )
+
+ # Make the request
+ response = client.add_trial_measurement(request=request)
+
+ # Handle the response
+ print(response)
+
Args:
request (Union[google.cloud.aiplatform_v1beta1.types.AddTrialMeasurementRequest, dict]):
The request object. Request message for
@@ -1177,6 +1397,26 @@ def complete_trial(
) -> study.Trial:
r"""Marks a Trial as complete.
+
+        .. code-block:: python
+
+ from google.cloud import aiplatform_v1beta1
+
+ def sample_complete_trial():
+ # Create a client
+ client = aiplatform_v1beta1.VizierServiceClient()
+
+ # Initialize request argument(s)
+ request = aiplatform_v1beta1.CompleteTrialRequest(
+ name="name_value",
+ )
+
+ # Make the request
+ response = client.complete_trial(request=request)
+
+ # Handle the response
+ print(response)
+
Args:
request (Union[google.cloud.aiplatform_v1beta1.types.CompleteTrialRequest, dict]):
The request object. Request message for
@@ -1231,6 +1471,23 @@ def delete_trial(
) -> None:
r"""Deletes a Trial.
+
+        .. code-block:: python
+
+ from google.cloud import aiplatform_v1beta1
+
+ def sample_delete_trial():
+ # Create a client
+ client = aiplatform_v1beta1.VizierServiceClient()
+
+ # Initialize request argument(s)
+ request = aiplatform_v1beta1.DeleteTrialRequest(
+ name="name_value",
+ )
+
+ # Make the request
+ client.delete_trial(request=request)
+
Args:
request (Union[google.cloud.aiplatform_v1beta1.types.DeleteTrialRequest, dict]):
The request object. Request message for
@@ -1297,6 +1554,31 @@ def check_trial_early_stopping_state(
will contain a
[CheckTrialEarlyStoppingStateResponse][google.cloud.ml.v1.CheckTrialEarlyStoppingStateResponse].
+
+
+        .. code-block:: python
+
+ from google.cloud import aiplatform_v1beta1
+
+ def sample_check_trial_early_stopping_state():
+ # Create a client
+ client = aiplatform_v1beta1.VizierServiceClient()
+
+ # Initialize request argument(s)
+ request = aiplatform_v1beta1.CheckTrialEarlyStoppingStateRequest(
+ trial_name="trial_name_value",
+ )
+
+ # Make the request
+ operation = client.check_trial_early_stopping_state(request=request)
+
+ print("Waiting for operation to complete...")
+
+ response = operation.result()
+
+ # Handle the response
+ print(response)
+
Args:
request (Union[google.cloud.aiplatform_v1beta1.types.CheckTrialEarlyStoppingStateRequest, dict]):
The request object. Request message for
@@ -1363,6 +1645,26 @@ def stop_trial(
) -> study.Trial:
r"""Stops a Trial.
+
+        .. code-block:: python
+
+ from google.cloud import aiplatform_v1beta1
+
+ def sample_stop_trial():
+ # Create a client
+ client = aiplatform_v1beta1.VizierServiceClient()
+
+ # Initialize request argument(s)
+ request = aiplatform_v1beta1.StopTrialRequest(
+ name="name_value",
+ )
+
+ # Make the request
+ response = client.stop_trial(request=request)
+
+ # Handle the response
+ print(response)
+
Args:
request (Union[google.cloud.aiplatform_v1beta1.types.StopTrialRequest, dict]):
The request object. Request message for
@@ -1420,6 +1722,27 @@ def list_optimal_trials(
pareto-optimal can be checked in wiki page.
https://en.wikipedia.org/wiki/Pareto_efficiency
+
+
+        .. code-block:: python
+
+ from google.cloud import aiplatform_v1beta1
+
+ def sample_list_optimal_trials():
+ # Create a client
+ client = aiplatform_v1beta1.VizierServiceClient()
+
+ # Initialize request argument(s)
+ request = aiplatform_v1beta1.ListOptimalTrialsRequest(
+ parent="parent_value",
+ )
+
+ # Make the request
+ response = client.list_optimal_trials(request=request)
+
+ # Handle the response
+ print(response)
+
Args:
request (Union[google.cloud.aiplatform_v1beta1.types.ListOptimalTrialsRequest, dict]):
The request object. Request message for
diff --git a/google/cloud/aiplatform_v1beta1/types/accelerator_type.py b/google/cloud/aiplatform_v1beta1/types/accelerator_type.py
index 6fb2d8899d..eb7ff5322d 100644
--- a/google/cloud/aiplatform_v1beta1/types/accelerator_type.py
+++ b/google/cloud/aiplatform_v1beta1/types/accelerator_type.py
@@ -30,6 +30,8 @@ class AcceleratorType(proto.Enum):
NVIDIA_TESLA_P4 = 4
NVIDIA_TESLA_T4 = 5
NVIDIA_TESLA_A100 = 8
+ TPU_V2 = 6
+ TPU_V3 = 7
__all__ = tuple(sorted(__protobuf__.manifest))
diff --git a/google/cloud/aiplatform_v1beta1/types/job_service.py b/google/cloud/aiplatform_v1beta1/types/job_service.py
index 0988a81f3c..2ef0eea5dd 100644
--- a/google/cloud/aiplatform_v1beta1/types/job_service.py
+++ b/google/cloud/aiplatform_v1beta1/types/job_service.py
@@ -630,7 +630,7 @@ class SearchModelDeploymentMonitoringStatsAnomaliesRequest(proto.Message):
model_deployment_monitoring_job (str):
Required. ModelDeploymentMonitoring Job resource name.
Format:
- \`projects/{project}/locations/{location}/modelDeploymentMonitoringJobs/{model_deployment_monitoring_job}
+ ``projects/{project}/locations/{location}/modelDeploymentMonitoringJobs/{model_deployment_monitoring_job}``
deployed_model_id (str):
Required. The DeployedModel ID of the
[ModelDeploymentMonitoringObjectiveConfig.deployed_model_id].
diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1_dataset_service_create_dataset_async.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1_dataset_service_create_dataset_async.py
index d66060dd77..17d0fc381d 100644
--- a/samples/generated_samples/aiplatform_generated_aiplatform_v1_dataset_service_create_dataset_async.py
+++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1_dataset_service_create_dataset_async.py
@@ -28,8 +28,6 @@
async def sample_create_dataset():
- """Snippet for create_dataset"""
-
# Create a client
client = aiplatform_v1.DatasetServiceAsyncClient()
@@ -40,7 +38,7 @@ async def sample_create_dataset():
dataset.metadata.null_value = "NULL_VALUE"
request = aiplatform_v1.CreateDatasetRequest(
- parent="projects/{project}/locations/{location}",
+ parent="parent_value",
dataset=dataset,
)
@@ -50,6 +48,8 @@ async def sample_create_dataset():
print("Waiting for operation to complete...")
response = await operation.result()
+
+ # Handle the response
print(response)
# [END aiplatform_generated_aiplatform_v1_DatasetService_CreateDataset_async]
diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1_dataset_service_create_dataset_sync.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1_dataset_service_create_dataset_sync.py
index 67028e90df..e0f69bb8db 100644
--- a/samples/generated_samples/aiplatform_generated_aiplatform_v1_dataset_service_create_dataset_sync.py
+++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1_dataset_service_create_dataset_sync.py
@@ -28,8 +28,6 @@
def sample_create_dataset():
- """Snippet for create_dataset"""
-
# Create a client
client = aiplatform_v1.DatasetServiceClient()
@@ -40,7 +38,7 @@ def sample_create_dataset():
dataset.metadata.null_value = "NULL_VALUE"
request = aiplatform_v1.CreateDatasetRequest(
- parent="projects/{project}/locations/{location}",
+ parent="parent_value",
dataset=dataset,
)
@@ -50,6 +48,8 @@ def sample_create_dataset():
print("Waiting for operation to complete...")
response = operation.result()
+
+ # Handle the response
print(response)
# [END aiplatform_generated_aiplatform_v1_DatasetService_CreateDataset_sync]
diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1_dataset_service_delete_dataset_async.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1_dataset_service_delete_dataset_async.py
index 6fb840cb71..fdb4fb4133 100644
--- a/samples/generated_samples/aiplatform_generated_aiplatform_v1_dataset_service_delete_dataset_async.py
+++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1_dataset_service_delete_dataset_async.py
@@ -28,14 +28,12 @@
async def sample_delete_dataset():
- """Snippet for delete_dataset"""
-
# Create a client
client = aiplatform_v1.DatasetServiceAsyncClient()
# Initialize request argument(s)
request = aiplatform_v1.DeleteDatasetRequest(
- name="projects/{project}/locations/{location}/datasets/{dataset}",
+ name="name_value",
)
# Make the request
@@ -44,6 +42,8 @@ async def sample_delete_dataset():
print("Waiting for operation to complete...")
response = await operation.result()
+
+ # Handle the response
print(response)
# [END aiplatform_generated_aiplatform_v1_DatasetService_DeleteDataset_async]
diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1_dataset_service_delete_dataset_sync.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1_dataset_service_delete_dataset_sync.py
index bbce6a2f7d..92fa79532c 100644
--- a/samples/generated_samples/aiplatform_generated_aiplatform_v1_dataset_service_delete_dataset_sync.py
+++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1_dataset_service_delete_dataset_sync.py
@@ -28,14 +28,12 @@
def sample_delete_dataset():
- """Snippet for delete_dataset"""
-
# Create a client
client = aiplatform_v1.DatasetServiceClient()
# Initialize request argument(s)
request = aiplatform_v1.DeleteDatasetRequest(
- name="projects/{project}/locations/{location}/datasets/{dataset}",
+ name="name_value",
)
# Make the request
@@ -44,6 +42,8 @@ def sample_delete_dataset():
print("Waiting for operation to complete...")
response = operation.result()
+
+ # Handle the response
print(response)
# [END aiplatform_generated_aiplatform_v1_DatasetService_DeleteDataset_sync]
diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1_dataset_service_export_data_async.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1_dataset_service_export_data_async.py
index 54f67c23ca..fd1820a6c3 100644
--- a/samples/generated_samples/aiplatform_generated_aiplatform_v1_dataset_service_export_data_async.py
+++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1_dataset_service_export_data_async.py
@@ -28,8 +28,6 @@
async def sample_export_data():
- """Snippet for export_data"""
-
# Create a client
client = aiplatform_v1.DatasetServiceAsyncClient()
@@ -38,7 +36,7 @@ async def sample_export_data():
export_config.gcs_destination.output_uri_prefix = "output_uri_prefix_value"
request = aiplatform_v1.ExportDataRequest(
- name="projects/{project}/locations/{location}/datasets/{dataset}",
+ name="name_value",
export_config=export_config,
)
@@ -48,6 +46,8 @@ async def sample_export_data():
print("Waiting for operation to complete...")
response = await operation.result()
+
+ # Handle the response
print(response)
# [END aiplatform_generated_aiplatform_v1_DatasetService_ExportData_async]
diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1_dataset_service_export_data_sync.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1_dataset_service_export_data_sync.py
index 0a9a29921e..2f5e55af10 100644
--- a/samples/generated_samples/aiplatform_generated_aiplatform_v1_dataset_service_export_data_sync.py
+++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1_dataset_service_export_data_sync.py
@@ -28,8 +28,6 @@
def sample_export_data():
- """Snippet for export_data"""
-
# Create a client
client = aiplatform_v1.DatasetServiceClient()
@@ -38,7 +36,7 @@ def sample_export_data():
export_config.gcs_destination.output_uri_prefix = "output_uri_prefix_value"
request = aiplatform_v1.ExportDataRequest(
- name="projects/{project}/locations/{location}/datasets/{dataset}",
+ name="name_value",
export_config=export_config,
)
@@ -48,6 +46,8 @@ def sample_export_data():
print("Waiting for operation to complete...")
response = operation.result()
+
+ # Handle the response
print(response)
# [END aiplatform_generated_aiplatform_v1_DatasetService_ExportData_sync]
diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1_dataset_service_get_annotation_spec_async.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1_dataset_service_get_annotation_spec_async.py
index ddec15a3dc..2e120b46f0 100644
--- a/samples/generated_samples/aiplatform_generated_aiplatform_v1_dataset_service_get_annotation_spec_async.py
+++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1_dataset_service_get_annotation_spec_async.py
@@ -28,20 +28,18 @@
async def sample_get_annotation_spec():
- """Snippet for get_annotation_spec"""
-
# Create a client
client = aiplatform_v1.DatasetServiceAsyncClient()
# Initialize request argument(s)
request = aiplatform_v1.GetAnnotationSpecRequest(
- name="projects/{project}/locations/{location}/datasets/{dataset}/annotationSpecs/{annotation_spec}",
+ name="name_value",
)
# Make the request
response = await client.get_annotation_spec(request=request)
- # Handle response
+ # Handle the response
print(response)
# [END aiplatform_generated_aiplatform_v1_DatasetService_GetAnnotationSpec_async]
diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1_dataset_service_get_annotation_spec_sync.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1_dataset_service_get_annotation_spec_sync.py
index 4ace012318..54384727d1 100644
--- a/samples/generated_samples/aiplatform_generated_aiplatform_v1_dataset_service_get_annotation_spec_sync.py
+++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1_dataset_service_get_annotation_spec_sync.py
@@ -28,20 +28,18 @@
def sample_get_annotation_spec():
- """Snippet for get_annotation_spec"""
-
# Create a client
client = aiplatform_v1.DatasetServiceClient()
# Initialize request argument(s)
request = aiplatform_v1.GetAnnotationSpecRequest(
- name="projects/{project}/locations/{location}/datasets/{dataset}/annotationSpecs/{annotation_spec}",
+ name="name_value",
)
# Make the request
response = client.get_annotation_spec(request=request)
- # Handle response
+ # Handle the response
print(response)
# [END aiplatform_generated_aiplatform_v1_DatasetService_GetAnnotationSpec_sync]
diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1_dataset_service_get_dataset_async.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1_dataset_service_get_dataset_async.py
index 9a05b6af5c..79f64616c8 100644
--- a/samples/generated_samples/aiplatform_generated_aiplatform_v1_dataset_service_get_dataset_async.py
+++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1_dataset_service_get_dataset_async.py
@@ -28,20 +28,18 @@
async def sample_get_dataset():
- """Snippet for get_dataset"""
-
# Create a client
client = aiplatform_v1.DatasetServiceAsyncClient()
# Initialize request argument(s)
request = aiplatform_v1.GetDatasetRequest(
- name="projects/{project}/locations/{location}/datasets/{dataset}",
+ name="name_value",
)
# Make the request
response = await client.get_dataset(request=request)
- # Handle response
+ # Handle the response
print(response)
# [END aiplatform_generated_aiplatform_v1_DatasetService_GetDataset_async]
diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1_dataset_service_get_dataset_sync.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1_dataset_service_get_dataset_sync.py
index 6dd89085d9..04c1a00317 100644
--- a/samples/generated_samples/aiplatform_generated_aiplatform_v1_dataset_service_get_dataset_sync.py
+++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1_dataset_service_get_dataset_sync.py
@@ -28,20 +28,18 @@
def sample_get_dataset():
- """Snippet for get_dataset"""
-
# Create a client
client = aiplatform_v1.DatasetServiceClient()
# Initialize request argument(s)
request = aiplatform_v1.GetDatasetRequest(
- name="projects/{project}/locations/{location}/datasets/{dataset}",
+ name="name_value",
)
# Make the request
response = client.get_dataset(request=request)
- # Handle response
+ # Handle the response
print(response)
# [END aiplatform_generated_aiplatform_v1_DatasetService_GetDataset_sync]
diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1_dataset_service_import_data_async.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1_dataset_service_import_data_async.py
index 431cc0d136..bd3635f7cb 100644
--- a/samples/generated_samples/aiplatform_generated_aiplatform_v1_dataset_service_import_data_async.py
+++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1_dataset_service_import_data_async.py
@@ -28,8 +28,6 @@
async def sample_import_data():
- """Snippet for import_data"""
-
# Create a client
client = aiplatform_v1.DatasetServiceAsyncClient()
@@ -39,7 +37,7 @@ async def sample_import_data():
import_configs.import_schema_uri = "import_schema_uri_value"
request = aiplatform_v1.ImportDataRequest(
- name="projects/{project}/locations/{location}/datasets/{dataset}",
+ name="name_value",
import_configs=import_configs,
)
@@ -49,6 +47,8 @@ async def sample_import_data():
print("Waiting for operation to complete...")
response = await operation.result()
+
+ # Handle the response
print(response)
# [END aiplatform_generated_aiplatform_v1_DatasetService_ImportData_async]
diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1_dataset_service_import_data_sync.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1_dataset_service_import_data_sync.py
index e0da542ed2..377cccfdc3 100644
--- a/samples/generated_samples/aiplatform_generated_aiplatform_v1_dataset_service_import_data_sync.py
+++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1_dataset_service_import_data_sync.py
@@ -28,8 +28,6 @@
def sample_import_data():
- """Snippet for import_data"""
-
# Create a client
client = aiplatform_v1.DatasetServiceClient()
@@ -39,7 +37,7 @@ def sample_import_data():
import_configs.import_schema_uri = "import_schema_uri_value"
request = aiplatform_v1.ImportDataRequest(
- name="projects/{project}/locations/{location}/datasets/{dataset}",
+ name="name_value",
import_configs=import_configs,
)
@@ -49,6 +47,8 @@ def sample_import_data():
print("Waiting for operation to complete...")
response = operation.result()
+
+ # Handle the response
print(response)
# [END aiplatform_generated_aiplatform_v1_DatasetService_ImportData_sync]
diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1_dataset_service_list_annotations_async.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1_dataset_service_list_annotations_async.py
index 738db74f9e..a543d8fc1e 100644
--- a/samples/generated_samples/aiplatform_generated_aiplatform_v1_dataset_service_list_annotations_async.py
+++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1_dataset_service_list_annotations_async.py
@@ -28,18 +28,18 @@
async def sample_list_annotations():
- """Snippet for list_annotations"""
-
# Create a client
client = aiplatform_v1.DatasetServiceAsyncClient()
# Initialize request argument(s)
request = aiplatform_v1.ListAnnotationsRequest(
- parent="projects/{project}/locations/{location}/datasets/{dataset}/dataItems/{data_item}",
+ parent="parent_value",
)
# Make the request
page_result = client.list_annotations(request=request)
+
+ # Handle the response
async for response in page_result:
print(response)
diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1_dataset_service_list_annotations_sync.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1_dataset_service_list_annotations_sync.py
index 8b30536f65..139e5925d6 100644
--- a/samples/generated_samples/aiplatform_generated_aiplatform_v1_dataset_service_list_annotations_sync.py
+++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1_dataset_service_list_annotations_sync.py
@@ -28,18 +28,18 @@
def sample_list_annotations():
- """Snippet for list_annotations"""
-
# Create a client
client = aiplatform_v1.DatasetServiceClient()
# Initialize request argument(s)
request = aiplatform_v1.ListAnnotationsRequest(
- parent="projects/{project}/locations/{location}/datasets/{dataset}/dataItems/{data_item}",
+ parent="parent_value",
)
# Make the request
page_result = client.list_annotations(request=request)
+
+ # Handle the response
for response in page_result:
print(response)
diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1_dataset_service_list_data_items_async.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1_dataset_service_list_data_items_async.py
index 819e185fad..e881a176c0 100644
--- a/samples/generated_samples/aiplatform_generated_aiplatform_v1_dataset_service_list_data_items_async.py
+++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1_dataset_service_list_data_items_async.py
@@ -28,18 +28,18 @@
async def sample_list_data_items():
- """Snippet for list_data_items"""
-
# Create a client
client = aiplatform_v1.DatasetServiceAsyncClient()
# Initialize request argument(s)
request = aiplatform_v1.ListDataItemsRequest(
- parent="projects/{project}/locations/{location}/datasets/{dataset}",
+ parent="parent_value",
)
# Make the request
page_result = client.list_data_items(request=request)
+
+ # Handle the response
async for response in page_result:
print(response)
diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1_dataset_service_list_data_items_sync.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1_dataset_service_list_data_items_sync.py
index d89741309b..c573f677c6 100644
--- a/samples/generated_samples/aiplatform_generated_aiplatform_v1_dataset_service_list_data_items_sync.py
+++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1_dataset_service_list_data_items_sync.py
@@ -28,18 +28,18 @@
def sample_list_data_items():
- """Snippet for list_data_items"""
-
# Create a client
client = aiplatform_v1.DatasetServiceClient()
# Initialize request argument(s)
request = aiplatform_v1.ListDataItemsRequest(
- parent="projects/{project}/locations/{location}/datasets/{dataset}",
+ parent="parent_value",
)
# Make the request
page_result = client.list_data_items(request=request)
+
+ # Handle the response
for response in page_result:
print(response)
diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1_dataset_service_list_datasets_async.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1_dataset_service_list_datasets_async.py
index 84a43b1fba..8f28f87efa 100644
--- a/samples/generated_samples/aiplatform_generated_aiplatform_v1_dataset_service_list_datasets_async.py
+++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1_dataset_service_list_datasets_async.py
@@ -28,18 +28,18 @@
async def sample_list_datasets():
- """Snippet for list_datasets"""
-
# Create a client
client = aiplatform_v1.DatasetServiceAsyncClient()
# Initialize request argument(s)
request = aiplatform_v1.ListDatasetsRequest(
- parent="projects/{project}/locations/{location}",
+ parent="parent_value",
)
# Make the request
page_result = client.list_datasets(request=request)
+
+ # Handle the response
async for response in page_result:
print(response)
diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1_dataset_service_list_datasets_sync.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1_dataset_service_list_datasets_sync.py
index a42859e345..146812d6ce 100644
--- a/samples/generated_samples/aiplatform_generated_aiplatform_v1_dataset_service_list_datasets_sync.py
+++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1_dataset_service_list_datasets_sync.py
@@ -28,18 +28,18 @@
def sample_list_datasets():
- """Snippet for list_datasets"""
-
# Create a client
client = aiplatform_v1.DatasetServiceClient()
# Initialize request argument(s)
request = aiplatform_v1.ListDatasetsRequest(
- parent="projects/{project}/locations/{location}",
+ parent="parent_value",
)
# Make the request
page_result = client.list_datasets(request=request)
+
+ # Handle the response
for response in page_result:
print(response)
diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1_dataset_service_update_dataset_async.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1_dataset_service_update_dataset_async.py
index 163d771e9f..7288d8cf61 100644
--- a/samples/generated_samples/aiplatform_generated_aiplatform_v1_dataset_service_update_dataset_async.py
+++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1_dataset_service_update_dataset_async.py
@@ -28,8 +28,6 @@
async def sample_update_dataset():
- """Snippet for update_dataset"""
-
# Create a client
client = aiplatform_v1.DatasetServiceAsyncClient()
@@ -46,7 +44,7 @@ async def sample_update_dataset():
# Make the request
response = await client.update_dataset(request=request)
- # Handle response
+ # Handle the response
print(response)
# [END aiplatform_generated_aiplatform_v1_DatasetService_UpdateDataset_async]
diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1_dataset_service_update_dataset_sync.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1_dataset_service_update_dataset_sync.py
index 280e3b867a..31b939d21f 100644
--- a/samples/generated_samples/aiplatform_generated_aiplatform_v1_dataset_service_update_dataset_sync.py
+++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1_dataset_service_update_dataset_sync.py
@@ -28,8 +28,6 @@
def sample_update_dataset():
- """Snippet for update_dataset"""
-
# Create a client
client = aiplatform_v1.DatasetServiceClient()
@@ -46,7 +44,7 @@ def sample_update_dataset():
# Make the request
response = client.update_dataset(request=request)
- # Handle response
+ # Handle the response
print(response)
# [END aiplatform_generated_aiplatform_v1_DatasetService_UpdateDataset_sync]
diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1_endpoint_service_create_endpoint_async.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1_endpoint_service_create_endpoint_async.py
index 17cde2ef93..dcddb236df 100644
--- a/samples/generated_samples/aiplatform_generated_aiplatform_v1_endpoint_service_create_endpoint_async.py
+++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1_endpoint_service_create_endpoint_async.py
@@ -28,8 +28,6 @@
async def sample_create_endpoint():
- """Snippet for create_endpoint"""
-
# Create a client
client = aiplatform_v1.EndpointServiceAsyncClient()
@@ -38,7 +36,7 @@ async def sample_create_endpoint():
endpoint.display_name = "display_name_value"
request = aiplatform_v1.CreateEndpointRequest(
- parent="projects/{project}/locations/{location}",
+ parent="parent_value",
endpoint=endpoint,
)
@@ -48,6 +46,8 @@ async def sample_create_endpoint():
print("Waiting for operation to complete...")
response = await operation.result()
+
+ # Handle the response
print(response)
# [END aiplatform_generated_aiplatform_v1_EndpointService_CreateEndpoint_async]
diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1_endpoint_service_create_endpoint_sync.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1_endpoint_service_create_endpoint_sync.py
index 859656f47b..edd8136cf5 100644
--- a/samples/generated_samples/aiplatform_generated_aiplatform_v1_endpoint_service_create_endpoint_sync.py
+++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1_endpoint_service_create_endpoint_sync.py
@@ -28,8 +28,6 @@
def sample_create_endpoint():
- """Snippet for create_endpoint"""
-
# Create a client
client = aiplatform_v1.EndpointServiceClient()
@@ -38,7 +36,7 @@ def sample_create_endpoint():
endpoint.display_name = "display_name_value"
request = aiplatform_v1.CreateEndpointRequest(
- parent="projects/{project}/locations/{location}",
+ parent="parent_value",
endpoint=endpoint,
)
@@ -48,6 +46,8 @@ def sample_create_endpoint():
print("Waiting for operation to complete...")
response = operation.result()
+
+ # Handle the response
print(response)
# [END aiplatform_generated_aiplatform_v1_EndpointService_CreateEndpoint_sync]
diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1_endpoint_service_delete_endpoint_async.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1_endpoint_service_delete_endpoint_async.py
index dada747e7e..845111ab9f 100644
--- a/samples/generated_samples/aiplatform_generated_aiplatform_v1_endpoint_service_delete_endpoint_async.py
+++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1_endpoint_service_delete_endpoint_async.py
@@ -28,14 +28,12 @@
async def sample_delete_endpoint():
- """Snippet for delete_endpoint"""
-
# Create a client
client = aiplatform_v1.EndpointServiceAsyncClient()
# Initialize request argument(s)
request = aiplatform_v1.DeleteEndpointRequest(
- name="projects/{project}/locations/{location}/endpoints/{endpoint}",
+ name="name_value",
)
# Make the request
@@ -44,6 +42,8 @@ async def sample_delete_endpoint():
print("Waiting for operation to complete...")
response = await operation.result()
+
+ # Handle the response
print(response)
# [END aiplatform_generated_aiplatform_v1_EndpointService_DeleteEndpoint_async]
diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1_endpoint_service_delete_endpoint_sync.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1_endpoint_service_delete_endpoint_sync.py
index adf08a3169..3f0c12ec1f 100644
--- a/samples/generated_samples/aiplatform_generated_aiplatform_v1_endpoint_service_delete_endpoint_sync.py
+++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1_endpoint_service_delete_endpoint_sync.py
@@ -28,14 +28,12 @@
def sample_delete_endpoint():
- """Snippet for delete_endpoint"""
-
# Create a client
client = aiplatform_v1.EndpointServiceClient()
# Initialize request argument(s)
request = aiplatform_v1.DeleteEndpointRequest(
- name="projects/{project}/locations/{location}/endpoints/{endpoint}",
+ name="name_value",
)
# Make the request
@@ -44,6 +42,8 @@ def sample_delete_endpoint():
print("Waiting for operation to complete...")
response = operation.result()
+
+ # Handle the response
print(response)
# [END aiplatform_generated_aiplatform_v1_EndpointService_DeleteEndpoint_sync]
diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1_endpoint_service_deploy_model_async.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1_endpoint_service_deploy_model_async.py
index 5668c4c4b0..1fd06264cb 100644
--- a/samples/generated_samples/aiplatform_generated_aiplatform_v1_endpoint_service_deploy_model_async.py
+++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1_endpoint_service_deploy_model_async.py
@@ -28,18 +28,16 @@
async def sample_deploy_model():
- """Snippet for deploy_model"""
-
# Create a client
client = aiplatform_v1.EndpointServiceAsyncClient()
# Initialize request argument(s)
deployed_model = aiplatform_v1.DeployedModel()
deployed_model.dedicated_resources.min_replica_count = 1803
- deployed_model.model = "projects/{project}/locations/{location}/models/{model}"
+ deployed_model.model = "model_value"
request = aiplatform_v1.DeployModelRequest(
- endpoint="projects/{project}/locations/{location}/endpoints/{endpoint}",
+ endpoint="endpoint_value",
deployed_model=deployed_model,
)
@@ -49,6 +47,8 @@ async def sample_deploy_model():
print("Waiting for operation to complete...")
response = await operation.result()
+
+ # Handle the response
print(response)
# [END aiplatform_generated_aiplatform_v1_EndpointService_DeployModel_async]
diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1_endpoint_service_deploy_model_sync.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1_endpoint_service_deploy_model_sync.py
index 3583e0ff94..3e865c8bd6 100644
--- a/samples/generated_samples/aiplatform_generated_aiplatform_v1_endpoint_service_deploy_model_sync.py
+++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1_endpoint_service_deploy_model_sync.py
@@ -28,18 +28,16 @@
def sample_deploy_model():
- """Snippet for deploy_model"""
-
# Create a client
client = aiplatform_v1.EndpointServiceClient()
# Initialize request argument(s)
deployed_model = aiplatform_v1.DeployedModel()
deployed_model.dedicated_resources.min_replica_count = 1803
- deployed_model.model = "projects/{project}/locations/{location}/models/{model}"
+ deployed_model.model = "model_value"
request = aiplatform_v1.DeployModelRequest(
- endpoint="projects/{project}/locations/{location}/endpoints/{endpoint}",
+ endpoint="endpoint_value",
deployed_model=deployed_model,
)
@@ -49,6 +47,8 @@ def sample_deploy_model():
print("Waiting for operation to complete...")
response = operation.result()
+
+ # Handle the response
print(response)
# [END aiplatform_generated_aiplatform_v1_EndpointService_DeployModel_sync]
diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1_endpoint_service_get_endpoint_async.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1_endpoint_service_get_endpoint_async.py
index ee69371e24..9a2a1b7c10 100644
--- a/samples/generated_samples/aiplatform_generated_aiplatform_v1_endpoint_service_get_endpoint_async.py
+++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1_endpoint_service_get_endpoint_async.py
@@ -28,20 +28,18 @@
async def sample_get_endpoint():
- """Snippet for get_endpoint"""
-
# Create a client
client = aiplatform_v1.EndpointServiceAsyncClient()
# Initialize request argument(s)
request = aiplatform_v1.GetEndpointRequest(
- name="projects/{project}/locations/{location}/endpoints/{endpoint}",
+ name="name_value",
)
# Make the request
response = await client.get_endpoint(request=request)
- # Handle response
+ # Handle the response
print(response)
# [END aiplatform_generated_aiplatform_v1_EndpointService_GetEndpoint_async]
diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1_endpoint_service_get_endpoint_sync.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1_endpoint_service_get_endpoint_sync.py
index fc8f28471b..37ce3ff0c2 100644
--- a/samples/generated_samples/aiplatform_generated_aiplatform_v1_endpoint_service_get_endpoint_sync.py
+++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1_endpoint_service_get_endpoint_sync.py
@@ -28,20 +28,18 @@
def sample_get_endpoint():
- """Snippet for get_endpoint"""
-
# Create a client
client = aiplatform_v1.EndpointServiceClient()
# Initialize request argument(s)
request = aiplatform_v1.GetEndpointRequest(
- name="projects/{project}/locations/{location}/endpoints/{endpoint}",
+ name="name_value",
)
# Make the request
response = client.get_endpoint(request=request)
- # Handle response
+ # Handle the response
print(response)
# [END aiplatform_generated_aiplatform_v1_EndpointService_GetEndpoint_sync]
diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1_endpoint_service_list_endpoints_async.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1_endpoint_service_list_endpoints_async.py
index 62acd66529..e017e79dd7 100644
--- a/samples/generated_samples/aiplatform_generated_aiplatform_v1_endpoint_service_list_endpoints_async.py
+++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1_endpoint_service_list_endpoints_async.py
@@ -28,18 +28,18 @@
async def sample_list_endpoints():
- """Snippet for list_endpoints"""
-
# Create a client
client = aiplatform_v1.EndpointServiceAsyncClient()
# Initialize request argument(s)
request = aiplatform_v1.ListEndpointsRequest(
- parent="projects/{project}/locations/{location}",
+ parent="parent_value",
)
# Make the request
page_result = client.list_endpoints(request=request)
+
+ # Handle the response
async for response in page_result:
print(response)
diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1_endpoint_service_list_endpoints_sync.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1_endpoint_service_list_endpoints_sync.py
index e55ce277e5..4a06c86195 100644
--- a/samples/generated_samples/aiplatform_generated_aiplatform_v1_endpoint_service_list_endpoints_sync.py
+++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1_endpoint_service_list_endpoints_sync.py
@@ -28,18 +28,18 @@
def sample_list_endpoints():
- """Snippet for list_endpoints"""
-
# Create a client
client = aiplatform_v1.EndpointServiceClient()
# Initialize request argument(s)
request = aiplatform_v1.ListEndpointsRequest(
- parent="projects/{project}/locations/{location}",
+ parent="parent_value",
)
# Make the request
page_result = client.list_endpoints(request=request)
+
+ # Handle the response
for response in page_result:
print(response)
diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1_endpoint_service_undeploy_model_async.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1_endpoint_service_undeploy_model_async.py
index 08fd50e9db..84465cd3c8 100644
--- a/samples/generated_samples/aiplatform_generated_aiplatform_v1_endpoint_service_undeploy_model_async.py
+++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1_endpoint_service_undeploy_model_async.py
@@ -28,14 +28,12 @@
async def sample_undeploy_model():
- """Snippet for undeploy_model"""
-
# Create a client
client = aiplatform_v1.EndpointServiceAsyncClient()
# Initialize request argument(s)
request = aiplatform_v1.UndeployModelRequest(
- endpoint="projects/{project}/locations/{location}/endpoints/{endpoint}",
+ endpoint="endpoint_value",
deployed_model_id="deployed_model_id_value",
)
@@ -45,6 +43,8 @@ async def sample_undeploy_model():
print("Waiting for operation to complete...")
response = await operation.result()
+
+ # Handle the response
print(response)
# [END aiplatform_generated_aiplatform_v1_EndpointService_UndeployModel_async]
diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1_endpoint_service_undeploy_model_sync.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1_endpoint_service_undeploy_model_sync.py
index 1e65e59cbf..e147edf7b7 100644
--- a/samples/generated_samples/aiplatform_generated_aiplatform_v1_endpoint_service_undeploy_model_sync.py
+++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1_endpoint_service_undeploy_model_sync.py
@@ -28,14 +28,12 @@
def sample_undeploy_model():
- """Snippet for undeploy_model"""
-
# Create a client
client = aiplatform_v1.EndpointServiceClient()
# Initialize request argument(s)
request = aiplatform_v1.UndeployModelRequest(
- endpoint="projects/{project}/locations/{location}/endpoints/{endpoint}",
+ endpoint="endpoint_value",
deployed_model_id="deployed_model_id_value",
)
@@ -45,6 +43,8 @@ def sample_undeploy_model():
print("Waiting for operation to complete...")
response = operation.result()
+
+ # Handle the response
print(response)
# [END aiplatform_generated_aiplatform_v1_EndpointService_UndeployModel_sync]
diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1_endpoint_service_update_endpoint_async.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1_endpoint_service_update_endpoint_async.py
index 4a87e65ba5..55bc233f38 100644
--- a/samples/generated_samples/aiplatform_generated_aiplatform_v1_endpoint_service_update_endpoint_async.py
+++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1_endpoint_service_update_endpoint_async.py
@@ -28,8 +28,6 @@
async def sample_update_endpoint():
- """Snippet for update_endpoint"""
-
# Create a client
client = aiplatform_v1.EndpointServiceAsyncClient()
@@ -44,7 +42,7 @@ async def sample_update_endpoint():
# Make the request
response = await client.update_endpoint(request=request)
- # Handle response
+ # Handle the response
print(response)
# [END aiplatform_generated_aiplatform_v1_EndpointService_UpdateEndpoint_async]
diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1_endpoint_service_update_endpoint_sync.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1_endpoint_service_update_endpoint_sync.py
index ee58bb3bdc..0fd6421bca 100644
--- a/samples/generated_samples/aiplatform_generated_aiplatform_v1_endpoint_service_update_endpoint_sync.py
+++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1_endpoint_service_update_endpoint_sync.py
@@ -28,8 +28,6 @@
def sample_update_endpoint():
- """Snippet for update_endpoint"""
-
# Create a client
client = aiplatform_v1.EndpointServiceClient()
@@ -44,7 +42,7 @@ def sample_update_endpoint():
# Make the request
response = client.update_endpoint(request=request)
- # Handle response
+ # Handle the response
print(response)
# [END aiplatform_generated_aiplatform_v1_EndpointService_UpdateEndpoint_sync]
diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1_featurestore_online_serving_service_read_feature_values_async.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1_featurestore_online_serving_service_read_feature_values_async.py
index 510fcaec23..9764dc1636 100644
--- a/samples/generated_samples/aiplatform_generated_aiplatform_v1_featurestore_online_serving_service_read_feature_values_async.py
+++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1_featurestore_online_serving_service_read_feature_values_async.py
@@ -28,8 +28,6 @@
async def sample_read_feature_values():
- """Snippet for read_feature_values"""
-
# Create a client
client = aiplatform_v1.FeaturestoreOnlineServingServiceAsyncClient()
@@ -38,7 +36,7 @@ async def sample_read_feature_values():
feature_selector.id_matcher.ids = ['ids_value_1', 'ids_value_2']
request = aiplatform_v1.ReadFeatureValuesRequest(
- entity_type="projects/{project}/locations/{location}/featurestores/{featurestore}/entityTypes/{entity_type}",
+ entity_type="entity_type_value",
entity_id="entity_id_value",
feature_selector=feature_selector,
)
@@ -46,7 +44,7 @@ async def sample_read_feature_values():
# Make the request
response = await client.read_feature_values(request=request)
- # Handle response
+ # Handle the response
print(response)
# [END aiplatform_generated_aiplatform_v1_FeaturestoreOnlineServingService_ReadFeatureValues_async]
diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1_featurestore_online_serving_service_read_feature_values_sync.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1_featurestore_online_serving_service_read_feature_values_sync.py
index bb24e98032..649e818369 100644
--- a/samples/generated_samples/aiplatform_generated_aiplatform_v1_featurestore_online_serving_service_read_feature_values_sync.py
+++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1_featurestore_online_serving_service_read_feature_values_sync.py
@@ -28,8 +28,6 @@
def sample_read_feature_values():
- """Snippet for read_feature_values"""
-
# Create a client
client = aiplatform_v1.FeaturestoreOnlineServingServiceClient()
@@ -38,7 +36,7 @@ def sample_read_feature_values():
feature_selector.id_matcher.ids = ['ids_value_1', 'ids_value_2']
request = aiplatform_v1.ReadFeatureValuesRequest(
- entity_type="projects/{project}/locations/{location}/featurestores/{featurestore}/entityTypes/{entity_type}",
+ entity_type="entity_type_value",
entity_id="entity_id_value",
feature_selector=feature_selector,
)
@@ -46,7 +44,7 @@ def sample_read_feature_values():
# Make the request
response = client.read_feature_values(request=request)
- # Handle response
+ # Handle the response
print(response)
# [END aiplatform_generated_aiplatform_v1_FeaturestoreOnlineServingService_ReadFeatureValues_sync]
diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1_featurestore_online_serving_service_streaming_read_feature_values_async.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1_featurestore_online_serving_service_streaming_read_feature_values_async.py
index e39f5fcfab..1d3d272c4a 100644
--- a/samples/generated_samples/aiplatform_generated_aiplatform_v1_featurestore_online_serving_service_streaming_read_feature_values_async.py
+++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1_featurestore_online_serving_service_streaming_read_feature_values_async.py
@@ -28,8 +28,6 @@
async def sample_streaming_read_feature_values():
- """Snippet for streaming_read_feature_values"""
-
# Create a client
client = aiplatform_v1.FeaturestoreOnlineServingServiceAsyncClient()
@@ -38,13 +36,15 @@ async def sample_streaming_read_feature_values():
feature_selector.id_matcher.ids = ['ids_value_1', 'ids_value_2']
request = aiplatform_v1.StreamingReadFeatureValuesRequest(
- entity_type="projects/{project}/locations/{location}/featurestores/{featurestore}/entityTypes/{entity_type}",
+ entity_type="entity_type_value",
entity_ids=['entity_ids_value_1', 'entity_ids_value_2'],
feature_selector=feature_selector,
)
# Make the request
stream = await client.streaming_read_feature_values(request=request)
+
+ # Handle the response
async for response in stream:
print(response)
diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1_featurestore_online_serving_service_streaming_read_feature_values_sync.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1_featurestore_online_serving_service_streaming_read_feature_values_sync.py
index de945266ae..9b9f9ecda1 100644
--- a/samples/generated_samples/aiplatform_generated_aiplatform_v1_featurestore_online_serving_service_streaming_read_feature_values_sync.py
+++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1_featurestore_online_serving_service_streaming_read_feature_values_sync.py
@@ -28,8 +28,6 @@
def sample_streaming_read_feature_values():
- """Snippet for streaming_read_feature_values"""
-
# Create a client
client = aiplatform_v1.FeaturestoreOnlineServingServiceClient()
@@ -38,13 +36,15 @@ def sample_streaming_read_feature_values():
feature_selector.id_matcher.ids = ['ids_value_1', 'ids_value_2']
request = aiplatform_v1.StreamingReadFeatureValuesRequest(
- entity_type="projects/{project}/locations/{location}/featurestores/{featurestore}/entityTypes/{entity_type}",
+ entity_type="entity_type_value",
entity_ids=['entity_ids_value_1', 'entity_ids_value_2'],
feature_selector=feature_selector,
)
# Make the request
stream = client.streaming_read_feature_values(request=request)
+
+ # Handle the response
for response in stream:
print(response)
diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1_featurestore_service_batch_create_features_async.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1_featurestore_service_batch_create_features_async.py
index fcbafd4b18..be00835750 100644
--- a/samples/generated_samples/aiplatform_generated_aiplatform_v1_featurestore_service_batch_create_features_async.py
+++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1_featurestore_service_batch_create_features_async.py
@@ -28,19 +28,17 @@
async def sample_batch_create_features():
- """Snippet for batch_create_features"""
-
# Create a client
client = aiplatform_v1.FeaturestoreServiceAsyncClient()
# Initialize request argument(s)
requests = aiplatform_v1.CreateFeatureRequest()
- requests.parent = "projects/{project}/locations/{location}/featurestores/{featurestore}/entityTypes/{entity_type}"
+ requests.parent = "parent_value"
requests.feature.value_type = "BYTES"
requests.feature_id = "feature_id_value"
request = aiplatform_v1.BatchCreateFeaturesRequest(
- parent="projects/{project}/locations/{location}/featurestores/{featurestore}/entityTypes/{entity_type}",
+ parent="parent_value",
requests=requests,
)
@@ -50,6 +48,8 @@ async def sample_batch_create_features():
print("Waiting for operation to complete...")
response = await operation.result()
+
+ # Handle the response
print(response)
# [END aiplatform_generated_aiplatform_v1_FeaturestoreService_BatchCreateFeatures_async]
diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1_featurestore_service_batch_create_features_sync.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1_featurestore_service_batch_create_features_sync.py
index c25e52f2ec..673bf9a41e 100644
--- a/samples/generated_samples/aiplatform_generated_aiplatform_v1_featurestore_service_batch_create_features_sync.py
+++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1_featurestore_service_batch_create_features_sync.py
@@ -28,19 +28,17 @@
def sample_batch_create_features():
- """Snippet for batch_create_features"""
-
# Create a client
client = aiplatform_v1.FeaturestoreServiceClient()
# Initialize request argument(s)
requests = aiplatform_v1.CreateFeatureRequest()
- requests.parent = "projects/{project}/locations/{location}/featurestores/{featurestore}/entityTypes/{entity_type}"
+ requests.parent = "parent_value"
requests.feature.value_type = "BYTES"
requests.feature_id = "feature_id_value"
request = aiplatform_v1.BatchCreateFeaturesRequest(
- parent="projects/{project}/locations/{location}/featurestores/{featurestore}/entityTypes/{entity_type}",
+ parent="parent_value",
requests=requests,
)
@@ -50,6 +48,8 @@ def sample_batch_create_features():
print("Waiting for operation to complete...")
response = operation.result()
+
+ # Handle the response
print(response)
# [END aiplatform_generated_aiplatform_v1_FeaturestoreService_BatchCreateFeatures_sync]
diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1_featurestore_service_batch_read_feature_values_async.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1_featurestore_service_batch_read_feature_values_async.py
index 8534b3c72c..e3f9141441 100644
--- a/samples/generated_samples/aiplatform_generated_aiplatform_v1_featurestore_service_batch_read_feature_values_async.py
+++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1_featurestore_service_batch_read_feature_values_async.py
@@ -28,8 +28,6 @@
async def sample_batch_read_feature_values():
- """Snippet for batch_read_feature_values"""
-
# Create a client
client = aiplatform_v1.FeaturestoreServiceAsyncClient()
@@ -46,7 +44,7 @@ async def sample_batch_read_feature_values():
request = aiplatform_v1.BatchReadFeatureValuesRequest(
csv_read_instances=csv_read_instances,
- featurestore="projects/{project}/locations/{location}/featurestores/{featurestore}",
+ featurestore="featurestore_value",
destination=destination,
entity_type_specs=entity_type_specs,
)
@@ -57,6 +55,8 @@ async def sample_batch_read_feature_values():
print("Waiting for operation to complete...")
response = await operation.result()
+
+ # Handle the response
print(response)
# [END aiplatform_generated_aiplatform_v1_FeaturestoreService_BatchReadFeatureValues_async]
diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1_featurestore_service_batch_read_feature_values_sync.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1_featurestore_service_batch_read_feature_values_sync.py
index 884bd584ac..9a6a2b222c 100644
--- a/samples/generated_samples/aiplatform_generated_aiplatform_v1_featurestore_service_batch_read_feature_values_sync.py
+++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1_featurestore_service_batch_read_feature_values_sync.py
@@ -28,8 +28,6 @@
def sample_batch_read_feature_values():
- """Snippet for batch_read_feature_values"""
-
# Create a client
client = aiplatform_v1.FeaturestoreServiceClient()
@@ -46,7 +44,7 @@ def sample_batch_read_feature_values():
request = aiplatform_v1.BatchReadFeatureValuesRequest(
csv_read_instances=csv_read_instances,
- featurestore="projects/{project}/locations/{location}/featurestores/{featurestore}",
+ featurestore="featurestore_value",
destination=destination,
entity_type_specs=entity_type_specs,
)
@@ -57,6 +55,8 @@ def sample_batch_read_feature_values():
print("Waiting for operation to complete...")
response = operation.result()
+
+ # Handle the response
print(response)
# [END aiplatform_generated_aiplatform_v1_FeaturestoreService_BatchReadFeatureValues_sync]
diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1_featurestore_service_create_entity_type_async.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1_featurestore_service_create_entity_type_async.py
index cb9af67ac9..817d3c1f57 100644
--- a/samples/generated_samples/aiplatform_generated_aiplatform_v1_featurestore_service_create_entity_type_async.py
+++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1_featurestore_service_create_entity_type_async.py
@@ -28,14 +28,12 @@
async def sample_create_entity_type():
- """Snippet for create_entity_type"""
-
# Create a client
client = aiplatform_v1.FeaturestoreServiceAsyncClient()
# Initialize request argument(s)
request = aiplatform_v1.CreateEntityTypeRequest(
- parent="projects/{project}/locations/{location}/featurestores/{featurestore}",
+ parent="parent_value",
entity_type_id="entity_type_id_value",
)
@@ -45,6 +43,8 @@ async def sample_create_entity_type():
print("Waiting for operation to complete...")
response = await operation.result()
+
+ # Handle the response
print(response)
# [END aiplatform_generated_aiplatform_v1_FeaturestoreService_CreateEntityType_async]
diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1_featurestore_service_create_entity_type_sync.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1_featurestore_service_create_entity_type_sync.py
index b334145823..bc39909b48 100644
--- a/samples/generated_samples/aiplatform_generated_aiplatform_v1_featurestore_service_create_entity_type_sync.py
+++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1_featurestore_service_create_entity_type_sync.py
@@ -28,14 +28,12 @@
def sample_create_entity_type():
- """Snippet for create_entity_type"""
-
# Create a client
client = aiplatform_v1.FeaturestoreServiceClient()
# Initialize request argument(s)
request = aiplatform_v1.CreateEntityTypeRequest(
- parent="projects/{project}/locations/{location}/featurestores/{featurestore}",
+ parent="parent_value",
entity_type_id="entity_type_id_value",
)
@@ -45,6 +43,8 @@ def sample_create_entity_type():
print("Waiting for operation to complete...")
response = operation.result()
+
+ # Handle the response
print(response)
# [END aiplatform_generated_aiplatform_v1_FeaturestoreService_CreateEntityType_sync]
diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1_featurestore_service_create_feature_async.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1_featurestore_service_create_feature_async.py
index b165454f5d..bb7fc8dd07 100644
--- a/samples/generated_samples/aiplatform_generated_aiplatform_v1_featurestore_service_create_feature_async.py
+++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1_featurestore_service_create_feature_async.py
@@ -28,8 +28,6 @@
async def sample_create_feature():
- """Snippet for create_feature"""
-
# Create a client
client = aiplatform_v1.FeaturestoreServiceAsyncClient()
@@ -38,7 +36,7 @@ async def sample_create_feature():
feature.value_type = "BYTES"
request = aiplatform_v1.CreateFeatureRequest(
- parent="projects/{project}/locations/{location}/featurestores/{featurestore}/entityTypes/{entity_type}",
+ parent="parent_value",
feature=feature,
feature_id="feature_id_value",
)
@@ -49,6 +47,8 @@ async def sample_create_feature():
print("Waiting for operation to complete...")
response = await operation.result()
+
+ # Handle the response
print(response)
# [END aiplatform_generated_aiplatform_v1_FeaturestoreService_CreateFeature_async]
diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1_featurestore_service_create_feature_sync.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1_featurestore_service_create_feature_sync.py
index f8ce2967ff..5ea40980e2 100644
--- a/samples/generated_samples/aiplatform_generated_aiplatform_v1_featurestore_service_create_feature_sync.py
+++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1_featurestore_service_create_feature_sync.py
@@ -28,8 +28,6 @@
def sample_create_feature():
- """Snippet for create_feature"""
-
# Create a client
client = aiplatform_v1.FeaturestoreServiceClient()
@@ -38,7 +36,7 @@ def sample_create_feature():
feature.value_type = "BYTES"
request = aiplatform_v1.CreateFeatureRequest(
- parent="projects/{project}/locations/{location}/featurestores/{featurestore}/entityTypes/{entity_type}",
+ parent="parent_value",
feature=feature,
feature_id="feature_id_value",
)
@@ -49,6 +47,8 @@ def sample_create_feature():
print("Waiting for operation to complete...")
response = operation.result()
+
+ # Handle the response
print(response)
# [END aiplatform_generated_aiplatform_v1_FeaturestoreService_CreateFeature_sync]
diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1_featurestore_service_create_featurestore_async.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1_featurestore_service_create_featurestore_async.py
index 24b2a3076f..4e72359647 100644
--- a/samples/generated_samples/aiplatform_generated_aiplatform_v1_featurestore_service_create_featurestore_async.py
+++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1_featurestore_service_create_featurestore_async.py
@@ -28,14 +28,12 @@
async def sample_create_featurestore():
- """Snippet for create_featurestore"""
-
# Create a client
client = aiplatform_v1.FeaturestoreServiceAsyncClient()
# Initialize request argument(s)
request = aiplatform_v1.CreateFeaturestoreRequest(
- parent="projects/{project}/locations/{location}/featurestores/{featurestore}",
+ parent="parent_value",
featurestore_id="featurestore_id_value",
)
@@ -45,6 +43,8 @@ async def sample_create_featurestore():
print("Waiting for operation to complete...")
response = await operation.result()
+
+ # Handle the response
print(response)
# [END aiplatform_generated_aiplatform_v1_FeaturestoreService_CreateFeaturestore_async]
diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1_featurestore_service_create_featurestore_sync.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1_featurestore_service_create_featurestore_sync.py
index ca34113f61..0d79df38f0 100644
--- a/samples/generated_samples/aiplatform_generated_aiplatform_v1_featurestore_service_create_featurestore_sync.py
+++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1_featurestore_service_create_featurestore_sync.py
@@ -28,14 +28,12 @@
def sample_create_featurestore():
- """Snippet for create_featurestore"""
-
# Create a client
client = aiplatform_v1.FeaturestoreServiceClient()
# Initialize request argument(s)
request = aiplatform_v1.CreateFeaturestoreRequest(
- parent="projects/{project}/locations/{location}/featurestores/{featurestore}",
+ parent="parent_value",
featurestore_id="featurestore_id_value",
)
@@ -45,6 +43,8 @@ def sample_create_featurestore():
print("Waiting for operation to complete...")
response = operation.result()
+
+ # Handle the response
print(response)
# [END aiplatform_generated_aiplatform_v1_FeaturestoreService_CreateFeaturestore_sync]
diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1_featurestore_service_delete_entity_type_async.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1_featurestore_service_delete_entity_type_async.py
index d622431ad2..323b33a8f0 100644
--- a/samples/generated_samples/aiplatform_generated_aiplatform_v1_featurestore_service_delete_entity_type_async.py
+++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1_featurestore_service_delete_entity_type_async.py
@@ -28,14 +28,12 @@
async def sample_delete_entity_type():
- """Snippet for delete_entity_type"""
-
# Create a client
client = aiplatform_v1.FeaturestoreServiceAsyncClient()
# Initialize request argument(s)
request = aiplatform_v1.DeleteEntityTypeRequest(
- name="projects/{project}/locations/{location}/featurestores/{featurestore}/entityTypes/{entity_type}",
+ name="name_value",
)
# Make the request
@@ -44,6 +42,8 @@ async def sample_delete_entity_type():
print("Waiting for operation to complete...")
response = await operation.result()
+
+ # Handle the response
print(response)
# [END aiplatform_generated_aiplatform_v1_FeaturestoreService_DeleteEntityType_async]
diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1_featurestore_service_delete_entity_type_sync.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1_featurestore_service_delete_entity_type_sync.py
index 0772766fe8..43e88d2f0b 100644
--- a/samples/generated_samples/aiplatform_generated_aiplatform_v1_featurestore_service_delete_entity_type_sync.py
+++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1_featurestore_service_delete_entity_type_sync.py
@@ -28,14 +28,12 @@
def sample_delete_entity_type():
- """Snippet for delete_entity_type"""
-
# Create a client
client = aiplatform_v1.FeaturestoreServiceClient()
# Initialize request argument(s)
request = aiplatform_v1.DeleteEntityTypeRequest(
- name="projects/{project}/locations/{location}/featurestores/{featurestore}/entityTypes/{entity_type}",
+ name="name_value",
)
# Make the request
@@ -44,6 +42,8 @@ def sample_delete_entity_type():
print("Waiting for operation to complete...")
response = operation.result()
+
+ # Handle the response
print(response)
# [END aiplatform_generated_aiplatform_v1_FeaturestoreService_DeleteEntityType_sync]
diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1_featurestore_service_delete_feature_async.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1_featurestore_service_delete_feature_async.py
index 64d8809f5f..ab4c6314f1 100644
--- a/samples/generated_samples/aiplatform_generated_aiplatform_v1_featurestore_service_delete_feature_async.py
+++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1_featurestore_service_delete_feature_async.py
@@ -28,14 +28,12 @@
async def sample_delete_feature():
- """Snippet for delete_feature"""
-
# Create a client
client = aiplatform_v1.FeaturestoreServiceAsyncClient()
# Initialize request argument(s)
request = aiplatform_v1.DeleteFeatureRequest(
- name="projects/{project}/locations/{location}/featurestores/{featurestore}/entityTypes/{entity_type}/features/{feature}",
+ name="name_value",
)
# Make the request
@@ -44,6 +42,8 @@ async def sample_delete_feature():
print("Waiting for operation to complete...")
response = await operation.result()
+
+ # Handle the response
print(response)
# [END aiplatform_generated_aiplatform_v1_FeaturestoreService_DeleteFeature_async]
diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1_featurestore_service_delete_feature_sync.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1_featurestore_service_delete_feature_sync.py
index 645f7d3e1e..c2efc05690 100644
--- a/samples/generated_samples/aiplatform_generated_aiplatform_v1_featurestore_service_delete_feature_sync.py
+++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1_featurestore_service_delete_feature_sync.py
@@ -28,14 +28,12 @@
def sample_delete_feature():
- """Snippet for delete_feature"""
-
# Create a client
client = aiplatform_v1.FeaturestoreServiceClient()
# Initialize request argument(s)
request = aiplatform_v1.DeleteFeatureRequest(
- name="projects/{project}/locations/{location}/featurestores/{featurestore}/entityTypes/{entity_type}/features/{feature}",
+ name="name_value",
)
# Make the request
@@ -44,6 +42,8 @@ def sample_delete_feature():
print("Waiting for operation to complete...")
response = operation.result()
+
+ # Handle the response
print(response)
# [END aiplatform_generated_aiplatform_v1_FeaturestoreService_DeleteFeature_sync]
diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1_featurestore_service_delete_featurestore_async.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1_featurestore_service_delete_featurestore_async.py
index 0ccc18afb7..e8257380e4 100644
--- a/samples/generated_samples/aiplatform_generated_aiplatform_v1_featurestore_service_delete_featurestore_async.py
+++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1_featurestore_service_delete_featurestore_async.py
@@ -28,14 +28,12 @@
async def sample_delete_featurestore():
- """Snippet for delete_featurestore"""
-
# Create a client
client = aiplatform_v1.FeaturestoreServiceAsyncClient()
# Initialize request argument(s)
request = aiplatform_v1.DeleteFeaturestoreRequest(
- name="projects/{project}/locations/{location}/featurestores/{featurestore}",
+ name="name_value",
)
# Make the request
@@ -44,6 +42,8 @@ async def sample_delete_featurestore():
print("Waiting for operation to complete...")
response = await operation.result()
+
+ # Handle the response
print(response)
# [END aiplatform_generated_aiplatform_v1_FeaturestoreService_DeleteFeaturestore_async]
diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1_featurestore_service_delete_featurestore_sync.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1_featurestore_service_delete_featurestore_sync.py
index 926ca2225d..241bde9b2a 100644
--- a/samples/generated_samples/aiplatform_generated_aiplatform_v1_featurestore_service_delete_featurestore_sync.py
+++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1_featurestore_service_delete_featurestore_sync.py
@@ -28,14 +28,12 @@
def sample_delete_featurestore():
- """Snippet for delete_featurestore"""
-
# Create a client
client = aiplatform_v1.FeaturestoreServiceClient()
# Initialize request argument(s)
request = aiplatform_v1.DeleteFeaturestoreRequest(
- name="projects/{project}/locations/{location}/featurestores/{featurestore}",
+ name="name_value",
)
# Make the request
@@ -44,6 +42,8 @@ def sample_delete_featurestore():
print("Waiting for operation to complete...")
response = operation.result()
+
+ # Handle the response
print(response)
# [END aiplatform_generated_aiplatform_v1_FeaturestoreService_DeleteFeaturestore_sync]
diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1_featurestore_service_export_feature_values_async.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1_featurestore_service_export_feature_values_async.py
index d6c06e1d81..5023b2e41f 100644
--- a/samples/generated_samples/aiplatform_generated_aiplatform_v1_featurestore_service_export_feature_values_async.py
+++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1_featurestore_service_export_feature_values_async.py
@@ -28,8 +28,6 @@
async def sample_export_feature_values():
- """Snippet for export_feature_values"""
-
# Create a client
client = aiplatform_v1.FeaturestoreServiceAsyncClient()
@@ -41,7 +39,7 @@ async def sample_export_feature_values():
feature_selector.id_matcher.ids = ['ids_value_1', 'ids_value_2']
request = aiplatform_v1.ExportFeatureValuesRequest(
- entity_type="projects/{project}/locations/{location}/featurestores/{featurestore}/entityTypes/{entity_type}",
+ entity_type="entity_type_value",
destination=destination,
feature_selector=feature_selector,
)
@@ -52,6 +50,8 @@ async def sample_export_feature_values():
print("Waiting for operation to complete...")
response = await operation.result()
+
+ # Handle the response
print(response)
# [END aiplatform_generated_aiplatform_v1_FeaturestoreService_ExportFeatureValues_async]
diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1_featurestore_service_export_feature_values_sync.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1_featurestore_service_export_feature_values_sync.py
index 264716b3ea..25f376bef4 100644
--- a/samples/generated_samples/aiplatform_generated_aiplatform_v1_featurestore_service_export_feature_values_sync.py
+++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1_featurestore_service_export_feature_values_sync.py
@@ -28,8 +28,6 @@
def sample_export_feature_values():
- """Snippet for export_feature_values"""
-
# Create a client
client = aiplatform_v1.FeaturestoreServiceClient()
@@ -41,7 +39,7 @@ def sample_export_feature_values():
feature_selector.id_matcher.ids = ['ids_value_1', 'ids_value_2']
request = aiplatform_v1.ExportFeatureValuesRequest(
- entity_type="projects/{project}/locations/{location}/featurestores/{featurestore}/entityTypes/{entity_type}",
+ entity_type="entity_type_value",
destination=destination,
feature_selector=feature_selector,
)
@@ -52,6 +50,8 @@ def sample_export_feature_values():
print("Waiting for operation to complete...")
response = operation.result()
+
+ # Handle the response
print(response)
# [END aiplatform_generated_aiplatform_v1_FeaturestoreService_ExportFeatureValues_sync]
diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1_featurestore_service_get_entity_type_async.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1_featurestore_service_get_entity_type_async.py
index 082308e964..3880543111 100644
--- a/samples/generated_samples/aiplatform_generated_aiplatform_v1_featurestore_service_get_entity_type_async.py
+++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1_featurestore_service_get_entity_type_async.py
@@ -28,20 +28,18 @@
async def sample_get_entity_type():
- """Snippet for get_entity_type"""
-
# Create a client
client = aiplatform_v1.FeaturestoreServiceAsyncClient()
# Initialize request argument(s)
request = aiplatform_v1.GetEntityTypeRequest(
- name="projects/{project}/locations/{location}/featurestores/{featurestore}/entityTypes/{entity_type}",
+ name="name_value",
)
# Make the request
response = await client.get_entity_type(request=request)
- # Handle response
+ # Handle the response
print(response)
# [END aiplatform_generated_aiplatform_v1_FeaturestoreService_GetEntityType_async]
diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1_featurestore_service_get_entity_type_sync.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1_featurestore_service_get_entity_type_sync.py
index 295d16d9ad..61f9b7b083 100644
--- a/samples/generated_samples/aiplatform_generated_aiplatform_v1_featurestore_service_get_entity_type_sync.py
+++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1_featurestore_service_get_entity_type_sync.py
@@ -28,20 +28,18 @@
def sample_get_entity_type():
- """Snippet for get_entity_type"""
-
# Create a client
client = aiplatform_v1.FeaturestoreServiceClient()
# Initialize request argument(s)
request = aiplatform_v1.GetEntityTypeRequest(
- name="projects/{project}/locations/{location}/featurestores/{featurestore}/entityTypes/{entity_type}",
+ name="name_value",
)
# Make the request
response = client.get_entity_type(request=request)
- # Handle response
+ # Handle the response
print(response)
# [END aiplatform_generated_aiplatform_v1_FeaturestoreService_GetEntityType_sync]
diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1_featurestore_service_get_feature_async.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1_featurestore_service_get_feature_async.py
index 4a65e6345e..4ac406bb48 100644
--- a/samples/generated_samples/aiplatform_generated_aiplatform_v1_featurestore_service_get_feature_async.py
+++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1_featurestore_service_get_feature_async.py
@@ -28,20 +28,18 @@
async def sample_get_feature():
- """Snippet for get_feature"""
-
# Create a client
client = aiplatform_v1.FeaturestoreServiceAsyncClient()
# Initialize request argument(s)
request = aiplatform_v1.GetFeatureRequest(
- name="projects/{project}/locations/{location}/featurestores/{featurestore}/entityTypes/{entity_type}/features/{feature}",
+ name="name_value",
)
# Make the request
response = await client.get_feature(request=request)
- # Handle response
+ # Handle the response
print(response)
# [END aiplatform_generated_aiplatform_v1_FeaturestoreService_GetFeature_async]
diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1_featurestore_service_get_feature_sync.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1_featurestore_service_get_feature_sync.py
index f2875430cc..0268415d7b 100644
--- a/samples/generated_samples/aiplatform_generated_aiplatform_v1_featurestore_service_get_feature_sync.py
+++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1_featurestore_service_get_feature_sync.py
@@ -28,20 +28,18 @@
def sample_get_feature():
- """Snippet for get_feature"""
-
# Create a client
client = aiplatform_v1.FeaturestoreServiceClient()
# Initialize request argument(s)
request = aiplatform_v1.GetFeatureRequest(
- name="projects/{project}/locations/{location}/featurestores/{featurestore}/entityTypes/{entity_type}/features/{feature}",
+ name="name_value",
)
# Make the request
response = client.get_feature(request=request)
- # Handle response
+ # Handle the response
print(response)
# [END aiplatform_generated_aiplatform_v1_FeaturestoreService_GetFeature_sync]
diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1_featurestore_service_get_featurestore_async.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1_featurestore_service_get_featurestore_async.py
index 75168d45e5..212100105d 100644
--- a/samples/generated_samples/aiplatform_generated_aiplatform_v1_featurestore_service_get_featurestore_async.py
+++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1_featurestore_service_get_featurestore_async.py
@@ -28,20 +28,18 @@
async def sample_get_featurestore():
- """Snippet for get_featurestore"""
-
# Create a client
client = aiplatform_v1.FeaturestoreServiceAsyncClient()
# Initialize request argument(s)
request = aiplatform_v1.GetFeaturestoreRequest(
- name="projects/{project}/locations/{location}/featurestores/{featurestore}",
+ name="name_value",
)
# Make the request
response = await client.get_featurestore(request=request)
- # Handle response
+ # Handle the response
print(response)
# [END aiplatform_generated_aiplatform_v1_FeaturestoreService_GetFeaturestore_async]
diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1_featurestore_service_get_featurestore_sync.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1_featurestore_service_get_featurestore_sync.py
index 75bf626d7e..f02e9d9c5a 100644
--- a/samples/generated_samples/aiplatform_generated_aiplatform_v1_featurestore_service_get_featurestore_sync.py
+++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1_featurestore_service_get_featurestore_sync.py
@@ -28,20 +28,18 @@
def sample_get_featurestore():
- """Snippet for get_featurestore"""
-
# Create a client
client = aiplatform_v1.FeaturestoreServiceClient()
# Initialize request argument(s)
request = aiplatform_v1.GetFeaturestoreRequest(
- name="projects/{project}/locations/{location}/featurestores/{featurestore}",
+ name="name_value",
)
# Make the request
response = client.get_featurestore(request=request)
- # Handle response
+ # Handle the response
print(response)
# [END aiplatform_generated_aiplatform_v1_FeaturestoreService_GetFeaturestore_sync]
diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1_featurestore_service_import_feature_values_async.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1_featurestore_service_import_feature_values_async.py
index afe611290f..fb38d59f8b 100644
--- a/samples/generated_samples/aiplatform_generated_aiplatform_v1_featurestore_service_import_feature_values_async.py
+++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1_featurestore_service_import_feature_values_async.py
@@ -28,8 +28,6 @@
async def sample_import_feature_values():
- """Snippet for import_feature_values"""
-
# Create a client
client = aiplatform_v1.FeaturestoreServiceAsyncClient()
@@ -43,7 +41,7 @@ async def sample_import_feature_values():
request = aiplatform_v1.ImportFeatureValuesRequest(
avro_source=avro_source,
feature_time_field="feature_time_field_value",
- entity_type="projects/{project}/locations/{location}/featurestores/{featurestore}/entityTypes/{entity_type}",
+ entity_type="entity_type_value",
feature_specs=feature_specs,
)
@@ -53,6 +51,8 @@ async def sample_import_feature_values():
print("Waiting for operation to complete...")
response = await operation.result()
+
+ # Handle the response
print(response)
# [END aiplatform_generated_aiplatform_v1_FeaturestoreService_ImportFeatureValues_async]
diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1_featurestore_service_import_feature_values_sync.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1_featurestore_service_import_feature_values_sync.py
index 216b14f4bb..7cf027567e 100644
--- a/samples/generated_samples/aiplatform_generated_aiplatform_v1_featurestore_service_import_feature_values_sync.py
+++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1_featurestore_service_import_feature_values_sync.py
@@ -28,8 +28,6 @@
def sample_import_feature_values():
- """Snippet for import_feature_values"""
-
# Create a client
client = aiplatform_v1.FeaturestoreServiceClient()
@@ -43,7 +41,7 @@ def sample_import_feature_values():
request = aiplatform_v1.ImportFeatureValuesRequest(
avro_source=avro_source,
feature_time_field="feature_time_field_value",
- entity_type="projects/{project}/locations/{location}/featurestores/{featurestore}/entityTypes/{entity_type}",
+ entity_type="entity_type_value",
feature_specs=feature_specs,
)
@@ -53,6 +51,8 @@ def sample_import_feature_values():
print("Waiting for operation to complete...")
response = operation.result()
+
+ # Handle the response
print(response)
# [END aiplatform_generated_aiplatform_v1_FeaturestoreService_ImportFeatureValues_sync]
diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1_featurestore_service_list_entity_types_async.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1_featurestore_service_list_entity_types_async.py
index 11cb18bcbb..2fa1182336 100644
--- a/samples/generated_samples/aiplatform_generated_aiplatform_v1_featurestore_service_list_entity_types_async.py
+++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1_featurestore_service_list_entity_types_async.py
@@ -28,18 +28,18 @@
async def sample_list_entity_types():
- """Snippet for list_entity_types"""
-
# Create a client
client = aiplatform_v1.FeaturestoreServiceAsyncClient()
# Initialize request argument(s)
request = aiplatform_v1.ListEntityTypesRequest(
- parent="projects/{project}/locations/{location}/featurestores/{featurestore}/entityTypes/{entity_type}",
+ parent="parent_value",
)
# Make the request
page_result = client.list_entity_types(request=request)
+
+ # Handle the response
async for response in page_result:
print(response)
diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1_featurestore_service_list_entity_types_sync.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1_featurestore_service_list_entity_types_sync.py
index 74679b6629..c25ca9824d 100644
--- a/samples/generated_samples/aiplatform_generated_aiplatform_v1_featurestore_service_list_entity_types_sync.py
+++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1_featurestore_service_list_entity_types_sync.py
@@ -28,18 +28,18 @@
def sample_list_entity_types():
- """Snippet for list_entity_types"""
-
# Create a client
client = aiplatform_v1.FeaturestoreServiceClient()
# Initialize request argument(s)
request = aiplatform_v1.ListEntityTypesRequest(
- parent="projects/{project}/locations/{location}/featurestores/{featurestore}/entityTypes/{entity_type}",
+ parent="parent_value",
)
# Make the request
page_result = client.list_entity_types(request=request)
+
+ # Handle the response
for response in page_result:
print(response)
diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1_featurestore_service_list_features_async.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1_featurestore_service_list_features_async.py
index 6f9f2a922a..f91fff3725 100644
--- a/samples/generated_samples/aiplatform_generated_aiplatform_v1_featurestore_service_list_features_async.py
+++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1_featurestore_service_list_features_async.py
@@ -28,18 +28,18 @@
async def sample_list_features():
- """Snippet for list_features"""
-
# Create a client
client = aiplatform_v1.FeaturestoreServiceAsyncClient()
# Initialize request argument(s)
request = aiplatform_v1.ListFeaturesRequest(
- parent="projects/{project}/locations/{location}/featurestores/{featurestore}/entityTypes/{entity_type}/features/{feature}",
+ parent="parent_value",
)
# Make the request
page_result = client.list_features(request=request)
+
+ # Handle the response
async for response in page_result:
print(response)
diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1_featurestore_service_list_features_sync.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1_featurestore_service_list_features_sync.py
index 09f0b753bb..cb26aec168 100644
--- a/samples/generated_samples/aiplatform_generated_aiplatform_v1_featurestore_service_list_features_sync.py
+++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1_featurestore_service_list_features_sync.py
@@ -28,18 +28,18 @@
def sample_list_features():
- """Snippet for list_features"""
-
# Create a client
client = aiplatform_v1.FeaturestoreServiceClient()
# Initialize request argument(s)
request = aiplatform_v1.ListFeaturesRequest(
- parent="projects/{project}/locations/{location}/featurestores/{featurestore}/entityTypes/{entity_type}/features/{feature}",
+ parent="parent_value",
)
# Make the request
page_result = client.list_features(request=request)
+
+ # Handle the response
for response in page_result:
print(response)
diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1_featurestore_service_list_featurestores_async.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1_featurestore_service_list_featurestores_async.py
index f4504c7d05..7c11ffcb3b 100644
--- a/samples/generated_samples/aiplatform_generated_aiplatform_v1_featurestore_service_list_featurestores_async.py
+++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1_featurestore_service_list_featurestores_async.py
@@ -28,18 +28,18 @@
async def sample_list_featurestores():
- """Snippet for list_featurestores"""
-
# Create a client
client = aiplatform_v1.FeaturestoreServiceAsyncClient()
# Initialize request argument(s)
request = aiplatform_v1.ListFeaturestoresRequest(
- parent="projects/{project}/locations/{location}/featurestores/{featurestore}",
+ parent="parent_value",
)
# Make the request
page_result = client.list_featurestores(request=request)
+
+ # Handle the response
async for response in page_result:
print(response)
diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1_featurestore_service_list_featurestores_sync.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1_featurestore_service_list_featurestores_sync.py
index d683863b85..76c4789a2a 100644
--- a/samples/generated_samples/aiplatform_generated_aiplatform_v1_featurestore_service_list_featurestores_sync.py
+++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1_featurestore_service_list_featurestores_sync.py
@@ -28,18 +28,18 @@
def sample_list_featurestores():
- """Snippet for list_featurestores"""
-
# Create a client
client = aiplatform_v1.FeaturestoreServiceClient()
# Initialize request argument(s)
request = aiplatform_v1.ListFeaturestoresRequest(
- parent="projects/{project}/locations/{location}/featurestores/{featurestore}",
+ parent="parent_value",
)
# Make the request
page_result = client.list_featurestores(request=request)
+
+ # Handle the response
for response in page_result:
print(response)
diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1_featurestore_service_search_features_async.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1_featurestore_service_search_features_async.py
index 79feb9ca89..f69587c7c1 100644
--- a/samples/generated_samples/aiplatform_generated_aiplatform_v1_featurestore_service_search_features_async.py
+++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1_featurestore_service_search_features_async.py
@@ -28,18 +28,18 @@
async def sample_search_features():
- """Snippet for search_features"""
-
# Create a client
client = aiplatform_v1.FeaturestoreServiceAsyncClient()
# Initialize request argument(s)
request = aiplatform_v1.SearchFeaturesRequest(
- location="projects/{project}/locations/{location}",
+ location="location_value",
)
# Make the request
page_result = client.search_features(request=request)
+
+ # Handle the response
async for response in page_result:
print(response)
diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1_featurestore_service_search_features_sync.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1_featurestore_service_search_features_sync.py
index 1f6a77aad1..9ca48fcf9b 100644
--- a/samples/generated_samples/aiplatform_generated_aiplatform_v1_featurestore_service_search_features_sync.py
+++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1_featurestore_service_search_features_sync.py
@@ -28,18 +28,18 @@
def sample_search_features():
- """Snippet for search_features"""
-
# Create a client
client = aiplatform_v1.FeaturestoreServiceClient()
# Initialize request argument(s)
request = aiplatform_v1.SearchFeaturesRequest(
- location="projects/{project}/locations/{location}",
+ location="location_value",
)
# Make the request
page_result = client.search_features(request=request)
+
+ # Handle the response
for response in page_result:
print(response)
diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1_featurestore_service_update_entity_type_async.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1_featurestore_service_update_entity_type_async.py
index dcdfa8eb3a..2b7c77857e 100644
--- a/samples/generated_samples/aiplatform_generated_aiplatform_v1_featurestore_service_update_entity_type_async.py
+++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1_featurestore_service_update_entity_type_async.py
@@ -28,8 +28,6 @@
async def sample_update_entity_type():
- """Snippet for update_entity_type"""
-
# Create a client
client = aiplatform_v1.FeaturestoreServiceAsyncClient()
@@ -40,7 +38,7 @@ async def sample_update_entity_type():
# Make the request
response = await client.update_entity_type(request=request)
- # Handle response
+ # Handle the response
print(response)
# [END aiplatform_generated_aiplatform_v1_FeaturestoreService_UpdateEntityType_async]
diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1_featurestore_service_update_entity_type_sync.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1_featurestore_service_update_entity_type_sync.py
index 30a833581d..b44432172a 100644
--- a/samples/generated_samples/aiplatform_generated_aiplatform_v1_featurestore_service_update_entity_type_sync.py
+++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1_featurestore_service_update_entity_type_sync.py
@@ -28,8 +28,6 @@
def sample_update_entity_type():
- """Snippet for update_entity_type"""
-
# Create a client
client = aiplatform_v1.FeaturestoreServiceClient()
@@ -40,7 +38,7 @@ def sample_update_entity_type():
# Make the request
response = client.update_entity_type(request=request)
- # Handle response
+ # Handle the response
print(response)
# [END aiplatform_generated_aiplatform_v1_FeaturestoreService_UpdateEntityType_sync]
diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1_featurestore_service_update_feature_async.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1_featurestore_service_update_feature_async.py
index 12f1b8d195..3fee9302ba 100644
--- a/samples/generated_samples/aiplatform_generated_aiplatform_v1_featurestore_service_update_feature_async.py
+++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1_featurestore_service_update_feature_async.py
@@ -28,8 +28,6 @@
async def sample_update_feature():
- """Snippet for update_feature"""
-
# Create a client
client = aiplatform_v1.FeaturestoreServiceAsyncClient()
@@ -44,7 +42,7 @@ async def sample_update_feature():
# Make the request
response = await client.update_feature(request=request)
- # Handle response
+ # Handle the response
print(response)
# [END aiplatform_generated_aiplatform_v1_FeaturestoreService_UpdateFeature_async]
diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1_featurestore_service_update_feature_sync.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1_featurestore_service_update_feature_sync.py
index 0afab8a753..5e8945aa58 100644
--- a/samples/generated_samples/aiplatform_generated_aiplatform_v1_featurestore_service_update_feature_sync.py
+++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1_featurestore_service_update_feature_sync.py
@@ -28,8 +28,6 @@
def sample_update_feature():
- """Snippet for update_feature"""
-
# Create a client
client = aiplatform_v1.FeaturestoreServiceClient()
@@ -44,7 +42,7 @@ def sample_update_feature():
# Make the request
response = client.update_feature(request=request)
- # Handle response
+ # Handle the response
print(response)
# [END aiplatform_generated_aiplatform_v1_FeaturestoreService_UpdateFeature_sync]
diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1_featurestore_service_update_featurestore_async.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1_featurestore_service_update_featurestore_async.py
index 85d3a93787..98c69688de 100644
--- a/samples/generated_samples/aiplatform_generated_aiplatform_v1_featurestore_service_update_featurestore_async.py
+++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1_featurestore_service_update_featurestore_async.py
@@ -28,8 +28,6 @@
async def sample_update_featurestore():
- """Snippet for update_featurestore"""
-
# Create a client
client = aiplatform_v1.FeaturestoreServiceAsyncClient()
@@ -43,6 +41,8 @@ async def sample_update_featurestore():
print("Waiting for operation to complete...")
response = await operation.result()
+
+ # Handle the response
print(response)
# [END aiplatform_generated_aiplatform_v1_FeaturestoreService_UpdateFeaturestore_async]
diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1_featurestore_service_update_featurestore_sync.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1_featurestore_service_update_featurestore_sync.py
index f25dc6a08e..44922e7a70 100644
--- a/samples/generated_samples/aiplatform_generated_aiplatform_v1_featurestore_service_update_featurestore_sync.py
+++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1_featurestore_service_update_featurestore_sync.py
@@ -28,8 +28,6 @@
def sample_update_featurestore():
- """Snippet for update_featurestore"""
-
# Create a client
client = aiplatform_v1.FeaturestoreServiceClient()
@@ -43,6 +41,8 @@ def sample_update_featurestore():
print("Waiting for operation to complete...")
response = operation.result()
+
+ # Handle the response
print(response)
# [END aiplatform_generated_aiplatform_v1_FeaturestoreService_UpdateFeaturestore_sync]
diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1_index_endpoint_service_create_index_endpoint_async.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1_index_endpoint_service_create_index_endpoint_async.py
index 2ac2d86729..ba85235ef1 100644
--- a/samples/generated_samples/aiplatform_generated_aiplatform_v1_index_endpoint_service_create_index_endpoint_async.py
+++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1_index_endpoint_service_create_index_endpoint_async.py
@@ -28,18 +28,15 @@
async def sample_create_index_endpoint():
- """Snippet for create_index_endpoint"""
-
# Create a client
client = aiplatform_v1.IndexEndpointServiceAsyncClient()
# Initialize request argument(s)
index_endpoint = aiplatform_v1.IndexEndpoint()
index_endpoint.display_name = "display_name_value"
- index_endpoint.network = "network_value"
request = aiplatform_v1.CreateIndexEndpointRequest(
- parent="projects/{project}/locations/{location}",
+ parent="parent_value",
index_endpoint=index_endpoint,
)
@@ -49,6 +46,8 @@ async def sample_create_index_endpoint():
print("Waiting for operation to complete...")
response = await operation.result()
+
+ # Handle the response
print(response)
# [END aiplatform_generated_aiplatform_v1_IndexEndpointService_CreateIndexEndpoint_async]
diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1_index_endpoint_service_create_index_endpoint_sync.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1_index_endpoint_service_create_index_endpoint_sync.py
index 721392dd3c..d2f92fcd45 100644
--- a/samples/generated_samples/aiplatform_generated_aiplatform_v1_index_endpoint_service_create_index_endpoint_sync.py
+++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1_index_endpoint_service_create_index_endpoint_sync.py
@@ -28,18 +28,15 @@
def sample_create_index_endpoint():
- """Snippet for create_index_endpoint"""
-
# Create a client
client = aiplatform_v1.IndexEndpointServiceClient()
# Initialize request argument(s)
index_endpoint = aiplatform_v1.IndexEndpoint()
index_endpoint.display_name = "display_name_value"
- index_endpoint.network = "network_value"
request = aiplatform_v1.CreateIndexEndpointRequest(
- parent="projects/{project}/locations/{location}",
+ parent="parent_value",
index_endpoint=index_endpoint,
)
@@ -49,6 +46,8 @@ def sample_create_index_endpoint():
print("Waiting for operation to complete...")
response = operation.result()
+
+ # Handle the response
print(response)
# [END aiplatform_generated_aiplatform_v1_IndexEndpointService_CreateIndexEndpoint_sync]
diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1_index_endpoint_service_delete_index_endpoint_async.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1_index_endpoint_service_delete_index_endpoint_async.py
index b11ad6a98c..fe53ef9440 100644
--- a/samples/generated_samples/aiplatform_generated_aiplatform_v1_index_endpoint_service_delete_index_endpoint_async.py
+++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1_index_endpoint_service_delete_index_endpoint_async.py
@@ -28,14 +28,12 @@
async def sample_delete_index_endpoint():
- """Snippet for delete_index_endpoint"""
-
# Create a client
client = aiplatform_v1.IndexEndpointServiceAsyncClient()
# Initialize request argument(s)
request = aiplatform_v1.DeleteIndexEndpointRequest(
- name="projects/{project}/locations/{location}/indexEndpoints/{index_endpoint}",
+ name="name_value",
)
# Make the request
@@ -44,6 +42,8 @@ async def sample_delete_index_endpoint():
print("Waiting for operation to complete...")
response = await operation.result()
+
+ # Handle the response
print(response)
# [END aiplatform_generated_aiplatform_v1_IndexEndpointService_DeleteIndexEndpoint_async]
diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1_index_endpoint_service_delete_index_endpoint_sync.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1_index_endpoint_service_delete_index_endpoint_sync.py
index fdb6b4220a..cd9e08c984 100644
--- a/samples/generated_samples/aiplatform_generated_aiplatform_v1_index_endpoint_service_delete_index_endpoint_sync.py
+++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1_index_endpoint_service_delete_index_endpoint_sync.py
@@ -28,14 +28,12 @@
def sample_delete_index_endpoint():
- """Snippet for delete_index_endpoint"""
-
# Create a client
client = aiplatform_v1.IndexEndpointServiceClient()
# Initialize request argument(s)
request = aiplatform_v1.DeleteIndexEndpointRequest(
- name="projects/{project}/locations/{location}/indexEndpoints/{index_endpoint}",
+ name="name_value",
)
# Make the request
@@ -44,6 +42,8 @@ def sample_delete_index_endpoint():
print("Waiting for operation to complete...")
response = operation.result()
+
+ # Handle the response
print(response)
# [END aiplatform_generated_aiplatform_v1_IndexEndpointService_DeleteIndexEndpoint_sync]
diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1_index_endpoint_service_deploy_index_async.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1_index_endpoint_service_deploy_index_async.py
index 2db4b3d033..fca1fdb808 100644
--- a/samples/generated_samples/aiplatform_generated_aiplatform_v1_index_endpoint_service_deploy_index_async.py
+++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1_index_endpoint_service_deploy_index_async.py
@@ -28,18 +28,16 @@
async def sample_deploy_index():
- """Snippet for deploy_index"""
-
# Create a client
client = aiplatform_v1.IndexEndpointServiceAsyncClient()
# Initialize request argument(s)
deployed_index = aiplatform_v1.DeployedIndex()
deployed_index.id = "id_value"
- deployed_index.index = "projects/{project}/locations/{location}/indexes/{index}"
+ deployed_index.index = "index_value"
request = aiplatform_v1.DeployIndexRequest(
- index_endpoint="projects/{project}/locations/{location}/indexEndpoints/{index_endpoint}",
+ index_endpoint="index_endpoint_value",
deployed_index=deployed_index,
)
@@ -49,6 +47,8 @@ async def sample_deploy_index():
print("Waiting for operation to complete...")
response = await operation.result()
+
+ # Handle the response
print(response)
# [END aiplatform_generated_aiplatform_v1_IndexEndpointService_DeployIndex_async]
diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1_index_endpoint_service_deploy_index_sync.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1_index_endpoint_service_deploy_index_sync.py
index eb42c27955..be29db6888 100644
--- a/samples/generated_samples/aiplatform_generated_aiplatform_v1_index_endpoint_service_deploy_index_sync.py
+++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1_index_endpoint_service_deploy_index_sync.py
@@ -28,18 +28,16 @@
def sample_deploy_index():
- """Snippet for deploy_index"""
-
# Create a client
client = aiplatform_v1.IndexEndpointServiceClient()
# Initialize request argument(s)
deployed_index = aiplatform_v1.DeployedIndex()
deployed_index.id = "id_value"
- deployed_index.index = "projects/{project}/locations/{location}/indexes/{index}"
+ deployed_index.index = "index_value"
request = aiplatform_v1.DeployIndexRequest(
- index_endpoint="projects/{project}/locations/{location}/indexEndpoints/{index_endpoint}",
+ index_endpoint="index_endpoint_value",
deployed_index=deployed_index,
)
@@ -49,6 +47,8 @@ def sample_deploy_index():
print("Waiting for operation to complete...")
response = operation.result()
+
+ # Handle the response
print(response)
# [END aiplatform_generated_aiplatform_v1_IndexEndpointService_DeployIndex_sync]
diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1_index_endpoint_service_get_index_endpoint_async.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1_index_endpoint_service_get_index_endpoint_async.py
index abbb4dc5c3..128a504b27 100644
--- a/samples/generated_samples/aiplatform_generated_aiplatform_v1_index_endpoint_service_get_index_endpoint_async.py
+++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1_index_endpoint_service_get_index_endpoint_async.py
@@ -28,20 +28,18 @@
async def sample_get_index_endpoint():
- """Snippet for get_index_endpoint"""
-
# Create a client
client = aiplatform_v1.IndexEndpointServiceAsyncClient()
# Initialize request argument(s)
request = aiplatform_v1.GetIndexEndpointRequest(
- name="projects/{project}/locations/{location}/indexEndpoints/{index_endpoint}",
+ name="name_value",
)
# Make the request
response = await client.get_index_endpoint(request=request)
- # Handle response
+ # Handle the response
print(response)
# [END aiplatform_generated_aiplatform_v1_IndexEndpointService_GetIndexEndpoint_async]
diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1_index_endpoint_service_get_index_endpoint_sync.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1_index_endpoint_service_get_index_endpoint_sync.py
index bf58c15b03..4112a20993 100644
--- a/samples/generated_samples/aiplatform_generated_aiplatform_v1_index_endpoint_service_get_index_endpoint_sync.py
+++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1_index_endpoint_service_get_index_endpoint_sync.py
@@ -28,20 +28,18 @@
def sample_get_index_endpoint():
- """Snippet for get_index_endpoint"""
-
# Create a client
client = aiplatform_v1.IndexEndpointServiceClient()
# Initialize request argument(s)
request = aiplatform_v1.GetIndexEndpointRequest(
- name="projects/{project}/locations/{location}/indexEndpoints/{index_endpoint}",
+ name="name_value",
)
# Make the request
response = client.get_index_endpoint(request=request)
- # Handle response
+ # Handle the response
print(response)
# [END aiplatform_generated_aiplatform_v1_IndexEndpointService_GetIndexEndpoint_sync]
diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1_index_endpoint_service_list_index_endpoints_async.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1_index_endpoint_service_list_index_endpoints_async.py
index 39e605ffd3..9f867ba780 100644
--- a/samples/generated_samples/aiplatform_generated_aiplatform_v1_index_endpoint_service_list_index_endpoints_async.py
+++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1_index_endpoint_service_list_index_endpoints_async.py
@@ -28,18 +28,18 @@
async def sample_list_index_endpoints():
- """Snippet for list_index_endpoints"""
-
# Create a client
client = aiplatform_v1.IndexEndpointServiceAsyncClient()
# Initialize request argument(s)
request = aiplatform_v1.ListIndexEndpointsRequest(
- parent="projects/{project}/locations/{location}",
+ parent="parent_value",
)
# Make the request
page_result = client.list_index_endpoints(request=request)
+
+ # Handle the response
async for response in page_result:
print(response)
diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1_index_endpoint_service_list_index_endpoints_sync.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1_index_endpoint_service_list_index_endpoints_sync.py
index f7d9da4b8f..a763694f2b 100644
--- a/samples/generated_samples/aiplatform_generated_aiplatform_v1_index_endpoint_service_list_index_endpoints_sync.py
+++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1_index_endpoint_service_list_index_endpoints_sync.py
@@ -28,18 +28,18 @@
def sample_list_index_endpoints():
- """Snippet for list_index_endpoints"""
-
# Create a client
client = aiplatform_v1.IndexEndpointServiceClient()
# Initialize request argument(s)
request = aiplatform_v1.ListIndexEndpointsRequest(
- parent="projects/{project}/locations/{location}",
+ parent="parent_value",
)
# Make the request
page_result = client.list_index_endpoints(request=request)
+
+ # Handle the response
for response in page_result:
print(response)
diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1_index_endpoint_service_mutate_deployed_index_async.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1_index_endpoint_service_mutate_deployed_index_async.py
new file mode 100644
index 0000000000..18f833840f
--- /dev/null
+++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1_index_endpoint_service_mutate_deployed_index_async.py
@@ -0,0 +1,54 @@
+# -*- coding: utf-8 -*-
+# Copyright 2020 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+# Generated code. DO NOT EDIT!
+#
+# Snippet for MutateDeployedIndex
+# NOTE: This snippet has been automatically generated for illustrative purposes only.
+# It may require modifications to work in your environment.
+
+# To install the latest published package dependency, execute the following:
+# python3 -m pip install google-cloud-aiplatform
+
+
+# [START aiplatform_generated_aiplatform_v1_IndexEndpointService_MutateDeployedIndex_async]
+from google.cloud import aiplatform_v1
+
+
+async def sample_mutate_deployed_index():
+ # Create a client
+ client = aiplatform_v1.IndexEndpointServiceAsyncClient()
+
+ # Initialize request argument(s)
+ deployed_index = aiplatform_v1.DeployedIndex()
+ deployed_index.id = "id_value"
+ deployed_index.index = "index_value"
+
+ request = aiplatform_v1.MutateDeployedIndexRequest(
+ index_endpoint="index_endpoint_value",
+ deployed_index=deployed_index,
+ )
+
+ # Make the request
+ operation = client.mutate_deployed_index(request=request)
+
+ print("Waiting for operation to complete...")
+
+ response = await operation.result()
+
+ # Handle the response
+ print(response)
+
+# [END aiplatform_generated_aiplatform_v1_IndexEndpointService_MutateDeployedIndex_async]
diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1_index_endpoint_service_mutate_deployed_index_sync.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1_index_endpoint_service_mutate_deployed_index_sync.py
new file mode 100644
index 0000000000..f2c8a7956f
--- /dev/null
+++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1_index_endpoint_service_mutate_deployed_index_sync.py
@@ -0,0 +1,54 @@
+# -*- coding: utf-8 -*-
+# Copyright 2020 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+# Generated code. DO NOT EDIT!
+#
+# Snippet for MutateDeployedIndex
+# NOTE: This snippet has been automatically generated for illustrative purposes only.
+# It may require modifications to work in your environment.
+
+# To install the latest published package dependency, execute the following:
+# python3 -m pip install google-cloud-aiplatform
+
+
+# [START aiplatform_generated_aiplatform_v1_IndexEndpointService_MutateDeployedIndex_sync]
+from google.cloud import aiplatform_v1
+
+
+def sample_mutate_deployed_index():
+ # Create a client
+ client = aiplatform_v1.IndexEndpointServiceClient()
+
+ # Initialize request argument(s)
+ deployed_index = aiplatform_v1.DeployedIndex()
+ deployed_index.id = "id_value"
+ deployed_index.index = "index_value"
+
+ request = aiplatform_v1.MutateDeployedIndexRequest(
+ index_endpoint="index_endpoint_value",
+ deployed_index=deployed_index,
+ )
+
+ # Make the request
+ operation = client.mutate_deployed_index(request=request)
+
+ print("Waiting for operation to complete...")
+
+ response = operation.result()
+
+ # Handle the response
+ print(response)
+
+# [END aiplatform_generated_aiplatform_v1_IndexEndpointService_MutateDeployedIndex_sync]
diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1_index_endpoint_service_undeploy_index_async.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1_index_endpoint_service_undeploy_index_async.py
index 28f38961e4..d87bd75490 100644
--- a/samples/generated_samples/aiplatform_generated_aiplatform_v1_index_endpoint_service_undeploy_index_async.py
+++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1_index_endpoint_service_undeploy_index_async.py
@@ -28,14 +28,12 @@
async def sample_undeploy_index():
- """Snippet for undeploy_index"""
-
# Create a client
client = aiplatform_v1.IndexEndpointServiceAsyncClient()
# Initialize request argument(s)
request = aiplatform_v1.UndeployIndexRequest(
- index_endpoint="projects/{project}/locations/{location}/indexEndpoints/{index_endpoint}",
+ index_endpoint="index_endpoint_value",
deployed_index_id="deployed_index_id_value",
)
@@ -45,6 +43,8 @@ async def sample_undeploy_index():
print("Waiting for operation to complete...")
response = await operation.result()
+
+ # Handle the response
print(response)
# [END aiplatform_generated_aiplatform_v1_IndexEndpointService_UndeployIndex_async]
diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1_index_endpoint_service_undeploy_index_sync.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1_index_endpoint_service_undeploy_index_sync.py
index 1e4d6dc5ed..b3fa439c81 100644
--- a/samples/generated_samples/aiplatform_generated_aiplatform_v1_index_endpoint_service_undeploy_index_sync.py
+++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1_index_endpoint_service_undeploy_index_sync.py
@@ -28,14 +28,12 @@
def sample_undeploy_index():
- """Snippet for undeploy_index"""
-
# Create a client
client = aiplatform_v1.IndexEndpointServiceClient()
# Initialize request argument(s)
request = aiplatform_v1.UndeployIndexRequest(
- index_endpoint="projects/{project}/locations/{location}/indexEndpoints/{index_endpoint}",
+ index_endpoint="index_endpoint_value",
deployed_index_id="deployed_index_id_value",
)
@@ -45,6 +43,8 @@ def sample_undeploy_index():
print("Waiting for operation to complete...")
response = operation.result()
+
+ # Handle the response
print(response)
# [END aiplatform_generated_aiplatform_v1_IndexEndpointService_UndeployIndex_sync]
diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1_index_endpoint_service_update_index_endpoint_async.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1_index_endpoint_service_update_index_endpoint_async.py
index ec7697b16a..44000606a9 100644
--- a/samples/generated_samples/aiplatform_generated_aiplatform_v1_index_endpoint_service_update_index_endpoint_async.py
+++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1_index_endpoint_service_update_index_endpoint_async.py
@@ -28,15 +28,12 @@
async def sample_update_index_endpoint():
- """Snippet for update_index_endpoint"""
-
# Create a client
client = aiplatform_v1.IndexEndpointServiceAsyncClient()
# Initialize request argument(s)
index_endpoint = aiplatform_v1.IndexEndpoint()
index_endpoint.display_name = "display_name_value"
- index_endpoint.network = "network_value"
request = aiplatform_v1.UpdateIndexEndpointRequest(
index_endpoint=index_endpoint,
@@ -45,7 +42,7 @@ async def sample_update_index_endpoint():
# Make the request
response = await client.update_index_endpoint(request=request)
- # Handle response
+ # Handle the response
print(response)
# [END aiplatform_generated_aiplatform_v1_IndexEndpointService_UpdateIndexEndpoint_async]
diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1_index_endpoint_service_update_index_endpoint_sync.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1_index_endpoint_service_update_index_endpoint_sync.py
index 7b011c5ff1..dbde72ae6f 100644
--- a/samples/generated_samples/aiplatform_generated_aiplatform_v1_index_endpoint_service_update_index_endpoint_sync.py
+++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1_index_endpoint_service_update_index_endpoint_sync.py
@@ -28,15 +28,12 @@
def sample_update_index_endpoint():
- """Snippet for update_index_endpoint"""
-
# Create a client
client = aiplatform_v1.IndexEndpointServiceClient()
# Initialize request argument(s)
index_endpoint = aiplatform_v1.IndexEndpoint()
index_endpoint.display_name = "display_name_value"
- index_endpoint.network = "network_value"
request = aiplatform_v1.UpdateIndexEndpointRequest(
index_endpoint=index_endpoint,
@@ -45,7 +42,7 @@ def sample_update_index_endpoint():
# Make the request
response = client.update_index_endpoint(request=request)
- # Handle response
+ # Handle the response
print(response)
# [END aiplatform_generated_aiplatform_v1_IndexEndpointService_UpdateIndexEndpoint_sync]
diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1_index_service_create_index_async.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1_index_service_create_index_async.py
index 37963f08e4..ff75fd6c7c 100644
--- a/samples/generated_samples/aiplatform_generated_aiplatform_v1_index_service_create_index_async.py
+++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1_index_service_create_index_async.py
@@ -28,8 +28,6 @@
async def sample_create_index():
- """Snippet for create_index"""
-
# Create a client
client = aiplatform_v1.IndexServiceAsyncClient()
@@ -38,7 +36,7 @@ async def sample_create_index():
index.display_name = "display_name_value"
request = aiplatform_v1.CreateIndexRequest(
- parent="projects/{project}/locations/{location}",
+ parent="parent_value",
index=index,
)
@@ -48,6 +46,8 @@ async def sample_create_index():
print("Waiting for operation to complete...")
response = await operation.result()
+
+ # Handle the response
print(response)
# [END aiplatform_generated_aiplatform_v1_IndexService_CreateIndex_async]
diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1_index_service_create_index_sync.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1_index_service_create_index_sync.py
index a2c91446e6..59bf750980 100644
--- a/samples/generated_samples/aiplatform_generated_aiplatform_v1_index_service_create_index_sync.py
+++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1_index_service_create_index_sync.py
@@ -28,8 +28,6 @@
def sample_create_index():
- """Snippet for create_index"""
-
# Create a client
client = aiplatform_v1.IndexServiceClient()
@@ -38,7 +36,7 @@ def sample_create_index():
index.display_name = "display_name_value"
request = aiplatform_v1.CreateIndexRequest(
- parent="projects/{project}/locations/{location}",
+ parent="parent_value",
index=index,
)
@@ -48,6 +46,8 @@ def sample_create_index():
print("Waiting for operation to complete...")
response = operation.result()
+
+ # Handle the response
print(response)
# [END aiplatform_generated_aiplatform_v1_IndexService_CreateIndex_sync]
diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1_index_service_delete_index_async.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1_index_service_delete_index_async.py
index f9052903ba..c28b31a8dd 100644
--- a/samples/generated_samples/aiplatform_generated_aiplatform_v1_index_service_delete_index_async.py
+++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1_index_service_delete_index_async.py
@@ -28,14 +28,12 @@
async def sample_delete_index():
- """Snippet for delete_index"""
-
# Create a client
client = aiplatform_v1.IndexServiceAsyncClient()
# Initialize request argument(s)
request = aiplatform_v1.DeleteIndexRequest(
- name="projects/{project}/locations/{location}/indexes/{index}",
+ name="name_value",
)
# Make the request
@@ -44,6 +42,8 @@ async def sample_delete_index():
print("Waiting for operation to complete...")
response = await operation.result()
+
+ # Handle the response
print(response)
# [END aiplatform_generated_aiplatform_v1_IndexService_DeleteIndex_async]
diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1_index_service_delete_index_sync.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1_index_service_delete_index_sync.py
index a5b62578f6..867b0ca5f2 100644
--- a/samples/generated_samples/aiplatform_generated_aiplatform_v1_index_service_delete_index_sync.py
+++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1_index_service_delete_index_sync.py
@@ -28,14 +28,12 @@
def sample_delete_index():
- """Snippet for delete_index"""
-
# Create a client
client = aiplatform_v1.IndexServiceClient()
# Initialize request argument(s)
request = aiplatform_v1.DeleteIndexRequest(
- name="projects/{project}/locations/{location}/indexes/{index}",
+ name="name_value",
)
# Make the request
@@ -44,6 +42,8 @@ def sample_delete_index():
print("Waiting for operation to complete...")
response = operation.result()
+
+ # Handle the response
print(response)
# [END aiplatform_generated_aiplatform_v1_IndexService_DeleteIndex_sync]
diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1_index_service_get_index_async.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1_index_service_get_index_async.py
index 5ac015f2dd..d394cc0c91 100644
--- a/samples/generated_samples/aiplatform_generated_aiplatform_v1_index_service_get_index_async.py
+++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1_index_service_get_index_async.py
@@ -28,20 +28,18 @@
async def sample_get_index():
- """Snippet for get_index"""
-
# Create a client
client = aiplatform_v1.IndexServiceAsyncClient()
# Initialize request argument(s)
request = aiplatform_v1.GetIndexRequest(
- name="projects/{project}/locations/{location}/indexes/{index}",
+ name="name_value",
)
# Make the request
response = await client.get_index(request=request)
- # Handle response
+ # Handle the response
print(response)
# [END aiplatform_generated_aiplatform_v1_IndexService_GetIndex_async]
diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1_index_service_get_index_sync.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1_index_service_get_index_sync.py
index a8d9a38457..f59aa54913 100644
--- a/samples/generated_samples/aiplatform_generated_aiplatform_v1_index_service_get_index_sync.py
+++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1_index_service_get_index_sync.py
@@ -28,20 +28,18 @@
def sample_get_index():
- """Snippet for get_index"""
-
# Create a client
client = aiplatform_v1.IndexServiceClient()
# Initialize request argument(s)
request = aiplatform_v1.GetIndexRequest(
- name="projects/{project}/locations/{location}/indexes/{index}",
+ name="name_value",
)
# Make the request
response = client.get_index(request=request)
- # Handle response
+ # Handle the response
print(response)
# [END aiplatform_generated_aiplatform_v1_IndexService_GetIndex_sync]
diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1_index_service_list_indexes_async.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1_index_service_list_indexes_async.py
index 9a6a0160d4..5aa51a9aea 100644
--- a/samples/generated_samples/aiplatform_generated_aiplatform_v1_index_service_list_indexes_async.py
+++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1_index_service_list_indexes_async.py
@@ -28,18 +28,18 @@
async def sample_list_indexes():
- """Snippet for list_indexes"""
-
# Create a client
client = aiplatform_v1.IndexServiceAsyncClient()
# Initialize request argument(s)
request = aiplatform_v1.ListIndexesRequest(
- parent="projects/{project}/locations/{location}",
+ parent="parent_value",
)
# Make the request
page_result = client.list_indexes(request=request)
+
+ # Handle the response
async for response in page_result:
print(response)
diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1_index_service_list_indexes_sync.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1_index_service_list_indexes_sync.py
index cff3a5e7e4..4d09207b88 100644
--- a/samples/generated_samples/aiplatform_generated_aiplatform_v1_index_service_list_indexes_sync.py
+++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1_index_service_list_indexes_sync.py
@@ -28,18 +28,18 @@
def sample_list_indexes():
- """Snippet for list_indexes"""
-
# Create a client
client = aiplatform_v1.IndexServiceClient()
# Initialize request argument(s)
request = aiplatform_v1.ListIndexesRequest(
- parent="projects/{project}/locations/{location}",
+ parent="parent_value",
)
# Make the request
page_result = client.list_indexes(request=request)
+
+ # Handle the response
for response in page_result:
print(response)
diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1_index_service_update_index_async.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1_index_service_update_index_async.py
index 9ee9e65ad1..c9b77e9c73 100644
--- a/samples/generated_samples/aiplatform_generated_aiplatform_v1_index_service_update_index_async.py
+++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1_index_service_update_index_async.py
@@ -28,8 +28,6 @@
async def sample_update_index():
- """Snippet for update_index"""
-
# Create a client
client = aiplatform_v1.IndexServiceAsyncClient()
@@ -47,6 +45,8 @@ async def sample_update_index():
print("Waiting for operation to complete...")
response = await operation.result()
+
+ # Handle the response
print(response)
# [END aiplatform_generated_aiplatform_v1_IndexService_UpdateIndex_async]
diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1_index_service_update_index_sync.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1_index_service_update_index_sync.py
index ca8964e1c5..77f0a34dd6 100644
--- a/samples/generated_samples/aiplatform_generated_aiplatform_v1_index_service_update_index_sync.py
+++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1_index_service_update_index_sync.py
@@ -28,8 +28,6 @@
def sample_update_index():
- """Snippet for update_index"""
-
# Create a client
client = aiplatform_v1.IndexServiceClient()
@@ -47,6 +45,8 @@ def sample_update_index():
print("Waiting for operation to complete...")
response = operation.result()
+
+ # Handle the response
print(response)
# [END aiplatform_generated_aiplatform_v1_IndexService_UpdateIndex_sync]
diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1_job_service_cancel_batch_prediction_job_async.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1_job_service_cancel_batch_prediction_job_async.py
index f784cd4a2d..3851a47fdb 100644
--- a/samples/generated_samples/aiplatform_generated_aiplatform_v1_job_service_cancel_batch_prediction_job_async.py
+++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1_job_service_cancel_batch_prediction_job_async.py
@@ -28,18 +28,16 @@
async def sample_cancel_batch_prediction_job():
- """Snippet for cancel_batch_prediction_job"""
-
# Create a client
client = aiplatform_v1.JobServiceAsyncClient()
# Initialize request argument(s)
request = aiplatform_v1.CancelBatchPredictionJobRequest(
- name="projects/{project}/locations/{location}/batchPredictionJobs/{batch_prediction_job}",
+ name="name_value",
)
# Make the request
- response = await client.cancel_batch_prediction_job(request=request)
+ await client.cancel_batch_prediction_job(request=request)
# [END aiplatform_generated_aiplatform_v1_JobService_CancelBatchPredictionJob_async]
diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1_job_service_cancel_batch_prediction_job_sync.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1_job_service_cancel_batch_prediction_job_sync.py
index 1639c4008d..b1b77354c5 100644
--- a/samples/generated_samples/aiplatform_generated_aiplatform_v1_job_service_cancel_batch_prediction_job_sync.py
+++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1_job_service_cancel_batch_prediction_job_sync.py
@@ -28,18 +28,16 @@
def sample_cancel_batch_prediction_job():
- """Snippet for cancel_batch_prediction_job"""
-
# Create a client
client = aiplatform_v1.JobServiceClient()
# Initialize request argument(s)
request = aiplatform_v1.CancelBatchPredictionJobRequest(
- name="projects/{project}/locations/{location}/batchPredictionJobs/{batch_prediction_job}",
+ name="name_value",
)
# Make the request
- response = client.cancel_batch_prediction_job(request=request)
+ client.cancel_batch_prediction_job(request=request)
# [END aiplatform_generated_aiplatform_v1_JobService_CancelBatchPredictionJob_sync]
diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1_job_service_cancel_custom_job_async.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1_job_service_cancel_custom_job_async.py
index 8fb89418f7..1ab12e6b03 100644
--- a/samples/generated_samples/aiplatform_generated_aiplatform_v1_job_service_cancel_custom_job_async.py
+++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1_job_service_cancel_custom_job_async.py
@@ -28,18 +28,16 @@
async def sample_cancel_custom_job():
- """Snippet for cancel_custom_job"""
-
# Create a client
client = aiplatform_v1.JobServiceAsyncClient()
# Initialize request argument(s)
request = aiplatform_v1.CancelCustomJobRequest(
- name="projects/{project}/locations/{location}/customJobs/{custom_job}",
+ name="name_value",
)
# Make the request
- response = await client.cancel_custom_job(request=request)
+ await client.cancel_custom_job(request=request)
# [END aiplatform_generated_aiplatform_v1_JobService_CancelCustomJob_async]
diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1_job_service_cancel_custom_job_sync.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1_job_service_cancel_custom_job_sync.py
index ca2dc05f41..b246ad41b5 100644
--- a/samples/generated_samples/aiplatform_generated_aiplatform_v1_job_service_cancel_custom_job_sync.py
+++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1_job_service_cancel_custom_job_sync.py
@@ -28,18 +28,16 @@
def sample_cancel_custom_job():
- """Snippet for cancel_custom_job"""
-
# Create a client
client = aiplatform_v1.JobServiceClient()
# Initialize request argument(s)
request = aiplatform_v1.CancelCustomJobRequest(
- name="projects/{project}/locations/{location}/customJobs/{custom_job}",
+ name="name_value",
)
# Make the request
- response = client.cancel_custom_job(request=request)
+ client.cancel_custom_job(request=request)
# [END aiplatform_generated_aiplatform_v1_JobService_CancelCustomJob_sync]
diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1_job_service_cancel_data_labeling_job_async.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1_job_service_cancel_data_labeling_job_async.py
index efd119be2c..619e954758 100644
--- a/samples/generated_samples/aiplatform_generated_aiplatform_v1_job_service_cancel_data_labeling_job_async.py
+++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1_job_service_cancel_data_labeling_job_async.py
@@ -28,18 +28,16 @@
async def sample_cancel_data_labeling_job():
- """Snippet for cancel_data_labeling_job"""
-
# Create a client
client = aiplatform_v1.JobServiceAsyncClient()
# Initialize request argument(s)
request = aiplatform_v1.CancelDataLabelingJobRequest(
- name="projects/{project}/locations/{location}/dataLabelingJobs/{data_labeling_job}",
+ name="name_value",
)
# Make the request
- response = await client.cancel_data_labeling_job(request=request)
+ await client.cancel_data_labeling_job(request=request)
# [END aiplatform_generated_aiplatform_v1_JobService_CancelDataLabelingJob_async]
diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1_job_service_cancel_data_labeling_job_sync.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1_job_service_cancel_data_labeling_job_sync.py
index 2cea811678..a76ac1e90a 100644
--- a/samples/generated_samples/aiplatform_generated_aiplatform_v1_job_service_cancel_data_labeling_job_sync.py
+++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1_job_service_cancel_data_labeling_job_sync.py
@@ -28,18 +28,16 @@
def sample_cancel_data_labeling_job():
- """Snippet for cancel_data_labeling_job"""
-
# Create a client
client = aiplatform_v1.JobServiceClient()
# Initialize request argument(s)
request = aiplatform_v1.CancelDataLabelingJobRequest(
- name="projects/{project}/locations/{location}/dataLabelingJobs/{data_labeling_job}",
+ name="name_value",
)
# Make the request
- response = client.cancel_data_labeling_job(request=request)
+ client.cancel_data_labeling_job(request=request)
# [END aiplatform_generated_aiplatform_v1_JobService_CancelDataLabelingJob_sync]
diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1_job_service_cancel_hyperparameter_tuning_job_async.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1_job_service_cancel_hyperparameter_tuning_job_async.py
index 0b57b2e921..f39141a8ef 100644
--- a/samples/generated_samples/aiplatform_generated_aiplatform_v1_job_service_cancel_hyperparameter_tuning_job_async.py
+++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1_job_service_cancel_hyperparameter_tuning_job_async.py
@@ -28,18 +28,16 @@
async def sample_cancel_hyperparameter_tuning_job():
- """Snippet for cancel_hyperparameter_tuning_job"""
-
# Create a client
client = aiplatform_v1.JobServiceAsyncClient()
# Initialize request argument(s)
request = aiplatform_v1.CancelHyperparameterTuningJobRequest(
- name="projects/{project}/locations/{location}/hyperparameterTuningJobs/{hyperparameter_tuning_job}",
+ name="name_value",
)
# Make the request
- response = await client.cancel_hyperparameter_tuning_job(request=request)
+ await client.cancel_hyperparameter_tuning_job(request=request)
# [END aiplatform_generated_aiplatform_v1_JobService_CancelHyperparameterTuningJob_async]
diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1_job_service_cancel_hyperparameter_tuning_job_sync.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1_job_service_cancel_hyperparameter_tuning_job_sync.py
index 48b26ed323..e4cdce1b63 100644
--- a/samples/generated_samples/aiplatform_generated_aiplatform_v1_job_service_cancel_hyperparameter_tuning_job_sync.py
+++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1_job_service_cancel_hyperparameter_tuning_job_sync.py
@@ -28,18 +28,16 @@
def sample_cancel_hyperparameter_tuning_job():
- """Snippet for cancel_hyperparameter_tuning_job"""
-
# Create a client
client = aiplatform_v1.JobServiceClient()
# Initialize request argument(s)
request = aiplatform_v1.CancelHyperparameterTuningJobRequest(
- name="projects/{project}/locations/{location}/hyperparameterTuningJobs/{hyperparameter_tuning_job}",
+ name="name_value",
)
# Make the request
- response = client.cancel_hyperparameter_tuning_job(request=request)
+ client.cancel_hyperparameter_tuning_job(request=request)
# [END aiplatform_generated_aiplatform_v1_JobService_CancelHyperparameterTuningJob_sync]
diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1_job_service_create_batch_prediction_job_async.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1_job_service_create_batch_prediction_job_async.py
index e3e51ef9b2..71d8d78dfe 100644
--- a/samples/generated_samples/aiplatform_generated_aiplatform_v1_job_service_create_batch_prediction_job_async.py
+++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1_job_service_create_batch_prediction_job_async.py
@@ -28,29 +28,26 @@
async def sample_create_batch_prediction_job():
- """Snippet for create_batch_prediction_job"""
-
# Create a client
client = aiplatform_v1.JobServiceAsyncClient()
# Initialize request argument(s)
batch_prediction_job = aiplatform_v1.BatchPredictionJob()
batch_prediction_job.display_name = "display_name_value"
- batch_prediction_job.model = "projects/{project}/locations/{location}/models/{model}"
batch_prediction_job.input_config.gcs_source.uris = ['uris_value_1', 'uris_value_2']
batch_prediction_job.input_config.instances_format = "instances_format_value"
batch_prediction_job.output_config.gcs_destination.output_uri_prefix = "output_uri_prefix_value"
batch_prediction_job.output_config.predictions_format = "predictions_format_value"
request = aiplatform_v1.CreateBatchPredictionJobRequest(
- parent="projects/{project}/locations/{location}",
+ parent="parent_value",
batch_prediction_job=batch_prediction_job,
)
# Make the request
response = await client.create_batch_prediction_job(request=request)
- # Handle response
+ # Handle the response
print(response)
# [END aiplatform_generated_aiplatform_v1_JobService_CreateBatchPredictionJob_async]
diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1_job_service_create_batch_prediction_job_sync.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1_job_service_create_batch_prediction_job_sync.py
index 77da5aed70..f5a8add5b9 100644
--- a/samples/generated_samples/aiplatform_generated_aiplatform_v1_job_service_create_batch_prediction_job_sync.py
+++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1_job_service_create_batch_prediction_job_sync.py
@@ -28,29 +28,26 @@
def sample_create_batch_prediction_job():
- """Snippet for create_batch_prediction_job"""
-
# Create a client
client = aiplatform_v1.JobServiceClient()
# Initialize request argument(s)
batch_prediction_job = aiplatform_v1.BatchPredictionJob()
batch_prediction_job.display_name = "display_name_value"
- batch_prediction_job.model = "projects/{project}/locations/{location}/models/{model}"
batch_prediction_job.input_config.gcs_source.uris = ['uris_value_1', 'uris_value_2']
batch_prediction_job.input_config.instances_format = "instances_format_value"
batch_prediction_job.output_config.gcs_destination.output_uri_prefix = "output_uri_prefix_value"
batch_prediction_job.output_config.predictions_format = "predictions_format_value"
request = aiplatform_v1.CreateBatchPredictionJobRequest(
- parent="projects/{project}/locations/{location}",
+ parent="parent_value",
batch_prediction_job=batch_prediction_job,
)
# Make the request
response = client.create_batch_prediction_job(request=request)
- # Handle response
+ # Handle the response
print(response)
# [END aiplatform_generated_aiplatform_v1_JobService_CreateBatchPredictionJob_sync]
diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1_job_service_create_custom_job_async.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1_job_service_create_custom_job_async.py
index 5fa38becd6..1240d3adf9 100644
--- a/samples/generated_samples/aiplatform_generated_aiplatform_v1_job_service_create_custom_job_async.py
+++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1_job_service_create_custom_job_async.py
@@ -28,8 +28,6 @@
async def sample_create_custom_job():
- """Snippet for create_custom_job"""
-
# Create a client
client = aiplatform_v1.JobServiceAsyncClient()
@@ -39,14 +37,14 @@ async def sample_create_custom_job():
custom_job.job_spec.worker_pool_specs.container_spec.image_uri = "image_uri_value"
request = aiplatform_v1.CreateCustomJobRequest(
- parent="projects/{project}/locations/{location}",
+ parent="parent_value",
custom_job=custom_job,
)
# Make the request
response = await client.create_custom_job(request=request)
- # Handle response
+ # Handle the response
print(response)
# [END aiplatform_generated_aiplatform_v1_JobService_CreateCustomJob_async]
diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1_job_service_create_custom_job_sync.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1_job_service_create_custom_job_sync.py
index 53e241a4bb..6df33d3d86 100644
--- a/samples/generated_samples/aiplatform_generated_aiplatform_v1_job_service_create_custom_job_sync.py
+++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1_job_service_create_custom_job_sync.py
@@ -28,8 +28,6 @@
def sample_create_custom_job():
- """Snippet for create_custom_job"""
-
# Create a client
client = aiplatform_v1.JobServiceClient()
@@ -39,14 +37,14 @@ def sample_create_custom_job():
custom_job.job_spec.worker_pool_specs.container_spec.image_uri = "image_uri_value"
request = aiplatform_v1.CreateCustomJobRequest(
- parent="projects/{project}/locations/{location}",
+ parent="parent_value",
custom_job=custom_job,
)
# Make the request
response = client.create_custom_job(request=request)
- # Handle response
+ # Handle the response
print(response)
# [END aiplatform_generated_aiplatform_v1_JobService_CreateCustomJob_sync]
diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1_job_service_create_data_labeling_job_async.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1_job_service_create_data_labeling_job_async.py
index 486e1cc647..40b4b2717f 100644
--- a/samples/generated_samples/aiplatform_generated_aiplatform_v1_job_service_create_data_labeling_job_async.py
+++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1_job_service_create_data_labeling_job_async.py
@@ -28,29 +28,27 @@
async def sample_create_data_labeling_job():
- """Snippet for create_data_labeling_job"""
-
# Create a client
client = aiplatform_v1.JobServiceAsyncClient()
# Initialize request argument(s)
data_labeling_job = aiplatform_v1.DataLabelingJob()
data_labeling_job.display_name = "display_name_value"
- data_labeling_job.datasets = "projects/{project}/locations/{location}/datasets/{dataset}"
+ data_labeling_job.datasets = ['datasets_value_1', 'datasets_value_2']
data_labeling_job.labeler_count = 1375
data_labeling_job.instruction_uri = "instruction_uri_value"
data_labeling_job.inputs_schema_uri = "inputs_schema_uri_value"
data_labeling_job.inputs.null_value = "NULL_VALUE"
request = aiplatform_v1.CreateDataLabelingJobRequest(
- parent="projects/{project}/locations/{location}",
+ parent="parent_value",
data_labeling_job=data_labeling_job,
)
# Make the request
response = await client.create_data_labeling_job(request=request)
- # Handle response
+ # Handle the response
print(response)
# [END aiplatform_generated_aiplatform_v1_JobService_CreateDataLabelingJob_async]
diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1_job_service_create_data_labeling_job_sync.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1_job_service_create_data_labeling_job_sync.py
index d3b34b91d8..8e363825e0 100644
--- a/samples/generated_samples/aiplatform_generated_aiplatform_v1_job_service_create_data_labeling_job_sync.py
+++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1_job_service_create_data_labeling_job_sync.py
@@ -28,29 +28,27 @@
def sample_create_data_labeling_job():
- """Snippet for create_data_labeling_job"""
-
# Create a client
client = aiplatform_v1.JobServiceClient()
# Initialize request argument(s)
data_labeling_job = aiplatform_v1.DataLabelingJob()
data_labeling_job.display_name = "display_name_value"
- data_labeling_job.datasets = "projects/{project}/locations/{location}/datasets/{dataset}"
+ data_labeling_job.datasets = ['datasets_value_1', 'datasets_value_2']
data_labeling_job.labeler_count = 1375
data_labeling_job.instruction_uri = "instruction_uri_value"
data_labeling_job.inputs_schema_uri = "inputs_schema_uri_value"
data_labeling_job.inputs.null_value = "NULL_VALUE"
request = aiplatform_v1.CreateDataLabelingJobRequest(
- parent="projects/{project}/locations/{location}",
+ parent="parent_value",
data_labeling_job=data_labeling_job,
)
# Make the request
response = client.create_data_labeling_job(request=request)
- # Handle response
+ # Handle the response
print(response)
# [END aiplatform_generated_aiplatform_v1_JobService_CreateDataLabelingJob_sync]
diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1_job_service_create_hyperparameter_tuning_job_async.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1_job_service_create_hyperparameter_tuning_job_async.py
index 3cb9b69b38..edf9bcc230 100644
--- a/samples/generated_samples/aiplatform_generated_aiplatform_v1_job_service_create_hyperparameter_tuning_job_async.py
+++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1_job_service_create_hyperparameter_tuning_job_async.py
@@ -28,8 +28,6 @@
async def sample_create_hyperparameter_tuning_job():
- """Snippet for create_hyperparameter_tuning_job"""
-
# Create a client
client = aiplatform_v1.JobServiceAsyncClient()
@@ -46,14 +44,14 @@ async def sample_create_hyperparameter_tuning_job():
hyperparameter_tuning_job.trial_job_spec.worker_pool_specs.container_spec.image_uri = "image_uri_value"
request = aiplatform_v1.CreateHyperparameterTuningJobRequest(
- parent="projects/{project}/locations/{location}",
+ parent="parent_value",
hyperparameter_tuning_job=hyperparameter_tuning_job,
)
# Make the request
response = await client.create_hyperparameter_tuning_job(request=request)
- # Handle response
+ # Handle the response
print(response)
# [END aiplatform_generated_aiplatform_v1_JobService_CreateHyperparameterTuningJob_async]
diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1_job_service_create_hyperparameter_tuning_job_sync.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1_job_service_create_hyperparameter_tuning_job_sync.py
index f2278f35f3..99948bcb86 100644
--- a/samples/generated_samples/aiplatform_generated_aiplatform_v1_job_service_create_hyperparameter_tuning_job_sync.py
+++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1_job_service_create_hyperparameter_tuning_job_sync.py
@@ -28,8 +28,6 @@
def sample_create_hyperparameter_tuning_job():
- """Snippet for create_hyperparameter_tuning_job"""
-
# Create a client
client = aiplatform_v1.JobServiceClient()
@@ -46,14 +44,14 @@ def sample_create_hyperparameter_tuning_job():
hyperparameter_tuning_job.trial_job_spec.worker_pool_specs.container_spec.image_uri = "image_uri_value"
request = aiplatform_v1.CreateHyperparameterTuningJobRequest(
- parent="projects/{project}/locations/{location}",
+ parent="parent_value",
hyperparameter_tuning_job=hyperparameter_tuning_job,
)
# Make the request
response = client.create_hyperparameter_tuning_job(request=request)
- # Handle response
+ # Handle the response
print(response)
# [END aiplatform_generated_aiplatform_v1_JobService_CreateHyperparameterTuningJob_sync]
diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1_job_service_create_model_deployment_monitoring_job_async.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1_job_service_create_model_deployment_monitoring_job_async.py
index c667e87757..77f8f678ee 100644
--- a/samples/generated_samples/aiplatform_generated_aiplatform_v1_job_service_create_model_deployment_monitoring_job_async.py
+++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1_job_service_create_model_deployment_monitoring_job_async.py
@@ -28,25 +28,23 @@
async def sample_create_model_deployment_monitoring_job():
- """Snippet for create_model_deployment_monitoring_job"""
-
# Create a client
client = aiplatform_v1.JobServiceAsyncClient()
# Initialize request argument(s)
model_deployment_monitoring_job = aiplatform_v1.ModelDeploymentMonitoringJob()
model_deployment_monitoring_job.display_name = "display_name_value"
- model_deployment_monitoring_job.endpoint = "projects/{project}/locations/{location}/endpoints/{endpoint}"
+ model_deployment_monitoring_job.endpoint = "endpoint_value"
request = aiplatform_v1.CreateModelDeploymentMonitoringJobRequest(
- parent="projects/{project}/locations/{location}",
+ parent="parent_value",
model_deployment_monitoring_job=model_deployment_monitoring_job,
)
# Make the request
response = await client.create_model_deployment_monitoring_job(request=request)
- # Handle response
+ # Handle the response
print(response)
# [END aiplatform_generated_aiplatform_v1_JobService_CreateModelDeploymentMonitoringJob_async]
diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1_job_service_create_model_deployment_monitoring_job_sync.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1_job_service_create_model_deployment_monitoring_job_sync.py
index 33763a68dc..981fff73f8 100644
--- a/samples/generated_samples/aiplatform_generated_aiplatform_v1_job_service_create_model_deployment_monitoring_job_sync.py
+++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1_job_service_create_model_deployment_monitoring_job_sync.py
@@ -28,25 +28,23 @@
def sample_create_model_deployment_monitoring_job():
- """Snippet for create_model_deployment_monitoring_job"""
-
# Create a client
client = aiplatform_v1.JobServiceClient()
# Initialize request argument(s)
model_deployment_monitoring_job = aiplatform_v1.ModelDeploymentMonitoringJob()
model_deployment_monitoring_job.display_name = "display_name_value"
- model_deployment_monitoring_job.endpoint = "projects/{project}/locations/{location}/endpoints/{endpoint}"
+ model_deployment_monitoring_job.endpoint = "endpoint_value"
request = aiplatform_v1.CreateModelDeploymentMonitoringJobRequest(
- parent="projects/{project}/locations/{location}",
+ parent="parent_value",
model_deployment_monitoring_job=model_deployment_monitoring_job,
)
# Make the request
response = client.create_model_deployment_monitoring_job(request=request)
- # Handle response
+ # Handle the response
print(response)
# [END aiplatform_generated_aiplatform_v1_JobService_CreateModelDeploymentMonitoringJob_sync]
diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1_job_service_delete_batch_prediction_job_async.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1_job_service_delete_batch_prediction_job_async.py
index fec71df504..72452a4c15 100644
--- a/samples/generated_samples/aiplatform_generated_aiplatform_v1_job_service_delete_batch_prediction_job_async.py
+++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1_job_service_delete_batch_prediction_job_async.py
@@ -28,14 +28,12 @@
async def sample_delete_batch_prediction_job():
- """Snippet for delete_batch_prediction_job"""
-
# Create a client
client = aiplatform_v1.JobServiceAsyncClient()
# Initialize request argument(s)
request = aiplatform_v1.DeleteBatchPredictionJobRequest(
- name="projects/{project}/locations/{location}/batchPredictionJobs/{batch_prediction_job}",
+ name="name_value",
)
# Make the request
@@ -44,6 +42,8 @@ async def sample_delete_batch_prediction_job():
print("Waiting for operation to complete...")
response = await operation.result()
+
+ # Handle the response
print(response)
# [END aiplatform_generated_aiplatform_v1_JobService_DeleteBatchPredictionJob_async]
diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1_job_service_delete_batch_prediction_job_sync.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1_job_service_delete_batch_prediction_job_sync.py
index a70d092a2a..0360478193 100644
--- a/samples/generated_samples/aiplatform_generated_aiplatform_v1_job_service_delete_batch_prediction_job_sync.py
+++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1_job_service_delete_batch_prediction_job_sync.py
@@ -28,14 +28,12 @@
def sample_delete_batch_prediction_job():
- """Snippet for delete_batch_prediction_job"""
-
# Create a client
client = aiplatform_v1.JobServiceClient()
# Initialize request argument(s)
request = aiplatform_v1.DeleteBatchPredictionJobRequest(
- name="projects/{project}/locations/{location}/batchPredictionJobs/{batch_prediction_job}",
+ name="name_value",
)
# Make the request
@@ -44,6 +42,8 @@ def sample_delete_batch_prediction_job():
print("Waiting for operation to complete...")
response = operation.result()
+
+ # Handle the response
print(response)
# [END aiplatform_generated_aiplatform_v1_JobService_DeleteBatchPredictionJob_sync]
diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1_job_service_delete_custom_job_async.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1_job_service_delete_custom_job_async.py
index f6d9787e6d..91d285d624 100644
--- a/samples/generated_samples/aiplatform_generated_aiplatform_v1_job_service_delete_custom_job_async.py
+++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1_job_service_delete_custom_job_async.py
@@ -28,14 +28,12 @@
async def sample_delete_custom_job():
- """Snippet for delete_custom_job"""
-
# Create a client
client = aiplatform_v1.JobServiceAsyncClient()
# Initialize request argument(s)
request = aiplatform_v1.DeleteCustomJobRequest(
- name="projects/{project}/locations/{location}/customJobs/{custom_job}",
+ name="name_value",
)
# Make the request
@@ -44,6 +42,8 @@ async def sample_delete_custom_job():
print("Waiting for operation to complete...")
response = await operation.result()
+
+ # Handle the response
print(response)
# [END aiplatform_generated_aiplatform_v1_JobService_DeleteCustomJob_async]
diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1_job_service_delete_custom_job_sync.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1_job_service_delete_custom_job_sync.py
index 575e707ae8..a56d0b00ac 100644
--- a/samples/generated_samples/aiplatform_generated_aiplatform_v1_job_service_delete_custom_job_sync.py
+++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1_job_service_delete_custom_job_sync.py
@@ -28,14 +28,12 @@
def sample_delete_custom_job():
- """Snippet for delete_custom_job"""
-
# Create a client
client = aiplatform_v1.JobServiceClient()
# Initialize request argument(s)
request = aiplatform_v1.DeleteCustomJobRequest(
- name="projects/{project}/locations/{location}/customJobs/{custom_job}",
+ name="name_value",
)
# Make the request
@@ -44,6 +42,8 @@ def sample_delete_custom_job():
print("Waiting for operation to complete...")
response = operation.result()
+
+ # Handle the response
print(response)
# [END aiplatform_generated_aiplatform_v1_JobService_DeleteCustomJob_sync]
diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1_job_service_delete_data_labeling_job_async.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1_job_service_delete_data_labeling_job_async.py
index 632c04cd8c..5393c94865 100644
--- a/samples/generated_samples/aiplatform_generated_aiplatform_v1_job_service_delete_data_labeling_job_async.py
+++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1_job_service_delete_data_labeling_job_async.py
@@ -28,14 +28,12 @@
async def sample_delete_data_labeling_job():
- """Snippet for delete_data_labeling_job"""
-
# Create a client
client = aiplatform_v1.JobServiceAsyncClient()
# Initialize request argument(s)
request = aiplatform_v1.DeleteDataLabelingJobRequest(
- name="projects/{project}/locations/{location}/dataLabelingJobs/{data_labeling_job}",
+ name="name_value",
)
# Make the request
@@ -44,6 +42,8 @@ async def sample_delete_data_labeling_job():
print("Waiting for operation to complete...")
response = await operation.result()
+
+ # Handle the response
print(response)
# [END aiplatform_generated_aiplatform_v1_JobService_DeleteDataLabelingJob_async]
diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1_job_service_delete_data_labeling_job_sync.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1_job_service_delete_data_labeling_job_sync.py
index df4dafd4c8..387353458d 100644
--- a/samples/generated_samples/aiplatform_generated_aiplatform_v1_job_service_delete_data_labeling_job_sync.py
+++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1_job_service_delete_data_labeling_job_sync.py
@@ -28,14 +28,12 @@
def sample_delete_data_labeling_job():
- """Snippet for delete_data_labeling_job"""
-
# Create a client
client = aiplatform_v1.JobServiceClient()
# Initialize request argument(s)
request = aiplatform_v1.DeleteDataLabelingJobRequest(
- name="projects/{project}/locations/{location}/dataLabelingJobs/{data_labeling_job}",
+ name="name_value",
)
# Make the request
@@ -44,6 +42,8 @@ def sample_delete_data_labeling_job():
print("Waiting for operation to complete...")
response = operation.result()
+
+ # Handle the response
print(response)
# [END aiplatform_generated_aiplatform_v1_JobService_DeleteDataLabelingJob_sync]
diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1_job_service_delete_hyperparameter_tuning_job_async.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1_job_service_delete_hyperparameter_tuning_job_async.py
index f72a38772d..27bcb19af7 100644
--- a/samples/generated_samples/aiplatform_generated_aiplatform_v1_job_service_delete_hyperparameter_tuning_job_async.py
+++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1_job_service_delete_hyperparameter_tuning_job_async.py
@@ -28,14 +28,12 @@
async def sample_delete_hyperparameter_tuning_job():
- """Snippet for delete_hyperparameter_tuning_job"""
-
# Create a client
client = aiplatform_v1.JobServiceAsyncClient()
# Initialize request argument(s)
request = aiplatform_v1.DeleteHyperparameterTuningJobRequest(
- name="projects/{project}/locations/{location}/hyperparameterTuningJobs/{hyperparameter_tuning_job}",
+ name="name_value",
)
# Make the request
@@ -44,6 +42,8 @@ async def sample_delete_hyperparameter_tuning_job():
print("Waiting for operation to complete...")
response = await operation.result()
+
+ # Handle the response
print(response)
# [END aiplatform_generated_aiplatform_v1_JobService_DeleteHyperparameterTuningJob_async]
diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1_job_service_delete_hyperparameter_tuning_job_sync.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1_job_service_delete_hyperparameter_tuning_job_sync.py
index 24f4eb2a4f..944c9dc2c5 100644
--- a/samples/generated_samples/aiplatform_generated_aiplatform_v1_job_service_delete_hyperparameter_tuning_job_sync.py
+++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1_job_service_delete_hyperparameter_tuning_job_sync.py
@@ -28,14 +28,12 @@
def sample_delete_hyperparameter_tuning_job():
- """Snippet for delete_hyperparameter_tuning_job"""
-
# Create a client
client = aiplatform_v1.JobServiceClient()
# Initialize request argument(s)
request = aiplatform_v1.DeleteHyperparameterTuningJobRequest(
- name="projects/{project}/locations/{location}/hyperparameterTuningJobs/{hyperparameter_tuning_job}",
+ name="name_value",
)
# Make the request
@@ -44,6 +42,8 @@ def sample_delete_hyperparameter_tuning_job():
print("Waiting for operation to complete...")
response = operation.result()
+
+ # Handle the response
print(response)
# [END aiplatform_generated_aiplatform_v1_JobService_DeleteHyperparameterTuningJob_sync]
diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1_job_service_delete_model_deployment_monitoring_job_async.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1_job_service_delete_model_deployment_monitoring_job_async.py
index 51f94f8939..cad9752aa4 100644
--- a/samples/generated_samples/aiplatform_generated_aiplatform_v1_job_service_delete_model_deployment_monitoring_job_async.py
+++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1_job_service_delete_model_deployment_monitoring_job_async.py
@@ -28,14 +28,12 @@
async def sample_delete_model_deployment_monitoring_job():
- """Snippet for delete_model_deployment_monitoring_job"""
-
# Create a client
client = aiplatform_v1.JobServiceAsyncClient()
# Initialize request argument(s)
request = aiplatform_v1.DeleteModelDeploymentMonitoringJobRequest(
- name="projects/{project}/locations/{location}/modelDeploymentMonitoringJobs/{model_deployment_monitoring_job}",
+ name="name_value",
)
# Make the request
@@ -44,6 +42,8 @@ async def sample_delete_model_deployment_monitoring_job():
print("Waiting for operation to complete...")
response = await operation.result()
+
+ # Handle the response
print(response)
# [END aiplatform_generated_aiplatform_v1_JobService_DeleteModelDeploymentMonitoringJob_async]
diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1_job_service_delete_model_deployment_monitoring_job_sync.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1_job_service_delete_model_deployment_monitoring_job_sync.py
index 527ddb31fd..546970c9c3 100644
--- a/samples/generated_samples/aiplatform_generated_aiplatform_v1_job_service_delete_model_deployment_monitoring_job_sync.py
+++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1_job_service_delete_model_deployment_monitoring_job_sync.py
@@ -28,14 +28,12 @@
def sample_delete_model_deployment_monitoring_job():
- """Snippet for delete_model_deployment_monitoring_job"""
-
# Create a client
client = aiplatform_v1.JobServiceClient()
# Initialize request argument(s)
request = aiplatform_v1.DeleteModelDeploymentMonitoringJobRequest(
- name="projects/{project}/locations/{location}/modelDeploymentMonitoringJobs/{model_deployment_monitoring_job}",
+ name="name_value",
)
# Make the request
@@ -44,6 +42,8 @@ def sample_delete_model_deployment_monitoring_job():
print("Waiting for operation to complete...")
response = operation.result()
+
+ # Handle the response
print(response)
# [END aiplatform_generated_aiplatform_v1_JobService_DeleteModelDeploymentMonitoringJob_sync]
diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1_job_service_get_batch_prediction_job_async.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1_job_service_get_batch_prediction_job_async.py
index c32a5d94a0..0dd8d0e2e1 100644
--- a/samples/generated_samples/aiplatform_generated_aiplatform_v1_job_service_get_batch_prediction_job_async.py
+++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1_job_service_get_batch_prediction_job_async.py
@@ -28,20 +28,18 @@
async def sample_get_batch_prediction_job():
- """Snippet for get_batch_prediction_job"""
-
# Create a client
client = aiplatform_v1.JobServiceAsyncClient()
# Initialize request argument(s)
request = aiplatform_v1.GetBatchPredictionJobRequest(
- name="projects/{project}/locations/{location}/batchPredictionJobs/{batch_prediction_job}",
+ name="name_value",
)
# Make the request
response = await client.get_batch_prediction_job(request=request)
- # Handle response
+ # Handle the response
print(response)
# [END aiplatform_generated_aiplatform_v1_JobService_GetBatchPredictionJob_async]
diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1_job_service_get_batch_prediction_job_sync.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1_job_service_get_batch_prediction_job_sync.py
index 7a60e57673..506ad84155 100644
--- a/samples/generated_samples/aiplatform_generated_aiplatform_v1_job_service_get_batch_prediction_job_sync.py
+++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1_job_service_get_batch_prediction_job_sync.py
@@ -28,20 +28,18 @@
def sample_get_batch_prediction_job():
- """Snippet for get_batch_prediction_job"""
-
# Create a client
client = aiplatform_v1.JobServiceClient()
# Initialize request argument(s)
request = aiplatform_v1.GetBatchPredictionJobRequest(
- name="projects/{project}/locations/{location}/batchPredictionJobs/{batch_prediction_job}",
+ name="name_value",
)
# Make the request
response = client.get_batch_prediction_job(request=request)
- # Handle response
+ # Handle the response
print(response)
# [END aiplatform_generated_aiplatform_v1_JobService_GetBatchPredictionJob_sync]
diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1_job_service_get_custom_job_async.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1_job_service_get_custom_job_async.py
index 4bb7a219fb..b027f52650 100644
--- a/samples/generated_samples/aiplatform_generated_aiplatform_v1_job_service_get_custom_job_async.py
+++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1_job_service_get_custom_job_async.py
@@ -28,20 +28,18 @@
async def sample_get_custom_job():
- """Snippet for get_custom_job"""
-
# Create a client
client = aiplatform_v1.JobServiceAsyncClient()
# Initialize request argument(s)
request = aiplatform_v1.GetCustomJobRequest(
- name="projects/{project}/locations/{location}/customJobs/{custom_job}",
+ name="name_value",
)
# Make the request
response = await client.get_custom_job(request=request)
- # Handle response
+ # Handle the response
print(response)
# [END aiplatform_generated_aiplatform_v1_JobService_GetCustomJob_async]
diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1_job_service_get_custom_job_sync.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1_job_service_get_custom_job_sync.py
index 4153f49a33..9591f6553b 100644
--- a/samples/generated_samples/aiplatform_generated_aiplatform_v1_job_service_get_custom_job_sync.py
+++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1_job_service_get_custom_job_sync.py
@@ -28,20 +28,18 @@
def sample_get_custom_job():
- """Snippet for get_custom_job"""
-
# Create a client
client = aiplatform_v1.JobServiceClient()
# Initialize request argument(s)
request = aiplatform_v1.GetCustomJobRequest(
- name="projects/{project}/locations/{location}/customJobs/{custom_job}",
+ name="name_value",
)
# Make the request
response = client.get_custom_job(request=request)
- # Handle response
+ # Handle the response
print(response)
# [END aiplatform_generated_aiplatform_v1_JobService_GetCustomJob_sync]
diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1_job_service_get_data_labeling_job_async.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1_job_service_get_data_labeling_job_async.py
index 323147fed7..cf32bc0082 100644
--- a/samples/generated_samples/aiplatform_generated_aiplatform_v1_job_service_get_data_labeling_job_async.py
+++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1_job_service_get_data_labeling_job_async.py
@@ -28,20 +28,18 @@
async def sample_get_data_labeling_job():
- """Snippet for get_data_labeling_job"""
-
# Create a client
client = aiplatform_v1.JobServiceAsyncClient()
# Initialize request argument(s)
request = aiplatform_v1.GetDataLabelingJobRequest(
- name="projects/{project}/locations/{location}/dataLabelingJobs/{data_labeling_job}",
+ name="name_value",
)
# Make the request
response = await client.get_data_labeling_job(request=request)
- # Handle response
+ # Handle the response
print(response)
# [END aiplatform_generated_aiplatform_v1_JobService_GetDataLabelingJob_async]
diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1_job_service_get_data_labeling_job_sync.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1_job_service_get_data_labeling_job_sync.py
index 3205c7068e..c964904b77 100644
--- a/samples/generated_samples/aiplatform_generated_aiplatform_v1_job_service_get_data_labeling_job_sync.py
+++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1_job_service_get_data_labeling_job_sync.py
@@ -28,20 +28,18 @@
def sample_get_data_labeling_job():
- """Snippet for get_data_labeling_job"""
-
# Create a client
client = aiplatform_v1.JobServiceClient()
# Initialize request argument(s)
request = aiplatform_v1.GetDataLabelingJobRequest(
- name="projects/{project}/locations/{location}/dataLabelingJobs/{data_labeling_job}",
+ name="name_value",
)
# Make the request
response = client.get_data_labeling_job(request=request)
- # Handle response
+ # Handle the response
print(response)
# [END aiplatform_generated_aiplatform_v1_JobService_GetDataLabelingJob_sync]
diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1_job_service_get_hyperparameter_tuning_job_async.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1_job_service_get_hyperparameter_tuning_job_async.py
index f184e5c955..629fb743ed 100644
--- a/samples/generated_samples/aiplatform_generated_aiplatform_v1_job_service_get_hyperparameter_tuning_job_async.py
+++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1_job_service_get_hyperparameter_tuning_job_async.py
@@ -28,20 +28,18 @@
async def sample_get_hyperparameter_tuning_job():
- """Snippet for get_hyperparameter_tuning_job"""
-
# Create a client
client = aiplatform_v1.JobServiceAsyncClient()
# Initialize request argument(s)
request = aiplatform_v1.GetHyperparameterTuningJobRequest(
- name="projects/{project}/locations/{location}/hyperparameterTuningJobs/{hyperparameter_tuning_job}",
+ name="name_value",
)
# Make the request
response = await client.get_hyperparameter_tuning_job(request=request)
- # Handle response
+ # Handle the response
print(response)
# [END aiplatform_generated_aiplatform_v1_JobService_GetHyperparameterTuningJob_async]
diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1_job_service_get_hyperparameter_tuning_job_sync.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1_job_service_get_hyperparameter_tuning_job_sync.py
index eaad3ac7de..924e49ed61 100644
--- a/samples/generated_samples/aiplatform_generated_aiplatform_v1_job_service_get_hyperparameter_tuning_job_sync.py
+++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1_job_service_get_hyperparameter_tuning_job_sync.py
@@ -28,20 +28,18 @@
def sample_get_hyperparameter_tuning_job():
- """Snippet for get_hyperparameter_tuning_job"""
-
# Create a client
client = aiplatform_v1.JobServiceClient()
# Initialize request argument(s)
request = aiplatform_v1.GetHyperparameterTuningJobRequest(
- name="projects/{project}/locations/{location}/hyperparameterTuningJobs/{hyperparameter_tuning_job}",
+ name="name_value",
)
# Make the request
response = client.get_hyperparameter_tuning_job(request=request)
- # Handle response
+ # Handle the response
print(response)
# [END aiplatform_generated_aiplatform_v1_JobService_GetHyperparameterTuningJob_sync]
diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1_job_service_get_model_deployment_monitoring_job_async.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1_job_service_get_model_deployment_monitoring_job_async.py
index 4a7467bd02..1597729f3a 100644
--- a/samples/generated_samples/aiplatform_generated_aiplatform_v1_job_service_get_model_deployment_monitoring_job_async.py
+++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1_job_service_get_model_deployment_monitoring_job_async.py
@@ -28,20 +28,18 @@
async def sample_get_model_deployment_monitoring_job():
- """Snippet for get_model_deployment_monitoring_job"""
-
# Create a client
client = aiplatform_v1.JobServiceAsyncClient()
# Initialize request argument(s)
request = aiplatform_v1.GetModelDeploymentMonitoringJobRequest(
- name="projects/{project}/locations/{location}/modelDeploymentMonitoringJobs/{model_deployment_monitoring_job}",
+ name="name_value",
)
# Make the request
response = await client.get_model_deployment_monitoring_job(request=request)
- # Handle response
+ # Handle the response
print(response)
# [END aiplatform_generated_aiplatform_v1_JobService_GetModelDeploymentMonitoringJob_async]
diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1_job_service_get_model_deployment_monitoring_job_sync.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1_job_service_get_model_deployment_monitoring_job_sync.py
index a77cd76b4c..42253ebf34 100644
--- a/samples/generated_samples/aiplatform_generated_aiplatform_v1_job_service_get_model_deployment_monitoring_job_sync.py
+++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1_job_service_get_model_deployment_monitoring_job_sync.py
@@ -28,20 +28,18 @@
def sample_get_model_deployment_monitoring_job():
- """Snippet for get_model_deployment_monitoring_job"""
-
# Create a client
client = aiplatform_v1.JobServiceClient()
# Initialize request argument(s)
request = aiplatform_v1.GetModelDeploymentMonitoringJobRequest(
- name="projects/{project}/locations/{location}/modelDeploymentMonitoringJobs/{model_deployment_monitoring_job}",
+ name="name_value",
)
# Make the request
response = client.get_model_deployment_monitoring_job(request=request)
- # Handle response
+ # Handle the response
print(response)
# [END aiplatform_generated_aiplatform_v1_JobService_GetModelDeploymentMonitoringJob_sync]
diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1_job_service_list_batch_prediction_jobs_async.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1_job_service_list_batch_prediction_jobs_async.py
index 60558f1e10..562dc0c864 100644
--- a/samples/generated_samples/aiplatform_generated_aiplatform_v1_job_service_list_batch_prediction_jobs_async.py
+++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1_job_service_list_batch_prediction_jobs_async.py
@@ -28,18 +28,18 @@
async def sample_list_batch_prediction_jobs():
- """Snippet for list_batch_prediction_jobs"""
-
# Create a client
client = aiplatform_v1.JobServiceAsyncClient()
# Initialize request argument(s)
request = aiplatform_v1.ListBatchPredictionJobsRequest(
- parent="projects/{project}/locations/{location}",
+ parent="parent_value",
)
# Make the request
page_result = client.list_batch_prediction_jobs(request=request)
+
+ # Handle the response
async for response in page_result:
print(response)
diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1_job_service_list_batch_prediction_jobs_sync.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1_job_service_list_batch_prediction_jobs_sync.py
index c7bce1120b..5714048d83 100644
--- a/samples/generated_samples/aiplatform_generated_aiplatform_v1_job_service_list_batch_prediction_jobs_sync.py
+++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1_job_service_list_batch_prediction_jobs_sync.py
@@ -28,18 +28,18 @@
def sample_list_batch_prediction_jobs():
- """Snippet for list_batch_prediction_jobs"""
-
# Create a client
client = aiplatform_v1.JobServiceClient()
# Initialize request argument(s)
request = aiplatform_v1.ListBatchPredictionJobsRequest(
- parent="projects/{project}/locations/{location}",
+ parent="parent_value",
)
# Make the request
page_result = client.list_batch_prediction_jobs(request=request)
+
+ # Handle the response
for response in page_result:
print(response)
diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1_job_service_list_custom_jobs_async.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1_job_service_list_custom_jobs_async.py
index 16d4f38995..4c611c4983 100644
--- a/samples/generated_samples/aiplatform_generated_aiplatform_v1_job_service_list_custom_jobs_async.py
+++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1_job_service_list_custom_jobs_async.py
@@ -28,18 +28,18 @@
async def sample_list_custom_jobs():
- """Snippet for list_custom_jobs"""
-
# Create a client
client = aiplatform_v1.JobServiceAsyncClient()
# Initialize request argument(s)
request = aiplatform_v1.ListCustomJobsRequest(
- parent="projects/{project}/locations/{location}",
+ parent="parent_value",
)
# Make the request
page_result = client.list_custom_jobs(request=request)
+
+ # Handle the response
async for response in page_result:
print(response)
diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1_job_service_list_custom_jobs_sync.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1_job_service_list_custom_jobs_sync.py
index fe2cd32a05..028b11b299 100644
--- a/samples/generated_samples/aiplatform_generated_aiplatform_v1_job_service_list_custom_jobs_sync.py
+++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1_job_service_list_custom_jobs_sync.py
@@ -28,18 +28,18 @@
def sample_list_custom_jobs():
- """Snippet for list_custom_jobs"""
-
# Create a client
client = aiplatform_v1.JobServiceClient()
# Initialize request argument(s)
request = aiplatform_v1.ListCustomJobsRequest(
- parent="projects/{project}/locations/{location}",
+ parent="parent_value",
)
# Make the request
page_result = client.list_custom_jobs(request=request)
+
+ # Handle the response
for response in page_result:
print(response)
diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1_job_service_list_data_labeling_jobs_async.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1_job_service_list_data_labeling_jobs_async.py
index ff63ecabec..1bec068aa6 100644
--- a/samples/generated_samples/aiplatform_generated_aiplatform_v1_job_service_list_data_labeling_jobs_async.py
+++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1_job_service_list_data_labeling_jobs_async.py
@@ -28,18 +28,18 @@
async def sample_list_data_labeling_jobs():
- """Snippet for list_data_labeling_jobs"""
-
# Create a client
client = aiplatform_v1.JobServiceAsyncClient()
# Initialize request argument(s)
request = aiplatform_v1.ListDataLabelingJobsRequest(
- parent="projects/{project}/locations/{location}",
+ parent="parent_value",
)
# Make the request
page_result = client.list_data_labeling_jobs(request=request)
+
+ # Handle the response
async for response in page_result:
print(response)
diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1_job_service_list_data_labeling_jobs_sync.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1_job_service_list_data_labeling_jobs_sync.py
index c9776fec3b..d43010fc93 100644
--- a/samples/generated_samples/aiplatform_generated_aiplatform_v1_job_service_list_data_labeling_jobs_sync.py
+++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1_job_service_list_data_labeling_jobs_sync.py
@@ -28,18 +28,18 @@
def sample_list_data_labeling_jobs():
- """Snippet for list_data_labeling_jobs"""
-
# Create a client
client = aiplatform_v1.JobServiceClient()
# Initialize request argument(s)
request = aiplatform_v1.ListDataLabelingJobsRequest(
- parent="projects/{project}/locations/{location}",
+ parent="parent_value",
)
# Make the request
page_result = client.list_data_labeling_jobs(request=request)
+
+ # Handle the response
for response in page_result:
print(response)
diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1_job_service_list_hyperparameter_tuning_jobs_async.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1_job_service_list_hyperparameter_tuning_jobs_async.py
index 94c98f326c..4d27244601 100644
--- a/samples/generated_samples/aiplatform_generated_aiplatform_v1_job_service_list_hyperparameter_tuning_jobs_async.py
+++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1_job_service_list_hyperparameter_tuning_jobs_async.py
@@ -28,18 +28,18 @@
async def sample_list_hyperparameter_tuning_jobs():
- """Snippet for list_hyperparameter_tuning_jobs"""
-
# Create a client
client = aiplatform_v1.JobServiceAsyncClient()
# Initialize request argument(s)
request = aiplatform_v1.ListHyperparameterTuningJobsRequest(
- parent="projects/{project}/locations/{location}",
+ parent="parent_value",
)
# Make the request
page_result = client.list_hyperparameter_tuning_jobs(request=request)
+
+ # Handle the response
async for response in page_result:
print(response)
diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1_job_service_list_hyperparameter_tuning_jobs_sync.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1_job_service_list_hyperparameter_tuning_jobs_sync.py
index abc458240c..fbbb0fedec 100644
--- a/samples/generated_samples/aiplatform_generated_aiplatform_v1_job_service_list_hyperparameter_tuning_jobs_sync.py
+++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1_job_service_list_hyperparameter_tuning_jobs_sync.py
@@ -28,18 +28,18 @@
def sample_list_hyperparameter_tuning_jobs():
- """Snippet for list_hyperparameter_tuning_jobs"""
-
# Create a client
client = aiplatform_v1.JobServiceClient()
# Initialize request argument(s)
request = aiplatform_v1.ListHyperparameterTuningJobsRequest(
- parent="projects/{project}/locations/{location}",
+ parent="parent_value",
)
# Make the request
page_result = client.list_hyperparameter_tuning_jobs(request=request)
+
+ # Handle the response
for response in page_result:
print(response)
diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1_job_service_list_model_deployment_monitoring_jobs_async.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1_job_service_list_model_deployment_monitoring_jobs_async.py
index 1e07010a74..c00b05efb1 100644
--- a/samples/generated_samples/aiplatform_generated_aiplatform_v1_job_service_list_model_deployment_monitoring_jobs_async.py
+++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1_job_service_list_model_deployment_monitoring_jobs_async.py
@@ -28,18 +28,18 @@
async def sample_list_model_deployment_monitoring_jobs():
- """Snippet for list_model_deployment_monitoring_jobs"""
-
# Create a client
client = aiplatform_v1.JobServiceAsyncClient()
# Initialize request argument(s)
request = aiplatform_v1.ListModelDeploymentMonitoringJobsRequest(
- parent="projects/{project}/locations/{location}",
+ parent="parent_value",
)
# Make the request
page_result = client.list_model_deployment_monitoring_jobs(request=request)
+
+ # Handle the response
async for response in page_result:
print(response)
diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1_job_service_list_model_deployment_monitoring_jobs_sync.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1_job_service_list_model_deployment_monitoring_jobs_sync.py
index 8b26652a7b..85b5955418 100644
--- a/samples/generated_samples/aiplatform_generated_aiplatform_v1_job_service_list_model_deployment_monitoring_jobs_sync.py
+++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1_job_service_list_model_deployment_monitoring_jobs_sync.py
@@ -28,18 +28,18 @@
def sample_list_model_deployment_monitoring_jobs():
- """Snippet for list_model_deployment_monitoring_jobs"""
-
# Create a client
client = aiplatform_v1.JobServiceClient()
# Initialize request argument(s)
request = aiplatform_v1.ListModelDeploymentMonitoringJobsRequest(
- parent="projects/{project}/locations/{location}",
+ parent="parent_value",
)
# Make the request
page_result = client.list_model_deployment_monitoring_jobs(request=request)
+
+ # Handle the response
for response in page_result:
print(response)
diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1_job_service_pause_model_deployment_monitoring_job_async.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1_job_service_pause_model_deployment_monitoring_job_async.py
index 8e53797c13..710c96708e 100644
--- a/samples/generated_samples/aiplatform_generated_aiplatform_v1_job_service_pause_model_deployment_monitoring_job_async.py
+++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1_job_service_pause_model_deployment_monitoring_job_async.py
@@ -28,18 +28,16 @@
async def sample_pause_model_deployment_monitoring_job():
- """Snippet for pause_model_deployment_monitoring_job"""
-
# Create a client
client = aiplatform_v1.JobServiceAsyncClient()
# Initialize request argument(s)
request = aiplatform_v1.PauseModelDeploymentMonitoringJobRequest(
- name="projects/{project}/locations/{location}/modelDeploymentMonitoringJobs/{model_deployment_monitoring_job}",
+ name="name_value",
)
# Make the request
- response = await client.pause_model_deployment_monitoring_job(request=request)
+ await client.pause_model_deployment_monitoring_job(request=request)
# [END aiplatform_generated_aiplatform_v1_JobService_PauseModelDeploymentMonitoringJob_async]
diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1_job_service_pause_model_deployment_monitoring_job_sync.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1_job_service_pause_model_deployment_monitoring_job_sync.py
index 11b197880a..66619dc233 100644
--- a/samples/generated_samples/aiplatform_generated_aiplatform_v1_job_service_pause_model_deployment_monitoring_job_sync.py
+++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1_job_service_pause_model_deployment_monitoring_job_sync.py
@@ -28,18 +28,16 @@
def sample_pause_model_deployment_monitoring_job():
- """Snippet for pause_model_deployment_monitoring_job"""
-
# Create a client
client = aiplatform_v1.JobServiceClient()
# Initialize request argument(s)
request = aiplatform_v1.PauseModelDeploymentMonitoringJobRequest(
- name="projects/{project}/locations/{location}/modelDeploymentMonitoringJobs/{model_deployment_monitoring_job}",
+ name="name_value",
)
# Make the request
- response = client.pause_model_deployment_monitoring_job(request=request)
+ client.pause_model_deployment_monitoring_job(request=request)
# [END aiplatform_generated_aiplatform_v1_JobService_PauseModelDeploymentMonitoringJob_sync]
diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1_job_service_resume_model_deployment_monitoring_job_async.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1_job_service_resume_model_deployment_monitoring_job_async.py
index c7c92d5075..0d22122bf2 100644
--- a/samples/generated_samples/aiplatform_generated_aiplatform_v1_job_service_resume_model_deployment_monitoring_job_async.py
+++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1_job_service_resume_model_deployment_monitoring_job_async.py
@@ -28,18 +28,16 @@
async def sample_resume_model_deployment_monitoring_job():
- """Snippet for resume_model_deployment_monitoring_job"""
-
# Create a client
client = aiplatform_v1.JobServiceAsyncClient()
# Initialize request argument(s)
request = aiplatform_v1.ResumeModelDeploymentMonitoringJobRequest(
- name="projects/{project}/locations/{location}/modelDeploymentMonitoringJobs/{model_deployment_monitoring_job}",
+ name="name_value",
)
# Make the request
- response = await client.resume_model_deployment_monitoring_job(request=request)
+ await client.resume_model_deployment_monitoring_job(request=request)
# [END aiplatform_generated_aiplatform_v1_JobService_ResumeModelDeploymentMonitoringJob_async]
diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1_job_service_resume_model_deployment_monitoring_job_sync.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1_job_service_resume_model_deployment_monitoring_job_sync.py
index a2609ba452..7459123558 100644
--- a/samples/generated_samples/aiplatform_generated_aiplatform_v1_job_service_resume_model_deployment_monitoring_job_sync.py
+++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1_job_service_resume_model_deployment_monitoring_job_sync.py
@@ -28,18 +28,16 @@
def sample_resume_model_deployment_monitoring_job():
- """Snippet for resume_model_deployment_monitoring_job"""
-
# Create a client
client = aiplatform_v1.JobServiceClient()
# Initialize request argument(s)
request = aiplatform_v1.ResumeModelDeploymentMonitoringJobRequest(
- name="projects/{project}/locations/{location}/modelDeploymentMonitoringJobs/{model_deployment_monitoring_job}",
+ name="name_value",
)
# Make the request
- response = client.resume_model_deployment_monitoring_job(request=request)
+ client.resume_model_deployment_monitoring_job(request=request)
# [END aiplatform_generated_aiplatform_v1_JobService_ResumeModelDeploymentMonitoringJob_sync]
diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1_job_service_search_model_deployment_monitoring_stats_anomalies_async.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1_job_service_search_model_deployment_monitoring_stats_anomalies_async.py
index 453c119882..9d11316c95 100644
--- a/samples/generated_samples/aiplatform_generated_aiplatform_v1_job_service_search_model_deployment_monitoring_stats_anomalies_async.py
+++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1_job_service_search_model_deployment_monitoring_stats_anomalies_async.py
@@ -28,19 +28,19 @@
async def sample_search_model_deployment_monitoring_stats_anomalies():
- """Snippet for search_model_deployment_monitoring_stats_anomalies"""
-
# Create a client
client = aiplatform_v1.JobServiceAsyncClient()
# Initialize request argument(s)
request = aiplatform_v1.SearchModelDeploymentMonitoringStatsAnomaliesRequest(
- model_deployment_monitoring_job="projects/{project}/locations/{location}/modelDeploymentMonitoringJobs/{model_deployment_monitoring_job}",
+ model_deployment_monitoring_job="model_deployment_monitoring_job_value",
deployed_model_id="deployed_model_id_value",
)
# Make the request
page_result = client.search_model_deployment_monitoring_stats_anomalies(request=request)
+
+ # Handle the response
async for response in page_result:
print(response)
diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1_job_service_search_model_deployment_monitoring_stats_anomalies_sync.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1_job_service_search_model_deployment_monitoring_stats_anomalies_sync.py
index c40ed5f195..f816c5854b 100644
--- a/samples/generated_samples/aiplatform_generated_aiplatform_v1_job_service_search_model_deployment_monitoring_stats_anomalies_sync.py
+++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1_job_service_search_model_deployment_monitoring_stats_anomalies_sync.py
@@ -28,19 +28,19 @@
def sample_search_model_deployment_monitoring_stats_anomalies():
- """Snippet for search_model_deployment_monitoring_stats_anomalies"""
-
# Create a client
client = aiplatform_v1.JobServiceClient()
# Initialize request argument(s)
request = aiplatform_v1.SearchModelDeploymentMonitoringStatsAnomaliesRequest(
- model_deployment_monitoring_job="projects/{project}/locations/{location}/modelDeploymentMonitoringJobs/{model_deployment_monitoring_job}",
+ model_deployment_monitoring_job="model_deployment_monitoring_job_value",
deployed_model_id="deployed_model_id_value",
)
# Make the request
page_result = client.search_model_deployment_monitoring_stats_anomalies(request=request)
+
+ # Handle the response
for response in page_result:
print(response)
diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1_job_service_update_model_deployment_monitoring_job_async.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1_job_service_update_model_deployment_monitoring_job_async.py
index d5831d12bf..1ecb21cc25 100644
--- a/samples/generated_samples/aiplatform_generated_aiplatform_v1_job_service_update_model_deployment_monitoring_job_async.py
+++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1_job_service_update_model_deployment_monitoring_job_async.py
@@ -28,15 +28,13 @@
async def sample_update_model_deployment_monitoring_job():
- """Snippet for update_model_deployment_monitoring_job"""
-
# Create a client
client = aiplatform_v1.JobServiceAsyncClient()
# Initialize request argument(s)
model_deployment_monitoring_job = aiplatform_v1.ModelDeploymentMonitoringJob()
model_deployment_monitoring_job.display_name = "display_name_value"
- model_deployment_monitoring_job.endpoint = "projects/{project}/locations/{location}/endpoints/{endpoint}"
+ model_deployment_monitoring_job.endpoint = "endpoint_value"
request = aiplatform_v1.UpdateModelDeploymentMonitoringJobRequest(
model_deployment_monitoring_job=model_deployment_monitoring_job,
@@ -48,6 +46,8 @@ async def sample_update_model_deployment_monitoring_job():
print("Waiting for operation to complete...")
response = await operation.result()
+
+ # Handle the response
print(response)
# [END aiplatform_generated_aiplatform_v1_JobService_UpdateModelDeploymentMonitoringJob_async]
diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1_job_service_update_model_deployment_monitoring_job_sync.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1_job_service_update_model_deployment_monitoring_job_sync.py
index 9c3fa0ed6c..8e0761b616 100644
--- a/samples/generated_samples/aiplatform_generated_aiplatform_v1_job_service_update_model_deployment_monitoring_job_sync.py
+++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1_job_service_update_model_deployment_monitoring_job_sync.py
@@ -28,15 +28,13 @@
def sample_update_model_deployment_monitoring_job():
- """Snippet for update_model_deployment_monitoring_job"""
-
# Create a client
client = aiplatform_v1.JobServiceClient()
# Initialize request argument(s)
model_deployment_monitoring_job = aiplatform_v1.ModelDeploymentMonitoringJob()
model_deployment_monitoring_job.display_name = "display_name_value"
- model_deployment_monitoring_job.endpoint = "projects/{project}/locations/{location}/endpoints/{endpoint}"
+ model_deployment_monitoring_job.endpoint = "endpoint_value"
request = aiplatform_v1.UpdateModelDeploymentMonitoringJobRequest(
model_deployment_monitoring_job=model_deployment_monitoring_job,
@@ -48,6 +46,8 @@ def sample_update_model_deployment_monitoring_job():
print("Waiting for operation to complete...")
response = operation.result()
+
+ # Handle the response
print(response)
# [END aiplatform_generated_aiplatform_v1_JobService_UpdateModelDeploymentMonitoringJob_sync]
diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1_metadata_service_add_context_artifacts_and_executions_async.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1_metadata_service_add_context_artifacts_and_executions_async.py
index 7c24d786b8..12192b766f 100644
--- a/samples/generated_samples/aiplatform_generated_aiplatform_v1_metadata_service_add_context_artifacts_and_executions_async.py
+++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1_metadata_service_add_context_artifacts_and_executions_async.py
@@ -28,20 +28,18 @@
async def sample_add_context_artifacts_and_executions():
- """Snippet for add_context_artifacts_and_executions"""
-
# Create a client
client = aiplatform_v1.MetadataServiceAsyncClient()
# Initialize request argument(s)
request = aiplatform_v1.AddContextArtifactsAndExecutionsRequest(
- context="projects/{project}/locations/{location}/metadataStores/{metadata_store}/contexts/{context}",
+ context="context_value",
)
# Make the request
response = await client.add_context_artifacts_and_executions(request=request)
- # Handle response
+ # Handle the response
print(response)
# [END aiplatform_generated_aiplatform_v1_MetadataService_AddContextArtifactsAndExecutions_async]
diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1_metadata_service_add_context_artifacts_and_executions_sync.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1_metadata_service_add_context_artifacts_and_executions_sync.py
index 4f85ec2c19..9d5d582497 100644
--- a/samples/generated_samples/aiplatform_generated_aiplatform_v1_metadata_service_add_context_artifacts_and_executions_sync.py
+++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1_metadata_service_add_context_artifacts_and_executions_sync.py
@@ -28,20 +28,18 @@
def sample_add_context_artifacts_and_executions():
- """Snippet for add_context_artifacts_and_executions"""
-
# Create a client
client = aiplatform_v1.MetadataServiceClient()
# Initialize request argument(s)
request = aiplatform_v1.AddContextArtifactsAndExecutionsRequest(
- context="projects/{project}/locations/{location}/metadataStores/{metadata_store}/contexts/{context}",
+ context="context_value",
)
# Make the request
response = client.add_context_artifacts_and_executions(request=request)
- # Handle response
+ # Handle the response
print(response)
# [END aiplatform_generated_aiplatform_v1_MetadataService_AddContextArtifactsAndExecutions_sync]
diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1_metadata_service_add_context_children_async.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1_metadata_service_add_context_children_async.py
index 9542ae435b..f013bfc95d 100644
--- a/samples/generated_samples/aiplatform_generated_aiplatform_v1_metadata_service_add_context_children_async.py
+++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1_metadata_service_add_context_children_async.py
@@ -28,20 +28,18 @@
async def sample_add_context_children():
- """Snippet for add_context_children"""
-
# Create a client
client = aiplatform_v1.MetadataServiceAsyncClient()
# Initialize request argument(s)
request = aiplatform_v1.AddContextChildrenRequest(
- context="projects/{project}/locations/{location}/metadataStores/{metadata_store}/contexts/{context}",
+ context="context_value",
)
# Make the request
response = await client.add_context_children(request=request)
- # Handle response
+ # Handle the response
print(response)
# [END aiplatform_generated_aiplatform_v1_MetadataService_AddContextChildren_async]
diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1_metadata_service_add_context_children_sync.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1_metadata_service_add_context_children_sync.py
index 16ab546458..d97708f980 100644
--- a/samples/generated_samples/aiplatform_generated_aiplatform_v1_metadata_service_add_context_children_sync.py
+++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1_metadata_service_add_context_children_sync.py
@@ -28,20 +28,18 @@
def sample_add_context_children():
- """Snippet for add_context_children"""
-
# Create a client
client = aiplatform_v1.MetadataServiceClient()
# Initialize request argument(s)
request = aiplatform_v1.AddContextChildrenRequest(
- context="projects/{project}/locations/{location}/metadataStores/{metadata_store}/contexts/{context}",
+ context="context_value",
)
# Make the request
response = client.add_context_children(request=request)
- # Handle response
+ # Handle the response
print(response)
# [END aiplatform_generated_aiplatform_v1_MetadataService_AddContextChildren_sync]
diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1_metadata_service_add_execution_events_async.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1_metadata_service_add_execution_events_async.py
index 71ef9a0d0f..5ea3920fbd 100644
--- a/samples/generated_samples/aiplatform_generated_aiplatform_v1_metadata_service_add_execution_events_async.py
+++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1_metadata_service_add_execution_events_async.py
@@ -28,20 +28,18 @@
async def sample_add_execution_events():
- """Snippet for add_execution_events"""
-
# Create a client
client = aiplatform_v1.MetadataServiceAsyncClient()
# Initialize request argument(s)
request = aiplatform_v1.AddExecutionEventsRequest(
- execution="projects/{project}/locations/{location}/metadataStores/{metadata_store}/executions/{execution}",
+ execution="execution_value",
)
# Make the request
response = await client.add_execution_events(request=request)
- # Handle response
+ # Handle the response
print(response)
# [END aiplatform_generated_aiplatform_v1_MetadataService_AddExecutionEvents_async]
diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1_metadata_service_add_execution_events_sync.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1_metadata_service_add_execution_events_sync.py
index d5efc090a3..b6b14e7038 100644
--- a/samples/generated_samples/aiplatform_generated_aiplatform_v1_metadata_service_add_execution_events_sync.py
+++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1_metadata_service_add_execution_events_sync.py
@@ -28,20 +28,18 @@
def sample_add_execution_events():
- """Snippet for add_execution_events"""
-
# Create a client
client = aiplatform_v1.MetadataServiceClient()
# Initialize request argument(s)
request = aiplatform_v1.AddExecutionEventsRequest(
- execution="projects/{project}/locations/{location}/metadataStores/{metadata_store}/executions/{execution}",
+ execution="execution_value",
)
# Make the request
response = client.add_execution_events(request=request)
- # Handle response
+ # Handle the response
print(response)
# [END aiplatform_generated_aiplatform_v1_MetadataService_AddExecutionEvents_sync]
diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1_metadata_service_create_artifact_async.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1_metadata_service_create_artifact_async.py
index 3f2bd1e44b..6ae63ca292 100644
--- a/samples/generated_samples/aiplatform_generated_aiplatform_v1_metadata_service_create_artifact_async.py
+++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1_metadata_service_create_artifact_async.py
@@ -28,20 +28,18 @@
async def sample_create_artifact():
- """Snippet for create_artifact"""
-
# Create a client
client = aiplatform_v1.MetadataServiceAsyncClient()
# Initialize request argument(s)
request = aiplatform_v1.CreateArtifactRequest(
- parent="projects/{project}/locations/{location}/metadataStores/{metadata_store}",
+ parent="parent_value",
)
# Make the request
response = await client.create_artifact(request=request)
- # Handle response
+ # Handle the response
print(response)
# [END aiplatform_generated_aiplatform_v1_MetadataService_CreateArtifact_async]
diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1_metadata_service_create_artifact_sync.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1_metadata_service_create_artifact_sync.py
index 9ee3fd4b26..6b911877e3 100644
--- a/samples/generated_samples/aiplatform_generated_aiplatform_v1_metadata_service_create_artifact_sync.py
+++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1_metadata_service_create_artifact_sync.py
@@ -28,20 +28,18 @@
def sample_create_artifact():
- """Snippet for create_artifact"""
-
# Create a client
client = aiplatform_v1.MetadataServiceClient()
# Initialize request argument(s)
request = aiplatform_v1.CreateArtifactRequest(
- parent="projects/{project}/locations/{location}/metadataStores/{metadata_store}",
+ parent="parent_value",
)
# Make the request
response = client.create_artifact(request=request)
- # Handle response
+ # Handle the response
print(response)
# [END aiplatform_generated_aiplatform_v1_MetadataService_CreateArtifact_sync]
diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1_metadata_service_create_context_async.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1_metadata_service_create_context_async.py
index 970980e6b6..9a7209e72d 100644
--- a/samples/generated_samples/aiplatform_generated_aiplatform_v1_metadata_service_create_context_async.py
+++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1_metadata_service_create_context_async.py
@@ -28,20 +28,18 @@
async def sample_create_context():
- """Snippet for create_context"""
-
# Create a client
client = aiplatform_v1.MetadataServiceAsyncClient()
# Initialize request argument(s)
request = aiplatform_v1.CreateContextRequest(
- parent="projects/{project}/locations/{location}/metadataStores/{metadata_store}",
+ parent="parent_value",
)
# Make the request
response = await client.create_context(request=request)
- # Handle response
+ # Handle the response
print(response)
# [END aiplatform_generated_aiplatform_v1_MetadataService_CreateContext_async]
diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1_metadata_service_create_context_sync.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1_metadata_service_create_context_sync.py
index c714d3b515..551302e07c 100644
--- a/samples/generated_samples/aiplatform_generated_aiplatform_v1_metadata_service_create_context_sync.py
+++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1_metadata_service_create_context_sync.py
@@ -28,20 +28,18 @@
def sample_create_context():
- """Snippet for create_context"""
-
# Create a client
client = aiplatform_v1.MetadataServiceClient()
# Initialize request argument(s)
request = aiplatform_v1.CreateContextRequest(
- parent="projects/{project}/locations/{location}/metadataStores/{metadata_store}",
+ parent="parent_value",
)
# Make the request
response = client.create_context(request=request)
- # Handle response
+ # Handle the response
print(response)
# [END aiplatform_generated_aiplatform_v1_MetadataService_CreateContext_sync]
diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1_metadata_service_create_execution_async.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1_metadata_service_create_execution_async.py
index d9ef7f7ed7..7368effc6a 100644
--- a/samples/generated_samples/aiplatform_generated_aiplatform_v1_metadata_service_create_execution_async.py
+++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1_metadata_service_create_execution_async.py
@@ -28,20 +28,18 @@
async def sample_create_execution():
- """Snippet for create_execution"""
-
# Create a client
client = aiplatform_v1.MetadataServiceAsyncClient()
# Initialize request argument(s)
request = aiplatform_v1.CreateExecutionRequest(
- parent="projects/{project}/locations/{location}/metadataStores/{metadata_store}",
+ parent="parent_value",
)
# Make the request
response = await client.create_execution(request=request)
- # Handle response
+ # Handle the response
print(response)
# [END aiplatform_generated_aiplatform_v1_MetadataService_CreateExecution_async]
diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1_metadata_service_create_execution_sync.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1_metadata_service_create_execution_sync.py
index 0c2fc3e7e8..7dc066993f 100644
--- a/samples/generated_samples/aiplatform_generated_aiplatform_v1_metadata_service_create_execution_sync.py
+++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1_metadata_service_create_execution_sync.py
@@ -28,20 +28,18 @@
def sample_create_execution():
- """Snippet for create_execution"""
-
# Create a client
client = aiplatform_v1.MetadataServiceClient()
# Initialize request argument(s)
request = aiplatform_v1.CreateExecutionRequest(
- parent="projects/{project}/locations/{location}/metadataStores/{metadata_store}",
+ parent="parent_value",
)
# Make the request
response = client.create_execution(request=request)
- # Handle response
+ # Handle the response
print(response)
# [END aiplatform_generated_aiplatform_v1_MetadataService_CreateExecution_sync]
diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1_metadata_service_create_metadata_schema_async.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1_metadata_service_create_metadata_schema_async.py
index 1ed504d672..5ce36ffe98 100644
--- a/samples/generated_samples/aiplatform_generated_aiplatform_v1_metadata_service_create_metadata_schema_async.py
+++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1_metadata_service_create_metadata_schema_async.py
@@ -28,8 +28,6 @@
async def sample_create_metadata_schema():
- """Snippet for create_metadata_schema"""
-
# Create a client
client = aiplatform_v1.MetadataServiceAsyncClient()
@@ -38,14 +36,14 @@ async def sample_create_metadata_schema():
metadata_schema.schema = "schema_value"
request = aiplatform_v1.CreateMetadataSchemaRequest(
- parent="projects/{project}/locations/{location}/metadataStores/{metadata_store}",
+ parent="parent_value",
metadata_schema=metadata_schema,
)
# Make the request
response = await client.create_metadata_schema(request=request)
- # Handle response
+ # Handle the response
print(response)
# [END aiplatform_generated_aiplatform_v1_MetadataService_CreateMetadataSchema_async]
diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1_metadata_service_create_metadata_schema_sync.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1_metadata_service_create_metadata_schema_sync.py
index 5a880c6e89..e0be93e55f 100644
--- a/samples/generated_samples/aiplatform_generated_aiplatform_v1_metadata_service_create_metadata_schema_sync.py
+++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1_metadata_service_create_metadata_schema_sync.py
@@ -28,8 +28,6 @@
def sample_create_metadata_schema():
- """Snippet for create_metadata_schema"""
-
# Create a client
client = aiplatform_v1.MetadataServiceClient()
@@ -38,14 +36,14 @@ def sample_create_metadata_schema():
metadata_schema.schema = "schema_value"
request = aiplatform_v1.CreateMetadataSchemaRequest(
- parent="projects/{project}/locations/{location}/metadataStores/{metadata_store}",
+ parent="parent_value",
metadata_schema=metadata_schema,
)
# Make the request
response = client.create_metadata_schema(request=request)
- # Handle response
+ # Handle the response
print(response)
# [END aiplatform_generated_aiplatform_v1_MetadataService_CreateMetadataSchema_sync]
diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1_metadata_service_create_metadata_store_async.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1_metadata_service_create_metadata_store_async.py
index 025fcdccbd..b7be47eeea 100644
--- a/samples/generated_samples/aiplatform_generated_aiplatform_v1_metadata_service_create_metadata_store_async.py
+++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1_metadata_service_create_metadata_store_async.py
@@ -28,14 +28,12 @@
async def sample_create_metadata_store():
- """Snippet for create_metadata_store"""
-
# Create a client
client = aiplatform_v1.MetadataServiceAsyncClient()
# Initialize request argument(s)
request = aiplatform_v1.CreateMetadataStoreRequest(
- parent="projects/{project}/locations/{location}",
+ parent="parent_value",
)
# Make the request
@@ -44,6 +42,8 @@ async def sample_create_metadata_store():
print("Waiting for operation to complete...")
response = await operation.result()
+
+ # Handle the response
print(response)
# [END aiplatform_generated_aiplatform_v1_MetadataService_CreateMetadataStore_async]
diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1_metadata_service_create_metadata_store_sync.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1_metadata_service_create_metadata_store_sync.py
index e33c286e2e..f665da6651 100644
--- a/samples/generated_samples/aiplatform_generated_aiplatform_v1_metadata_service_create_metadata_store_sync.py
+++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1_metadata_service_create_metadata_store_sync.py
@@ -28,14 +28,12 @@
def sample_create_metadata_store():
- """Snippet for create_metadata_store"""
-
# Create a client
client = aiplatform_v1.MetadataServiceClient()
# Initialize request argument(s)
request = aiplatform_v1.CreateMetadataStoreRequest(
- parent="projects/{project}/locations/{location}",
+ parent="parent_value",
)
# Make the request
@@ -44,6 +42,8 @@ def sample_create_metadata_store():
print("Waiting for operation to complete...")
response = operation.result()
+
+ # Handle the response
print(response)
# [END aiplatform_generated_aiplatform_v1_MetadataService_CreateMetadataStore_sync]
diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1_metadata_service_delete_artifact_async.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1_metadata_service_delete_artifact_async.py
index 635a908ea0..a4d579e2d8 100644
--- a/samples/generated_samples/aiplatform_generated_aiplatform_v1_metadata_service_delete_artifact_async.py
+++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1_metadata_service_delete_artifact_async.py
@@ -28,14 +28,12 @@
async def sample_delete_artifact():
- """Snippet for delete_artifact"""
-
# Create a client
client = aiplatform_v1.MetadataServiceAsyncClient()
# Initialize request argument(s)
request = aiplatform_v1.DeleteArtifactRequest(
- name="projects/{project}/locations/{location}/metadataStores/{metadata_store}/artifacts/{artifact}",
+ name="name_value",
)
# Make the request
@@ -44,6 +42,8 @@ async def sample_delete_artifact():
print("Waiting for operation to complete...")
response = await operation.result()
+
+ # Handle the response
print(response)
# [END aiplatform_generated_aiplatform_v1_MetadataService_DeleteArtifact_async]
diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1_metadata_service_delete_artifact_sync.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1_metadata_service_delete_artifact_sync.py
index b1071bc025..6cd9ef7474 100644
--- a/samples/generated_samples/aiplatform_generated_aiplatform_v1_metadata_service_delete_artifact_sync.py
+++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1_metadata_service_delete_artifact_sync.py
@@ -28,14 +28,12 @@
def sample_delete_artifact():
- """Snippet for delete_artifact"""
-
# Create a client
client = aiplatform_v1.MetadataServiceClient()
# Initialize request argument(s)
request = aiplatform_v1.DeleteArtifactRequest(
- name="projects/{project}/locations/{location}/metadataStores/{metadata_store}/artifacts/{artifact}",
+ name="name_value",
)
# Make the request
@@ -44,6 +42,8 @@ def sample_delete_artifact():
print("Waiting for operation to complete...")
response = operation.result()
+
+ # Handle the response
print(response)
# [END aiplatform_generated_aiplatform_v1_MetadataService_DeleteArtifact_sync]
diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1_metadata_service_delete_context_async.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1_metadata_service_delete_context_async.py
index 42f61a3c19..23c94c67ca 100644
--- a/samples/generated_samples/aiplatform_generated_aiplatform_v1_metadata_service_delete_context_async.py
+++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1_metadata_service_delete_context_async.py
@@ -28,14 +28,12 @@
async def sample_delete_context():
- """Snippet for delete_context"""
-
# Create a client
client = aiplatform_v1.MetadataServiceAsyncClient()
# Initialize request argument(s)
request = aiplatform_v1.DeleteContextRequest(
- name="projects/{project}/locations/{location}/metadataStores/{metadata_store}/contexts/{context}",
+ name="name_value",
)
# Make the request
@@ -44,6 +42,8 @@ async def sample_delete_context():
print("Waiting for operation to complete...")
response = await operation.result()
+
+ # Handle the response
print(response)
# [END aiplatform_generated_aiplatform_v1_MetadataService_DeleteContext_async]
diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1_metadata_service_delete_context_sync.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1_metadata_service_delete_context_sync.py
index 80d77e017e..35870a01cd 100644
--- a/samples/generated_samples/aiplatform_generated_aiplatform_v1_metadata_service_delete_context_sync.py
+++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1_metadata_service_delete_context_sync.py
@@ -28,14 +28,12 @@
def sample_delete_context():
- """Snippet for delete_context"""
-
# Create a client
client = aiplatform_v1.MetadataServiceClient()
# Initialize request argument(s)
request = aiplatform_v1.DeleteContextRequest(
- name="projects/{project}/locations/{location}/metadataStores/{metadata_store}/contexts/{context}",
+ name="name_value",
)
# Make the request
@@ -44,6 +42,8 @@ def sample_delete_context():
print("Waiting for operation to complete...")
response = operation.result()
+
+ # Handle the response
print(response)
# [END aiplatform_generated_aiplatform_v1_MetadataService_DeleteContext_sync]
diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1_metadata_service_delete_execution_async.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1_metadata_service_delete_execution_async.py
index ced8988576..1ced3d0705 100644
--- a/samples/generated_samples/aiplatform_generated_aiplatform_v1_metadata_service_delete_execution_async.py
+++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1_metadata_service_delete_execution_async.py
@@ -28,14 +28,12 @@
async def sample_delete_execution():
- """Snippet for delete_execution"""
-
# Create a client
client = aiplatform_v1.MetadataServiceAsyncClient()
# Initialize request argument(s)
request = aiplatform_v1.DeleteExecutionRequest(
- name="projects/{project}/locations/{location}/metadataStores/{metadata_store}/executions/{execution}",
+ name="name_value",
)
# Make the request
@@ -44,6 +42,8 @@ async def sample_delete_execution():
print("Waiting for operation to complete...")
response = await operation.result()
+
+ # Handle the response
print(response)
# [END aiplatform_generated_aiplatform_v1_MetadataService_DeleteExecution_async]
diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1_metadata_service_delete_execution_sync.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1_metadata_service_delete_execution_sync.py
index 3507871122..1e68780482 100644
--- a/samples/generated_samples/aiplatform_generated_aiplatform_v1_metadata_service_delete_execution_sync.py
+++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1_metadata_service_delete_execution_sync.py
@@ -28,14 +28,12 @@
def sample_delete_execution():
- """Snippet for delete_execution"""
-
# Create a client
client = aiplatform_v1.MetadataServiceClient()
# Initialize request argument(s)
request = aiplatform_v1.DeleteExecutionRequest(
- name="projects/{project}/locations/{location}/metadataStores/{metadata_store}/executions/{execution}",
+ name="name_value",
)
# Make the request
@@ -44,6 +42,8 @@ def sample_delete_execution():
print("Waiting for operation to complete...")
response = operation.result()
+
+ # Handle the response
print(response)
# [END aiplatform_generated_aiplatform_v1_MetadataService_DeleteExecution_sync]
diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1_metadata_service_delete_metadata_store_async.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1_metadata_service_delete_metadata_store_async.py
index 69ad317371..f1c5d2254c 100644
--- a/samples/generated_samples/aiplatform_generated_aiplatform_v1_metadata_service_delete_metadata_store_async.py
+++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1_metadata_service_delete_metadata_store_async.py
@@ -28,14 +28,12 @@
async def sample_delete_metadata_store():
- """Snippet for delete_metadata_store"""
-
# Create a client
client = aiplatform_v1.MetadataServiceAsyncClient()
# Initialize request argument(s)
request = aiplatform_v1.DeleteMetadataStoreRequest(
- name="projects/{project}/locations/{location}/metadataStores/{metadata_store}",
+ name="name_value",
)
# Make the request
@@ -44,6 +42,8 @@ async def sample_delete_metadata_store():
print("Waiting for operation to complete...")
response = await operation.result()
+
+ # Handle the response
print(response)
# [END aiplatform_generated_aiplatform_v1_MetadataService_DeleteMetadataStore_async]
diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1_metadata_service_delete_metadata_store_sync.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1_metadata_service_delete_metadata_store_sync.py
index 3290f8c864..3b454bfd74 100644
--- a/samples/generated_samples/aiplatform_generated_aiplatform_v1_metadata_service_delete_metadata_store_sync.py
+++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1_metadata_service_delete_metadata_store_sync.py
@@ -28,14 +28,12 @@
def sample_delete_metadata_store():
- """Snippet for delete_metadata_store"""
-
# Create a client
client = aiplatform_v1.MetadataServiceClient()
# Initialize request argument(s)
request = aiplatform_v1.DeleteMetadataStoreRequest(
- name="projects/{project}/locations/{location}/metadataStores/{metadata_store}",
+ name="name_value",
)
# Make the request
@@ -44,6 +42,8 @@ def sample_delete_metadata_store():
print("Waiting for operation to complete...")
response = operation.result()
+
+ # Handle the response
print(response)
# [END aiplatform_generated_aiplatform_v1_MetadataService_DeleteMetadataStore_sync]
diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1_metadata_service_get_artifact_async.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1_metadata_service_get_artifact_async.py
index 0bbe6db7b6..d57eecda9d 100644
--- a/samples/generated_samples/aiplatform_generated_aiplatform_v1_metadata_service_get_artifact_async.py
+++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1_metadata_service_get_artifact_async.py
@@ -28,20 +28,18 @@
async def sample_get_artifact():
- """Snippet for get_artifact"""
-
# Create a client
client = aiplatform_v1.MetadataServiceAsyncClient()
# Initialize request argument(s)
request = aiplatform_v1.GetArtifactRequest(
- name="projects/{project}/locations/{location}/metadataStores/{metadata_store}/artifacts/{artifact}",
+ name="name_value",
)
# Make the request
response = await client.get_artifact(request=request)
- # Handle response
+ # Handle the response
print(response)
# [END aiplatform_generated_aiplatform_v1_MetadataService_GetArtifact_async]
diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1_metadata_service_get_artifact_sync.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1_metadata_service_get_artifact_sync.py
index f598a2d481..3affb7c382 100644
--- a/samples/generated_samples/aiplatform_generated_aiplatform_v1_metadata_service_get_artifact_sync.py
+++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1_metadata_service_get_artifact_sync.py
@@ -28,20 +28,18 @@
def sample_get_artifact():
- """Snippet for get_artifact"""
-
# Create a client
client = aiplatform_v1.MetadataServiceClient()
# Initialize request argument(s)
request = aiplatform_v1.GetArtifactRequest(
- name="projects/{project}/locations/{location}/metadataStores/{metadata_store}/artifacts/{artifact}",
+ name="name_value",
)
# Make the request
response = client.get_artifact(request=request)
- # Handle response
+ # Handle the response
print(response)
# [END aiplatform_generated_aiplatform_v1_MetadataService_GetArtifact_sync]
diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1_metadata_service_get_context_async.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1_metadata_service_get_context_async.py
index d3beda6639..8c91b57836 100644
--- a/samples/generated_samples/aiplatform_generated_aiplatform_v1_metadata_service_get_context_async.py
+++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1_metadata_service_get_context_async.py
@@ -28,20 +28,18 @@
async def sample_get_context():
- """Snippet for get_context"""
-
# Create a client
client = aiplatform_v1.MetadataServiceAsyncClient()
# Initialize request argument(s)
request = aiplatform_v1.GetContextRequest(
- name="projects/{project}/locations/{location}/metadataStores/{metadata_store}/contexts/{context}",
+ name="name_value",
)
# Make the request
response = await client.get_context(request=request)
- # Handle response
+ # Handle the response
print(response)
# [END aiplatform_generated_aiplatform_v1_MetadataService_GetContext_async]
diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1_metadata_service_get_context_sync.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1_metadata_service_get_context_sync.py
index 43b0a5eae1..3d9538c77f 100644
--- a/samples/generated_samples/aiplatform_generated_aiplatform_v1_metadata_service_get_context_sync.py
+++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1_metadata_service_get_context_sync.py
@@ -28,20 +28,18 @@
def sample_get_context():
- """Snippet for get_context"""
-
# Create a client
client = aiplatform_v1.MetadataServiceClient()
# Initialize request argument(s)
request = aiplatform_v1.GetContextRequest(
- name="projects/{project}/locations/{location}/metadataStores/{metadata_store}/contexts/{context}",
+ name="name_value",
)
# Make the request
response = client.get_context(request=request)
- # Handle response
+ # Handle the response
print(response)
# [END aiplatform_generated_aiplatform_v1_MetadataService_GetContext_sync]
diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1_metadata_service_get_execution_async.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1_metadata_service_get_execution_async.py
index 2cb5c9858b..eef639bd00 100644
--- a/samples/generated_samples/aiplatform_generated_aiplatform_v1_metadata_service_get_execution_async.py
+++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1_metadata_service_get_execution_async.py
@@ -28,20 +28,18 @@
async def sample_get_execution():
- """Snippet for get_execution"""
-
# Create a client
client = aiplatform_v1.MetadataServiceAsyncClient()
# Initialize request argument(s)
request = aiplatform_v1.GetExecutionRequest(
- name="projects/{project}/locations/{location}/metadataStores/{metadata_store}/executions/{execution}",
+ name="name_value",
)
# Make the request
response = await client.get_execution(request=request)
- # Handle response
+ # Handle the response
print(response)
# [END aiplatform_generated_aiplatform_v1_MetadataService_GetExecution_async]
diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1_metadata_service_get_execution_sync.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1_metadata_service_get_execution_sync.py
index aeb8e6b0db..fd14f04d5a 100644
--- a/samples/generated_samples/aiplatform_generated_aiplatform_v1_metadata_service_get_execution_sync.py
+++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1_metadata_service_get_execution_sync.py
@@ -28,20 +28,18 @@
def sample_get_execution():
- """Snippet for get_execution"""
-
# Create a client
client = aiplatform_v1.MetadataServiceClient()
# Initialize request argument(s)
request = aiplatform_v1.GetExecutionRequest(
- name="projects/{project}/locations/{location}/metadataStores/{metadata_store}/executions/{execution}",
+ name="name_value",
)
# Make the request
response = client.get_execution(request=request)
- # Handle response
+ # Handle the response
print(response)
# [END aiplatform_generated_aiplatform_v1_MetadataService_GetExecution_sync]
diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1_metadata_service_get_metadata_schema_async.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1_metadata_service_get_metadata_schema_async.py
index f1613eb74b..58ce65d902 100644
--- a/samples/generated_samples/aiplatform_generated_aiplatform_v1_metadata_service_get_metadata_schema_async.py
+++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1_metadata_service_get_metadata_schema_async.py
@@ -28,20 +28,18 @@
async def sample_get_metadata_schema():
- """Snippet for get_metadata_schema"""
-
# Create a client
client = aiplatform_v1.MetadataServiceAsyncClient()
# Initialize request argument(s)
request = aiplatform_v1.GetMetadataSchemaRequest(
- name="projects/{project}/locations/{location}/metadataStores/{metadata_store}/metadataSchemas/{metadata_schema}",
+ name="name_value",
)
# Make the request
response = await client.get_metadata_schema(request=request)
- # Handle response
+ # Handle the response
print(response)
# [END aiplatform_generated_aiplatform_v1_MetadataService_GetMetadataSchema_async]
diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1_metadata_service_get_metadata_schema_sync.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1_metadata_service_get_metadata_schema_sync.py
index 7f9c301b79..b13a484b79 100644
--- a/samples/generated_samples/aiplatform_generated_aiplatform_v1_metadata_service_get_metadata_schema_sync.py
+++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1_metadata_service_get_metadata_schema_sync.py
@@ -28,20 +28,18 @@
def sample_get_metadata_schema():
- """Snippet for get_metadata_schema"""
-
# Create a client
client = aiplatform_v1.MetadataServiceClient()
# Initialize request argument(s)
request = aiplatform_v1.GetMetadataSchemaRequest(
- name="projects/{project}/locations/{location}/metadataStores/{metadata_store}/metadataSchemas/{metadata_schema}",
+ name="name_value",
)
# Make the request
response = client.get_metadata_schema(request=request)
- # Handle response
+ # Handle the response
print(response)
# [END aiplatform_generated_aiplatform_v1_MetadataService_GetMetadataSchema_sync]
diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1_metadata_service_get_metadata_store_async.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1_metadata_service_get_metadata_store_async.py
index 22f1439b63..204457a3fd 100644
--- a/samples/generated_samples/aiplatform_generated_aiplatform_v1_metadata_service_get_metadata_store_async.py
+++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1_metadata_service_get_metadata_store_async.py
@@ -28,20 +28,18 @@
async def sample_get_metadata_store():
- """Snippet for get_metadata_store"""
-
# Create a client
client = aiplatform_v1.MetadataServiceAsyncClient()
# Initialize request argument(s)
request = aiplatform_v1.GetMetadataStoreRequest(
- name="projects/{project}/locations/{location}/metadataStores/{metadata_store}",
+ name="name_value",
)
# Make the request
response = await client.get_metadata_store(request=request)
- # Handle response
+ # Handle the response
print(response)
# [END aiplatform_generated_aiplatform_v1_MetadataService_GetMetadataStore_async]
diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1_metadata_service_get_metadata_store_sync.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1_metadata_service_get_metadata_store_sync.py
index 2108bd7904..51f537a8c1 100644
--- a/samples/generated_samples/aiplatform_generated_aiplatform_v1_metadata_service_get_metadata_store_sync.py
+++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1_metadata_service_get_metadata_store_sync.py
@@ -28,20 +28,18 @@
def sample_get_metadata_store():
- """Snippet for get_metadata_store"""
-
# Create a client
client = aiplatform_v1.MetadataServiceClient()
# Initialize request argument(s)
request = aiplatform_v1.GetMetadataStoreRequest(
- name="projects/{project}/locations/{location}/metadataStores/{metadata_store}",
+ name="name_value",
)
# Make the request
response = client.get_metadata_store(request=request)
- # Handle response
+ # Handle the response
print(response)
# [END aiplatform_generated_aiplatform_v1_MetadataService_GetMetadataStore_sync]
diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1_metadata_service_list_artifacts_async.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1_metadata_service_list_artifacts_async.py
index 26b0aa176c..3e08fe2549 100644
--- a/samples/generated_samples/aiplatform_generated_aiplatform_v1_metadata_service_list_artifacts_async.py
+++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1_metadata_service_list_artifacts_async.py
@@ -28,18 +28,18 @@
async def sample_list_artifacts():
- """Snippet for list_artifacts"""
-
# Create a client
client = aiplatform_v1.MetadataServiceAsyncClient()
# Initialize request argument(s)
request = aiplatform_v1.ListArtifactsRequest(
- parent="projects/{project}/locations/{location}/metadataStores/{metadata_store}/artifacts/{artifact}",
+ parent="parent_value",
)
# Make the request
page_result = client.list_artifacts(request=request)
+
+ # Handle the response
async for response in page_result:
print(response)
diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1_metadata_service_list_artifacts_sync.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1_metadata_service_list_artifacts_sync.py
index 1b2379ba43..e0e6a9da8c 100644
--- a/samples/generated_samples/aiplatform_generated_aiplatform_v1_metadata_service_list_artifacts_sync.py
+++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1_metadata_service_list_artifacts_sync.py
@@ -28,18 +28,18 @@
def sample_list_artifacts():
- """Snippet for list_artifacts"""
-
# Create a client
client = aiplatform_v1.MetadataServiceClient()
# Initialize request argument(s)
request = aiplatform_v1.ListArtifactsRequest(
- parent="projects/{project}/locations/{location}/metadataStores/{metadata_store}/artifacts/{artifact}",
+ parent="parent_value",
)
# Make the request
page_result = client.list_artifacts(request=request)
+
+ # Handle the response
for response in page_result:
print(response)
diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1_metadata_service_list_contexts_async.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1_metadata_service_list_contexts_async.py
index 7b01c04406..c9a70c3ff5 100644
--- a/samples/generated_samples/aiplatform_generated_aiplatform_v1_metadata_service_list_contexts_async.py
+++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1_metadata_service_list_contexts_async.py
@@ -28,18 +28,18 @@
async def sample_list_contexts():
- """Snippet for list_contexts"""
-
# Create a client
client = aiplatform_v1.MetadataServiceAsyncClient()
# Initialize request argument(s)
request = aiplatform_v1.ListContextsRequest(
- parent="projects/{project}/locations/{location}/metadataStores/{metadata_store}/contexts/{context}",
+ parent="parent_value",
)
# Make the request
page_result = client.list_contexts(request=request)
+
+ # Handle the response
async for response in page_result:
print(response)
diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1_metadata_service_list_contexts_sync.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1_metadata_service_list_contexts_sync.py
index 9d2c687875..78e4260f8f 100644
--- a/samples/generated_samples/aiplatform_generated_aiplatform_v1_metadata_service_list_contexts_sync.py
+++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1_metadata_service_list_contexts_sync.py
@@ -28,18 +28,18 @@
def sample_list_contexts():
- """Snippet for list_contexts"""
-
# Create a client
client = aiplatform_v1.MetadataServiceClient()
# Initialize request argument(s)
request = aiplatform_v1.ListContextsRequest(
- parent="projects/{project}/locations/{location}/metadataStores/{metadata_store}/contexts/{context}",
+ parent="parent_value",
)
# Make the request
page_result = client.list_contexts(request=request)
+
+ # Handle the response
for response in page_result:
print(response)
diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1_metadata_service_list_executions_async.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1_metadata_service_list_executions_async.py
index d31f5d4b84..6666626b41 100644
--- a/samples/generated_samples/aiplatform_generated_aiplatform_v1_metadata_service_list_executions_async.py
+++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1_metadata_service_list_executions_async.py
@@ -28,18 +28,18 @@
async def sample_list_executions():
- """Snippet for list_executions"""
-
# Create a client
client = aiplatform_v1.MetadataServiceAsyncClient()
# Initialize request argument(s)
request = aiplatform_v1.ListExecutionsRequest(
- parent="projects/{project}/locations/{location}/metadataStores/{metadata_store}/executions/{execution}",
+ parent="parent_value",
)
# Make the request
page_result = client.list_executions(request=request)
+
+ # Handle the response
async for response in page_result:
print(response)
diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1_metadata_service_list_executions_sync.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1_metadata_service_list_executions_sync.py
index 667c8a51fb..9aed0c6952 100644
--- a/samples/generated_samples/aiplatform_generated_aiplatform_v1_metadata_service_list_executions_sync.py
+++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1_metadata_service_list_executions_sync.py
@@ -28,18 +28,18 @@
def sample_list_executions():
- """Snippet for list_executions"""
-
# Create a client
client = aiplatform_v1.MetadataServiceClient()
# Initialize request argument(s)
request = aiplatform_v1.ListExecutionsRequest(
- parent="projects/{project}/locations/{location}/metadataStores/{metadata_store}/executions/{execution}",
+ parent="parent_value",
)
# Make the request
page_result = client.list_executions(request=request)
+
+ # Handle the response
for response in page_result:
print(response)
diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1_metadata_service_list_metadata_schemas_async.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1_metadata_service_list_metadata_schemas_async.py
index 803f9f45c6..526cf87717 100644
--- a/samples/generated_samples/aiplatform_generated_aiplatform_v1_metadata_service_list_metadata_schemas_async.py
+++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1_metadata_service_list_metadata_schemas_async.py
@@ -28,18 +28,18 @@
async def sample_list_metadata_schemas():
- """Snippet for list_metadata_schemas"""
-
# Create a client
client = aiplatform_v1.MetadataServiceAsyncClient()
# Initialize request argument(s)
request = aiplatform_v1.ListMetadataSchemasRequest(
- parent="projects/{project}/locations/{location}/metadataStores/{metadata_store}/metadataSchemas/{metadata_schema}",
+ parent="parent_value",
)
# Make the request
page_result = client.list_metadata_schemas(request=request)
+
+ # Handle the response
async for response in page_result:
print(response)
diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1_metadata_service_list_metadata_schemas_sync.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1_metadata_service_list_metadata_schemas_sync.py
index 4527ce3170..ef69c9bfe9 100644
--- a/samples/generated_samples/aiplatform_generated_aiplatform_v1_metadata_service_list_metadata_schemas_sync.py
+++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1_metadata_service_list_metadata_schemas_sync.py
@@ -28,18 +28,18 @@
def sample_list_metadata_schemas():
- """Snippet for list_metadata_schemas"""
-
# Create a client
client = aiplatform_v1.MetadataServiceClient()
# Initialize request argument(s)
request = aiplatform_v1.ListMetadataSchemasRequest(
- parent="projects/{project}/locations/{location}/metadataStores/{metadata_store}/metadataSchemas/{metadata_schema}",
+ parent="parent_value",
)
# Make the request
page_result = client.list_metadata_schemas(request=request)
+
+ # Handle the response
for response in page_result:
print(response)
diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1_metadata_service_list_metadata_stores_async.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1_metadata_service_list_metadata_stores_async.py
index b28053b788..75e6946a26 100644
--- a/samples/generated_samples/aiplatform_generated_aiplatform_v1_metadata_service_list_metadata_stores_async.py
+++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1_metadata_service_list_metadata_stores_async.py
@@ -28,18 +28,18 @@
async def sample_list_metadata_stores():
- """Snippet for list_metadata_stores"""
-
# Create a client
client = aiplatform_v1.MetadataServiceAsyncClient()
# Initialize request argument(s)
request = aiplatform_v1.ListMetadataStoresRequest(
- parent="projects/{project}/locations/{location}",
+ parent="parent_value",
)
# Make the request
page_result = client.list_metadata_stores(request=request)
+
+ # Handle the response
async for response in page_result:
print(response)
diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1_metadata_service_list_metadata_stores_sync.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1_metadata_service_list_metadata_stores_sync.py
index 51ad27690a..b1c2cc8631 100644
--- a/samples/generated_samples/aiplatform_generated_aiplatform_v1_metadata_service_list_metadata_stores_sync.py
+++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1_metadata_service_list_metadata_stores_sync.py
@@ -28,18 +28,18 @@
def sample_list_metadata_stores():
- """Snippet for list_metadata_stores"""
-
# Create a client
client = aiplatform_v1.MetadataServiceClient()
# Initialize request argument(s)
request = aiplatform_v1.ListMetadataStoresRequest(
- parent="projects/{project}/locations/{location}",
+ parent="parent_value",
)
# Make the request
page_result = client.list_metadata_stores(request=request)
+
+ # Handle the response
for response in page_result:
print(response)
diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1_metadata_service_purge_artifacts_async.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1_metadata_service_purge_artifacts_async.py
index be1cc40f02..9569f78266 100644
--- a/samples/generated_samples/aiplatform_generated_aiplatform_v1_metadata_service_purge_artifacts_async.py
+++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1_metadata_service_purge_artifacts_async.py
@@ -28,14 +28,12 @@
async def sample_purge_artifacts():
- """Snippet for purge_artifacts"""
-
# Create a client
client = aiplatform_v1.MetadataServiceAsyncClient()
# Initialize request argument(s)
request = aiplatform_v1.PurgeArtifactsRequest(
- parent="projects/{project}/locations/{location}/metadataStores/{metadata_store}/artifacts/{artifact}",
+ parent="parent_value",
filter="filter_value",
)
@@ -45,6 +43,8 @@ async def sample_purge_artifacts():
print("Waiting for operation to complete...")
response = await operation.result()
+
+ # Handle the response
print(response)
# [END aiplatform_generated_aiplatform_v1_MetadataService_PurgeArtifacts_async]
diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1_metadata_service_purge_artifacts_sync.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1_metadata_service_purge_artifacts_sync.py
index 041efe6812..f5786af370 100644
--- a/samples/generated_samples/aiplatform_generated_aiplatform_v1_metadata_service_purge_artifacts_sync.py
+++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1_metadata_service_purge_artifacts_sync.py
@@ -28,14 +28,12 @@
def sample_purge_artifacts():
- """Snippet for purge_artifacts"""
-
# Create a client
client = aiplatform_v1.MetadataServiceClient()
# Initialize request argument(s)
request = aiplatform_v1.PurgeArtifactsRequest(
- parent="projects/{project}/locations/{location}/metadataStores/{metadata_store}/artifacts/{artifact}",
+ parent="parent_value",
filter="filter_value",
)
@@ -45,6 +43,8 @@ def sample_purge_artifacts():
print("Waiting for operation to complete...")
response = operation.result()
+
+ # Handle the response
print(response)
# [END aiplatform_generated_aiplatform_v1_MetadataService_PurgeArtifacts_sync]
diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1_metadata_service_purge_contexts_async.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1_metadata_service_purge_contexts_async.py
index eaccbdcfe4..1adb2abf21 100644
--- a/samples/generated_samples/aiplatform_generated_aiplatform_v1_metadata_service_purge_contexts_async.py
+++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1_metadata_service_purge_contexts_async.py
@@ -28,14 +28,12 @@
async def sample_purge_contexts():
- """Snippet for purge_contexts"""
-
# Create a client
client = aiplatform_v1.MetadataServiceAsyncClient()
# Initialize request argument(s)
request = aiplatform_v1.PurgeContextsRequest(
- parent="projects/{project}/locations/{location}/metadataStores/{metadata_store}/contexts/{context}",
+ parent="parent_value",
filter="filter_value",
)
@@ -45,6 +43,8 @@ async def sample_purge_contexts():
print("Waiting for operation to complete...")
response = await operation.result()
+
+ # Handle the response
print(response)
# [END aiplatform_generated_aiplatform_v1_MetadataService_PurgeContexts_async]
diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1_metadata_service_purge_contexts_sync.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1_metadata_service_purge_contexts_sync.py
index 64428ec4eb..c62d7880da 100644
--- a/samples/generated_samples/aiplatform_generated_aiplatform_v1_metadata_service_purge_contexts_sync.py
+++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1_metadata_service_purge_contexts_sync.py
@@ -28,14 +28,12 @@
def sample_purge_contexts():
- """Snippet for purge_contexts"""
-
# Create a client
client = aiplatform_v1.MetadataServiceClient()
# Initialize request argument(s)
request = aiplatform_v1.PurgeContextsRequest(
- parent="projects/{project}/locations/{location}/metadataStores/{metadata_store}/contexts/{context}",
+ parent="parent_value",
filter="filter_value",
)
@@ -45,6 +43,8 @@ def sample_purge_contexts():
print("Waiting for operation to complete...")
response = operation.result()
+
+ # Handle the response
print(response)
# [END aiplatform_generated_aiplatform_v1_MetadataService_PurgeContexts_sync]
diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1_metadata_service_purge_executions_async.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1_metadata_service_purge_executions_async.py
index 14f3a0c45c..5453ac7d04 100644
--- a/samples/generated_samples/aiplatform_generated_aiplatform_v1_metadata_service_purge_executions_async.py
+++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1_metadata_service_purge_executions_async.py
@@ -28,14 +28,12 @@
async def sample_purge_executions():
- """Snippet for purge_executions"""
-
# Create a client
client = aiplatform_v1.MetadataServiceAsyncClient()
# Initialize request argument(s)
request = aiplatform_v1.PurgeExecutionsRequest(
- parent="projects/{project}/locations/{location}/metadataStores/{metadata_store}/executions/{execution}",
+ parent="parent_value",
filter="filter_value",
)
@@ -45,6 +43,8 @@ async def sample_purge_executions():
print("Waiting for operation to complete...")
response = await operation.result()
+
+ # Handle the response
print(response)
# [END aiplatform_generated_aiplatform_v1_MetadataService_PurgeExecutions_async]
diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1_metadata_service_purge_executions_sync.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1_metadata_service_purge_executions_sync.py
index 8a0668f61e..2efdf10c66 100644
--- a/samples/generated_samples/aiplatform_generated_aiplatform_v1_metadata_service_purge_executions_sync.py
+++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1_metadata_service_purge_executions_sync.py
@@ -28,14 +28,12 @@
def sample_purge_executions():
- """Snippet for purge_executions"""
-
# Create a client
client = aiplatform_v1.MetadataServiceClient()
# Initialize request argument(s)
request = aiplatform_v1.PurgeExecutionsRequest(
- parent="projects/{project}/locations/{location}/metadataStores/{metadata_store}/executions/{execution}",
+ parent="parent_value",
filter="filter_value",
)
@@ -45,6 +43,8 @@ def sample_purge_executions():
print("Waiting for operation to complete...")
response = operation.result()
+
+ # Handle the response
print(response)
# [END aiplatform_generated_aiplatform_v1_MetadataService_PurgeExecutions_sync]
diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1_metadata_service_query_artifact_lineage_subgraph_async.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1_metadata_service_query_artifact_lineage_subgraph_async.py
index 4981951722..0409c4db63 100644
--- a/samples/generated_samples/aiplatform_generated_aiplatform_v1_metadata_service_query_artifact_lineage_subgraph_async.py
+++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1_metadata_service_query_artifact_lineage_subgraph_async.py
@@ -28,20 +28,18 @@
async def sample_query_artifact_lineage_subgraph():
- """Snippet for query_artifact_lineage_subgraph"""
-
# Create a client
client = aiplatform_v1.MetadataServiceAsyncClient()
# Initialize request argument(s)
request = aiplatform_v1.QueryArtifactLineageSubgraphRequest(
- artifact="projects/{project}/locations/{location}/metadataStores/{metadata_store}/artifacts/{artifact}",
+ artifact="artifact_value",
)
# Make the request
response = await client.query_artifact_lineage_subgraph(request=request)
- # Handle response
+ # Handle the response
print(response)
# [END aiplatform_generated_aiplatform_v1_MetadataService_QueryArtifactLineageSubgraph_async]
diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1_metadata_service_query_artifact_lineage_subgraph_sync.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1_metadata_service_query_artifact_lineage_subgraph_sync.py
index 60f3b717ac..96b0a97a1d 100644
--- a/samples/generated_samples/aiplatform_generated_aiplatform_v1_metadata_service_query_artifact_lineage_subgraph_sync.py
+++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1_metadata_service_query_artifact_lineage_subgraph_sync.py
@@ -28,20 +28,18 @@
def sample_query_artifact_lineage_subgraph():
- """Snippet for query_artifact_lineage_subgraph"""
-
# Create a client
client = aiplatform_v1.MetadataServiceClient()
# Initialize request argument(s)
request = aiplatform_v1.QueryArtifactLineageSubgraphRequest(
- artifact="projects/{project}/locations/{location}/metadataStores/{metadata_store}/artifacts/{artifact}",
+ artifact="artifact_value",
)
# Make the request
response = client.query_artifact_lineage_subgraph(request=request)
- # Handle response
+ # Handle the response
print(response)
# [END aiplatform_generated_aiplatform_v1_MetadataService_QueryArtifactLineageSubgraph_sync]
diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1_metadata_service_query_context_lineage_subgraph_async.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1_metadata_service_query_context_lineage_subgraph_async.py
index 0f265d915f..981ffc002a 100644
--- a/samples/generated_samples/aiplatform_generated_aiplatform_v1_metadata_service_query_context_lineage_subgraph_async.py
+++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1_metadata_service_query_context_lineage_subgraph_async.py
@@ -28,20 +28,18 @@
async def sample_query_context_lineage_subgraph():
- """Snippet for query_context_lineage_subgraph"""
-
# Create a client
client = aiplatform_v1.MetadataServiceAsyncClient()
# Initialize request argument(s)
request = aiplatform_v1.QueryContextLineageSubgraphRequest(
- context="projects/{project}/locations/{location}/metadataStores/{metadata_store}/contexts/{context}",
+ context="context_value",
)
# Make the request
response = await client.query_context_lineage_subgraph(request=request)
- # Handle response
+ # Handle the response
print(response)
# [END aiplatform_generated_aiplatform_v1_MetadataService_QueryContextLineageSubgraph_async]
diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1_metadata_service_query_context_lineage_subgraph_sync.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1_metadata_service_query_context_lineage_subgraph_sync.py
index f6f37a3c1c..da42b7ffed 100644
--- a/samples/generated_samples/aiplatform_generated_aiplatform_v1_metadata_service_query_context_lineage_subgraph_sync.py
+++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1_metadata_service_query_context_lineage_subgraph_sync.py
@@ -28,20 +28,18 @@
def sample_query_context_lineage_subgraph():
- """Snippet for query_context_lineage_subgraph"""
-
# Create a client
client = aiplatform_v1.MetadataServiceClient()
# Initialize request argument(s)
request = aiplatform_v1.QueryContextLineageSubgraphRequest(
- context="projects/{project}/locations/{location}/metadataStores/{metadata_store}/contexts/{context}",
+ context="context_value",
)
# Make the request
response = client.query_context_lineage_subgraph(request=request)
- # Handle response
+ # Handle the response
print(response)
# [END aiplatform_generated_aiplatform_v1_MetadataService_QueryContextLineageSubgraph_sync]
diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1_metadata_service_query_execution_inputs_and_outputs_async.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1_metadata_service_query_execution_inputs_and_outputs_async.py
index d696cb18e1..2c9d36daf9 100644
--- a/samples/generated_samples/aiplatform_generated_aiplatform_v1_metadata_service_query_execution_inputs_and_outputs_async.py
+++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1_metadata_service_query_execution_inputs_and_outputs_async.py
@@ -28,20 +28,18 @@
async def sample_query_execution_inputs_and_outputs():
- """Snippet for query_execution_inputs_and_outputs"""
-
# Create a client
client = aiplatform_v1.MetadataServiceAsyncClient()
# Initialize request argument(s)
request = aiplatform_v1.QueryExecutionInputsAndOutputsRequest(
- execution="projects/{project}/locations/{location}/metadataStores/{metadata_store}/executions/{execution}",
+ execution="execution_value",
)
# Make the request
response = await client.query_execution_inputs_and_outputs(request=request)
- # Handle response
+ # Handle the response
print(response)
# [END aiplatform_generated_aiplatform_v1_MetadataService_QueryExecutionInputsAndOutputs_async]
diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1_metadata_service_query_execution_inputs_and_outputs_sync.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1_metadata_service_query_execution_inputs_and_outputs_sync.py
index a10010609e..6ef3c5e597 100644
--- a/samples/generated_samples/aiplatform_generated_aiplatform_v1_metadata_service_query_execution_inputs_and_outputs_sync.py
+++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1_metadata_service_query_execution_inputs_and_outputs_sync.py
@@ -28,20 +28,18 @@
def sample_query_execution_inputs_and_outputs():
- """Snippet for query_execution_inputs_and_outputs"""
-
# Create a client
client = aiplatform_v1.MetadataServiceClient()
# Initialize request argument(s)
request = aiplatform_v1.QueryExecutionInputsAndOutputsRequest(
- execution="projects/{project}/locations/{location}/metadataStores/{metadata_store}/executions/{execution}",
+ execution="execution_value",
)
# Make the request
response = client.query_execution_inputs_and_outputs(request=request)
- # Handle response
+ # Handle the response
print(response)
# [END aiplatform_generated_aiplatform_v1_MetadataService_QueryExecutionInputsAndOutputs_sync]
diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1_metadata_service_update_artifact_async.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1_metadata_service_update_artifact_async.py
index c296ef6721..2f7bec2157 100644
--- a/samples/generated_samples/aiplatform_generated_aiplatform_v1_metadata_service_update_artifact_async.py
+++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1_metadata_service_update_artifact_async.py
@@ -28,8 +28,6 @@
async def sample_update_artifact():
- """Snippet for update_artifact"""
-
# Create a client
client = aiplatform_v1.MetadataServiceAsyncClient()
@@ -40,7 +38,7 @@ async def sample_update_artifact():
# Make the request
response = await client.update_artifact(request=request)
- # Handle response
+ # Handle the response
print(response)
# [END aiplatform_generated_aiplatform_v1_MetadataService_UpdateArtifact_async]
diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1_metadata_service_update_artifact_sync.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1_metadata_service_update_artifact_sync.py
index 7f241abed3..b1f75e2ca5 100644
--- a/samples/generated_samples/aiplatform_generated_aiplatform_v1_metadata_service_update_artifact_sync.py
+++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1_metadata_service_update_artifact_sync.py
@@ -28,8 +28,6 @@
def sample_update_artifact():
- """Snippet for update_artifact"""
-
# Create a client
client = aiplatform_v1.MetadataServiceClient()
@@ -40,7 +38,7 @@ def sample_update_artifact():
# Make the request
response = client.update_artifact(request=request)
- # Handle response
+ # Handle the response
print(response)
# [END aiplatform_generated_aiplatform_v1_MetadataService_UpdateArtifact_sync]
diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1_metadata_service_update_context_async.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1_metadata_service_update_context_async.py
index bbf619f63d..ef003070c9 100644
--- a/samples/generated_samples/aiplatform_generated_aiplatform_v1_metadata_service_update_context_async.py
+++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1_metadata_service_update_context_async.py
@@ -28,8 +28,6 @@
async def sample_update_context():
- """Snippet for update_context"""
-
# Create a client
client = aiplatform_v1.MetadataServiceAsyncClient()
@@ -40,7 +38,7 @@ async def sample_update_context():
# Make the request
response = await client.update_context(request=request)
- # Handle response
+ # Handle the response
print(response)
# [END aiplatform_generated_aiplatform_v1_MetadataService_UpdateContext_async]
diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1_metadata_service_update_context_sync.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1_metadata_service_update_context_sync.py
index 1f7296f2cc..d289f59a91 100644
--- a/samples/generated_samples/aiplatform_generated_aiplatform_v1_metadata_service_update_context_sync.py
+++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1_metadata_service_update_context_sync.py
@@ -28,8 +28,6 @@
def sample_update_context():
- """Snippet for update_context"""
-
# Create a client
client = aiplatform_v1.MetadataServiceClient()
@@ -40,7 +38,7 @@ def sample_update_context():
# Make the request
response = client.update_context(request=request)
- # Handle response
+ # Handle the response
print(response)
# [END aiplatform_generated_aiplatform_v1_MetadataService_UpdateContext_sync]
diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1_metadata_service_update_execution_async.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1_metadata_service_update_execution_async.py
index 7f8be17747..16062f9146 100644
--- a/samples/generated_samples/aiplatform_generated_aiplatform_v1_metadata_service_update_execution_async.py
+++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1_metadata_service_update_execution_async.py
@@ -28,8 +28,6 @@
async def sample_update_execution():
- """Snippet for update_execution"""
-
# Create a client
client = aiplatform_v1.MetadataServiceAsyncClient()
@@ -40,7 +38,7 @@ async def sample_update_execution():
# Make the request
response = await client.update_execution(request=request)
- # Handle response
+ # Handle the response
print(response)
# [END aiplatform_generated_aiplatform_v1_MetadataService_UpdateExecution_async]
diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1_metadata_service_update_execution_sync.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1_metadata_service_update_execution_sync.py
index 15ca84cdec..6681dad63e 100644
--- a/samples/generated_samples/aiplatform_generated_aiplatform_v1_metadata_service_update_execution_sync.py
+++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1_metadata_service_update_execution_sync.py
@@ -28,8 +28,6 @@
def sample_update_execution():
- """Snippet for update_execution"""
-
# Create a client
client = aiplatform_v1.MetadataServiceClient()
@@ -40,7 +38,7 @@ def sample_update_execution():
# Make the request
response = client.update_execution(request=request)
- # Handle response
+ # Handle the response
print(response)
# [END aiplatform_generated_aiplatform_v1_MetadataService_UpdateExecution_sync]
diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1_migration_service_batch_migrate_resources_async.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1_migration_service_batch_migrate_resources_async.py
index 6afc441357..21d019478c 100644
--- a/samples/generated_samples/aiplatform_generated_aiplatform_v1_migration_service_batch_migrate_resources_async.py
+++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1_migration_service_batch_migrate_resources_async.py
@@ -28,19 +28,17 @@
async def sample_batch_migrate_resources():
- """Snippet for batch_migrate_resources"""
-
# Create a client
client = aiplatform_v1.MigrationServiceAsyncClient()
# Initialize request argument(s)
migrate_resource_requests = aiplatform_v1.MigrateResourceRequest()
migrate_resource_requests.migrate_ml_engine_model_version_config.endpoint = "endpoint_value"
- migrate_resource_requests.migrate_ml_engine_model_version_config.model_version = "projects/{project}/models/{model}/versions/{version}"
+ migrate_resource_requests.migrate_ml_engine_model_version_config.model_version = "model_version_value"
migrate_resource_requests.migrate_ml_engine_model_version_config.model_display_name = "model_display_name_value"
request = aiplatform_v1.BatchMigrateResourcesRequest(
- parent="projects/{project}/locations/{location}",
+ parent="parent_value",
migrate_resource_requests=migrate_resource_requests,
)
@@ -50,6 +48,8 @@ async def sample_batch_migrate_resources():
print("Waiting for operation to complete...")
response = await operation.result()
+
+ # Handle the response
print(response)
# [END aiplatform_generated_aiplatform_v1_MigrationService_BatchMigrateResources_async]
diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1_migration_service_batch_migrate_resources_sync.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1_migration_service_batch_migrate_resources_sync.py
index 957f134fe8..cf895368f0 100644
--- a/samples/generated_samples/aiplatform_generated_aiplatform_v1_migration_service_batch_migrate_resources_sync.py
+++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1_migration_service_batch_migrate_resources_sync.py
@@ -28,19 +28,17 @@
def sample_batch_migrate_resources():
- """Snippet for batch_migrate_resources"""
-
# Create a client
client = aiplatform_v1.MigrationServiceClient()
# Initialize request argument(s)
migrate_resource_requests = aiplatform_v1.MigrateResourceRequest()
migrate_resource_requests.migrate_ml_engine_model_version_config.endpoint = "endpoint_value"
- migrate_resource_requests.migrate_ml_engine_model_version_config.model_version = "projects/{project}/models/{model}/versions/{version}"
+ migrate_resource_requests.migrate_ml_engine_model_version_config.model_version = "model_version_value"
migrate_resource_requests.migrate_ml_engine_model_version_config.model_display_name = "model_display_name_value"
request = aiplatform_v1.BatchMigrateResourcesRequest(
- parent="projects/{project}/locations/{location}",
+ parent="parent_value",
migrate_resource_requests=migrate_resource_requests,
)
@@ -50,6 +48,8 @@ def sample_batch_migrate_resources():
print("Waiting for operation to complete...")
response = operation.result()
+
+ # Handle the response
print(response)
# [END aiplatform_generated_aiplatform_v1_MigrationService_BatchMigrateResources_sync]
diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1_migration_service_search_migratable_resources_async.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1_migration_service_search_migratable_resources_async.py
index f9f1384ae6..f9d2d6fa7f 100644
--- a/samples/generated_samples/aiplatform_generated_aiplatform_v1_migration_service_search_migratable_resources_async.py
+++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1_migration_service_search_migratable_resources_async.py
@@ -28,18 +28,18 @@
async def sample_search_migratable_resources():
- """Snippet for search_migratable_resources"""
-
# Create a client
client = aiplatform_v1.MigrationServiceAsyncClient()
# Initialize request argument(s)
request = aiplatform_v1.SearchMigratableResourcesRequest(
- parent="projects/{project}/locations/{location}",
+ parent="parent_value",
)
# Make the request
page_result = client.search_migratable_resources(request=request)
+
+ # Handle the response
async for response in page_result:
print(response)
diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1_migration_service_search_migratable_resources_sync.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1_migration_service_search_migratable_resources_sync.py
index b92abbce8f..bd66673263 100644
--- a/samples/generated_samples/aiplatform_generated_aiplatform_v1_migration_service_search_migratable_resources_sync.py
+++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1_migration_service_search_migratable_resources_sync.py
@@ -28,18 +28,18 @@
def sample_search_migratable_resources():
- """Snippet for search_migratable_resources"""
-
# Create a client
client = aiplatform_v1.MigrationServiceClient()
# Initialize request argument(s)
request = aiplatform_v1.SearchMigratableResourcesRequest(
- parent="projects/{project}/locations/{location}",
+ parent="parent_value",
)
# Make the request
page_result = client.search_migratable_resources(request=request)
+
+ # Handle the response
for response in page_result:
print(response)
diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1_model_service_delete_model_async.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1_model_service_delete_model_async.py
index 812e73cc68..9e4fd4238d 100644
--- a/samples/generated_samples/aiplatform_generated_aiplatform_v1_model_service_delete_model_async.py
+++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1_model_service_delete_model_async.py
@@ -28,14 +28,12 @@
async def sample_delete_model():
- """Snippet for delete_model"""
-
# Create a client
client = aiplatform_v1.ModelServiceAsyncClient()
# Initialize request argument(s)
request = aiplatform_v1.DeleteModelRequest(
- name="projects/{project}/locations/{location}/models/{model}",
+ name="name_value",
)
# Make the request
@@ -44,6 +42,8 @@ async def sample_delete_model():
print("Waiting for operation to complete...")
response = await operation.result()
+
+ # Handle the response
print(response)
# [END aiplatform_generated_aiplatform_v1_ModelService_DeleteModel_async]
diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1_model_service_delete_model_sync.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1_model_service_delete_model_sync.py
index bfcd343735..5c5a1446eb 100644
--- a/samples/generated_samples/aiplatform_generated_aiplatform_v1_model_service_delete_model_sync.py
+++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1_model_service_delete_model_sync.py
@@ -28,14 +28,12 @@
def sample_delete_model():
- """Snippet for delete_model"""
-
# Create a client
client = aiplatform_v1.ModelServiceClient()
# Initialize request argument(s)
request = aiplatform_v1.DeleteModelRequest(
- name="projects/{project}/locations/{location}/models/{model}",
+ name="name_value",
)
# Make the request
@@ -44,6 +42,8 @@ def sample_delete_model():
print("Waiting for operation to complete...")
response = operation.result()
+
+ # Handle the response
print(response)
# [END aiplatform_generated_aiplatform_v1_ModelService_DeleteModel_sync]
diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1_model_service_export_model_async.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1_model_service_export_model_async.py
index efe8a4690b..cadf958cba 100644
--- a/samples/generated_samples/aiplatform_generated_aiplatform_v1_model_service_export_model_async.py
+++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1_model_service_export_model_async.py
@@ -28,14 +28,12 @@
async def sample_export_model():
- """Snippet for export_model"""
-
# Create a client
client = aiplatform_v1.ModelServiceAsyncClient()
# Initialize request argument(s)
request = aiplatform_v1.ExportModelRequest(
- name="projects/{project}/locations/{location}/models/{model}",
+ name="name_value",
)
# Make the request
@@ -44,6 +42,8 @@ async def sample_export_model():
print("Waiting for operation to complete...")
response = await operation.result()
+
+ # Handle the response
print(response)
# [END aiplatform_generated_aiplatform_v1_ModelService_ExportModel_async]
diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1_model_service_export_model_sync.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1_model_service_export_model_sync.py
index 458310fbb5..92ed15c14b 100644
--- a/samples/generated_samples/aiplatform_generated_aiplatform_v1_model_service_export_model_sync.py
+++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1_model_service_export_model_sync.py
@@ -28,14 +28,12 @@
def sample_export_model():
- """Snippet for export_model"""
-
# Create a client
client = aiplatform_v1.ModelServiceClient()
# Initialize request argument(s)
request = aiplatform_v1.ExportModelRequest(
- name="projects/{project}/locations/{location}/models/{model}",
+ name="name_value",
)
# Make the request
@@ -44,6 +42,8 @@ def sample_export_model():
print("Waiting for operation to complete...")
response = operation.result()
+
+ # Handle the response
print(response)
# [END aiplatform_generated_aiplatform_v1_ModelService_ExportModel_sync]
diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1_model_service_get_model_async.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1_model_service_get_model_async.py
index 821a034f7c..7bbe9e1e12 100644
--- a/samples/generated_samples/aiplatform_generated_aiplatform_v1_model_service_get_model_async.py
+++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1_model_service_get_model_async.py
@@ -28,20 +28,18 @@
async def sample_get_model():
- """Snippet for get_model"""
-
# Create a client
client = aiplatform_v1.ModelServiceAsyncClient()
# Initialize request argument(s)
request = aiplatform_v1.GetModelRequest(
- name="projects/{project}/locations/{location}/models/{model}",
+ name="name_value",
)
# Make the request
response = await client.get_model(request=request)
- # Handle response
+ # Handle the response
print(response)
# [END aiplatform_generated_aiplatform_v1_ModelService_GetModel_async]
diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1_model_service_get_model_evaluation_async.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1_model_service_get_model_evaluation_async.py
index d767e100e5..0ca6b8fd76 100644
--- a/samples/generated_samples/aiplatform_generated_aiplatform_v1_model_service_get_model_evaluation_async.py
+++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1_model_service_get_model_evaluation_async.py
@@ -28,20 +28,18 @@
async def sample_get_model_evaluation():
- """Snippet for get_model_evaluation"""
-
# Create a client
client = aiplatform_v1.ModelServiceAsyncClient()
# Initialize request argument(s)
request = aiplatform_v1.GetModelEvaluationRequest(
- name="projects/{project}/locations/{location}/models/{model}/evaluations/{evaluation}",
+ name="name_value",
)
# Make the request
response = await client.get_model_evaluation(request=request)
- # Handle response
+ # Handle the response
print(response)
# [END aiplatform_generated_aiplatform_v1_ModelService_GetModelEvaluation_async]
diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1_model_service_get_model_evaluation_slice_async.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1_model_service_get_model_evaluation_slice_async.py
index 23c6d2b30c..d06448b804 100644
--- a/samples/generated_samples/aiplatform_generated_aiplatform_v1_model_service_get_model_evaluation_slice_async.py
+++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1_model_service_get_model_evaluation_slice_async.py
@@ -28,20 +28,18 @@
async def sample_get_model_evaluation_slice():
- """Snippet for get_model_evaluation_slice"""
-
# Create a client
client = aiplatform_v1.ModelServiceAsyncClient()
# Initialize request argument(s)
request = aiplatform_v1.GetModelEvaluationSliceRequest(
- name="projects/{project}/locations/{location}/models/{model}/evaluations/{evaluation}/slices/{slice}",
+ name="name_value",
)
# Make the request
response = await client.get_model_evaluation_slice(request=request)
- # Handle response
+ # Handle the response
print(response)
# [END aiplatform_generated_aiplatform_v1_ModelService_GetModelEvaluationSlice_async]
diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1_model_service_get_model_evaluation_slice_sync.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1_model_service_get_model_evaluation_slice_sync.py
index e9bea3470c..7bf4ba1df9 100644
--- a/samples/generated_samples/aiplatform_generated_aiplatform_v1_model_service_get_model_evaluation_slice_sync.py
+++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1_model_service_get_model_evaluation_slice_sync.py
@@ -28,20 +28,18 @@
def sample_get_model_evaluation_slice():
- """Snippet for get_model_evaluation_slice"""
-
# Create a client
client = aiplatform_v1.ModelServiceClient()
# Initialize request argument(s)
request = aiplatform_v1.GetModelEvaluationSliceRequest(
- name="projects/{project}/locations/{location}/models/{model}/evaluations/{evaluation}/slices/{slice}",
+ name="name_value",
)
# Make the request
response = client.get_model_evaluation_slice(request=request)
- # Handle response
+ # Handle the response
print(response)
# [END aiplatform_generated_aiplatform_v1_ModelService_GetModelEvaluationSlice_sync]
diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1_model_service_get_model_evaluation_sync.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1_model_service_get_model_evaluation_sync.py
index 23a788d3ed..1f2fb5a5a2 100644
--- a/samples/generated_samples/aiplatform_generated_aiplatform_v1_model_service_get_model_evaluation_sync.py
+++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1_model_service_get_model_evaluation_sync.py
@@ -28,20 +28,18 @@
def sample_get_model_evaluation():
- """Snippet for get_model_evaluation"""
-
# Create a client
client = aiplatform_v1.ModelServiceClient()
# Initialize request argument(s)
request = aiplatform_v1.GetModelEvaluationRequest(
- name="projects/{project}/locations/{location}/models/{model}/evaluations/{evaluation}",
+ name="name_value",
)
# Make the request
response = client.get_model_evaluation(request=request)
- # Handle response
+ # Handle the response
print(response)
# [END aiplatform_generated_aiplatform_v1_ModelService_GetModelEvaluation_sync]
diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1_model_service_get_model_sync.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1_model_service_get_model_sync.py
index 92538bb899..f1fb3a459a 100644
--- a/samples/generated_samples/aiplatform_generated_aiplatform_v1_model_service_get_model_sync.py
+++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1_model_service_get_model_sync.py
@@ -28,20 +28,18 @@
def sample_get_model():
- """Snippet for get_model"""
-
# Create a client
client = aiplatform_v1.ModelServiceClient()
# Initialize request argument(s)
request = aiplatform_v1.GetModelRequest(
- name="projects/{project}/locations/{location}/models/{model}",
+ name="name_value",
)
# Make the request
response = client.get_model(request=request)
- # Handle response
+ # Handle the response
print(response)
# [END aiplatform_generated_aiplatform_v1_ModelService_GetModel_sync]
diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1_model_service_list_model_evaluation_slices_async.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1_model_service_list_model_evaluation_slices_async.py
index bb5ba8c7ab..e4f30ed545 100644
--- a/samples/generated_samples/aiplatform_generated_aiplatform_v1_model_service_list_model_evaluation_slices_async.py
+++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1_model_service_list_model_evaluation_slices_async.py
@@ -28,18 +28,18 @@
async def sample_list_model_evaluation_slices():
- """Snippet for list_model_evaluation_slices"""
-
# Create a client
client = aiplatform_v1.ModelServiceAsyncClient()
# Initialize request argument(s)
request = aiplatform_v1.ListModelEvaluationSlicesRequest(
- parent="projects/{project}/locations/{location}/models/{model}/evaluations/{evaluation}",
+ parent="parent_value",
)
# Make the request
page_result = client.list_model_evaluation_slices(request=request)
+
+ # Handle the response
async for response in page_result:
print(response)
diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1_model_service_list_model_evaluation_slices_sync.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1_model_service_list_model_evaluation_slices_sync.py
index a427fa68e6..4b69b05762 100644
--- a/samples/generated_samples/aiplatform_generated_aiplatform_v1_model_service_list_model_evaluation_slices_sync.py
+++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1_model_service_list_model_evaluation_slices_sync.py
@@ -28,18 +28,18 @@
def sample_list_model_evaluation_slices():
- """Snippet for list_model_evaluation_slices"""
-
# Create a client
client = aiplatform_v1.ModelServiceClient()
# Initialize request argument(s)
request = aiplatform_v1.ListModelEvaluationSlicesRequest(
- parent="projects/{project}/locations/{location}/models/{model}/evaluations/{evaluation}",
+ parent="parent_value",
)
# Make the request
page_result = client.list_model_evaluation_slices(request=request)
+
+ # Handle the response
for response in page_result:
print(response)
diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1_model_service_list_model_evaluations_async.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1_model_service_list_model_evaluations_async.py
index 92316298d3..ecca3b784f 100644
--- a/samples/generated_samples/aiplatform_generated_aiplatform_v1_model_service_list_model_evaluations_async.py
+++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1_model_service_list_model_evaluations_async.py
@@ -28,18 +28,18 @@
async def sample_list_model_evaluations():
- """Snippet for list_model_evaluations"""
-
# Create a client
client = aiplatform_v1.ModelServiceAsyncClient()
# Initialize request argument(s)
request = aiplatform_v1.ListModelEvaluationsRequest(
- parent="projects/{project}/locations/{location}/models/{model}",
+ parent="parent_value",
)
# Make the request
page_result = client.list_model_evaluations(request=request)
+
+ # Handle the response
async for response in page_result:
print(response)
diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1_model_service_list_model_evaluations_sync.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1_model_service_list_model_evaluations_sync.py
index deb082d3ea..cb78d024c8 100644
--- a/samples/generated_samples/aiplatform_generated_aiplatform_v1_model_service_list_model_evaluations_sync.py
+++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1_model_service_list_model_evaluations_sync.py
@@ -28,18 +28,18 @@
def sample_list_model_evaluations():
- """Snippet for list_model_evaluations"""
-
# Create a client
client = aiplatform_v1.ModelServiceClient()
# Initialize request argument(s)
request = aiplatform_v1.ListModelEvaluationsRequest(
- parent="projects/{project}/locations/{location}/models/{model}",
+ parent="parent_value",
)
# Make the request
page_result = client.list_model_evaluations(request=request)
+
+ # Handle the response
for response in page_result:
print(response)
diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1_model_service_list_models_async.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1_model_service_list_models_async.py
index 70b240179d..bcda61b8d6 100644
--- a/samples/generated_samples/aiplatform_generated_aiplatform_v1_model_service_list_models_async.py
+++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1_model_service_list_models_async.py
@@ -28,18 +28,18 @@
async def sample_list_models():
- """Snippet for list_models"""
-
# Create a client
client = aiplatform_v1.ModelServiceAsyncClient()
# Initialize request argument(s)
request = aiplatform_v1.ListModelsRequest(
- parent="projects/{project}/locations/{location}",
+ parent="parent_value",
)
# Make the request
page_result = client.list_models(request=request)
+
+ # Handle the response
async for response in page_result:
print(response)
diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1_model_service_list_models_sync.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1_model_service_list_models_sync.py
index 6eeea2ec3b..d837b9267e 100644
--- a/samples/generated_samples/aiplatform_generated_aiplatform_v1_model_service_list_models_sync.py
+++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1_model_service_list_models_sync.py
@@ -28,18 +28,18 @@
def sample_list_models():
- """Snippet for list_models"""
-
# Create a client
client = aiplatform_v1.ModelServiceClient()
# Initialize request argument(s)
request = aiplatform_v1.ListModelsRequest(
- parent="projects/{project}/locations/{location}",
+ parent="parent_value",
)
# Make the request
page_result = client.list_models(request=request)
+
+ # Handle the response
for response in page_result:
print(response)
diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1_model_service_update_model_async.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1_model_service_update_model_async.py
index cd261399b8..efaa389d96 100644
--- a/samples/generated_samples/aiplatform_generated_aiplatform_v1_model_service_update_model_async.py
+++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1_model_service_update_model_async.py
@@ -28,8 +28,6 @@
async def sample_update_model():
- """Snippet for update_model"""
-
# Create a client
client = aiplatform_v1.ModelServiceAsyncClient()
@@ -44,7 +42,7 @@ async def sample_update_model():
# Make the request
response = await client.update_model(request=request)
- # Handle response
+ # Handle the response
print(response)
# [END aiplatform_generated_aiplatform_v1_ModelService_UpdateModel_async]
diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1_model_service_update_model_sync.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1_model_service_update_model_sync.py
index edbfedf07d..9cd7e28ba3 100644
--- a/samples/generated_samples/aiplatform_generated_aiplatform_v1_model_service_update_model_sync.py
+++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1_model_service_update_model_sync.py
@@ -28,8 +28,6 @@
def sample_update_model():
- """Snippet for update_model"""
-
# Create a client
client = aiplatform_v1.ModelServiceClient()
@@ -44,7 +42,7 @@ def sample_update_model():
# Make the request
response = client.update_model(request=request)
- # Handle response
+ # Handle the response
print(response)
# [END aiplatform_generated_aiplatform_v1_ModelService_UpdateModel_sync]
diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1_model_service_upload_model_async.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1_model_service_upload_model_async.py
index a5168785bd..46fb755edb 100644
--- a/samples/generated_samples/aiplatform_generated_aiplatform_v1_model_service_upload_model_async.py
+++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1_model_service_upload_model_async.py
@@ -28,8 +28,6 @@
async def sample_upload_model():
- """Snippet for upload_model"""
-
# Create a client
client = aiplatform_v1.ModelServiceAsyncClient()
@@ -38,7 +36,7 @@ async def sample_upload_model():
model.display_name = "display_name_value"
request = aiplatform_v1.UploadModelRequest(
- parent="projects/{project}/locations/{location}",
+ parent="parent_value",
model=model,
)
@@ -48,6 +46,8 @@ async def sample_upload_model():
print("Waiting for operation to complete...")
response = await operation.result()
+
+ # Handle the response
print(response)
# [END aiplatform_generated_aiplatform_v1_ModelService_UploadModel_async]
diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1_model_service_upload_model_sync.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1_model_service_upload_model_sync.py
index c341795b84..880ea1114d 100644
--- a/samples/generated_samples/aiplatform_generated_aiplatform_v1_model_service_upload_model_sync.py
+++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1_model_service_upload_model_sync.py
@@ -28,8 +28,6 @@
def sample_upload_model():
- """Snippet for upload_model"""
-
# Create a client
client = aiplatform_v1.ModelServiceClient()
@@ -38,7 +36,7 @@ def sample_upload_model():
model.display_name = "display_name_value"
request = aiplatform_v1.UploadModelRequest(
- parent="projects/{project}/locations/{location}",
+ parent="parent_value",
model=model,
)
@@ -48,6 +46,8 @@ def sample_upload_model():
print("Waiting for operation to complete...")
response = operation.result()
+
+ # Handle the response
print(response)
# [END aiplatform_generated_aiplatform_v1_ModelService_UploadModel_sync]
diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1_pipeline_service_cancel_pipeline_job_async.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1_pipeline_service_cancel_pipeline_job_async.py
index 0056482715..7c4cef8bb9 100644
--- a/samples/generated_samples/aiplatform_generated_aiplatform_v1_pipeline_service_cancel_pipeline_job_async.py
+++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1_pipeline_service_cancel_pipeline_job_async.py
@@ -28,18 +28,16 @@
async def sample_cancel_pipeline_job():
- """Snippet for cancel_pipeline_job"""
-
# Create a client
client = aiplatform_v1.PipelineServiceAsyncClient()
# Initialize request argument(s)
request = aiplatform_v1.CancelPipelineJobRequest(
- name="projects/{project}/locations/{location}/pipelineJobs/{pipeline_job}",
+ name="name_value",
)
# Make the request
- response = await client.cancel_pipeline_job(request=request)
+ await client.cancel_pipeline_job(request=request)
# [END aiplatform_generated_aiplatform_v1_PipelineService_CancelPipelineJob_async]
diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1_pipeline_service_cancel_pipeline_job_sync.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1_pipeline_service_cancel_pipeline_job_sync.py
index 1d9f27b960..f3add9bcea 100644
--- a/samples/generated_samples/aiplatform_generated_aiplatform_v1_pipeline_service_cancel_pipeline_job_sync.py
+++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1_pipeline_service_cancel_pipeline_job_sync.py
@@ -28,18 +28,16 @@
def sample_cancel_pipeline_job():
- """Snippet for cancel_pipeline_job"""
-
# Create a client
client = aiplatform_v1.PipelineServiceClient()
# Initialize request argument(s)
request = aiplatform_v1.CancelPipelineJobRequest(
- name="projects/{project}/locations/{location}/pipelineJobs/{pipeline_job}",
+ name="name_value",
)
# Make the request
- response = client.cancel_pipeline_job(request=request)
+ client.cancel_pipeline_job(request=request)
# [END aiplatform_generated_aiplatform_v1_PipelineService_CancelPipelineJob_sync]
diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1_pipeline_service_cancel_training_pipeline_async.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1_pipeline_service_cancel_training_pipeline_async.py
index 9289e25e3e..75969857c4 100644
--- a/samples/generated_samples/aiplatform_generated_aiplatform_v1_pipeline_service_cancel_training_pipeline_async.py
+++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1_pipeline_service_cancel_training_pipeline_async.py
@@ -28,18 +28,16 @@
async def sample_cancel_training_pipeline():
- """Snippet for cancel_training_pipeline"""
-
# Create a client
client = aiplatform_v1.PipelineServiceAsyncClient()
# Initialize request argument(s)
request = aiplatform_v1.CancelTrainingPipelineRequest(
- name="projects/{project}/locations/{location}/trainingPipelines/{training_pipeline}",
+ name="name_value",
)
# Make the request
- response = await client.cancel_training_pipeline(request=request)
+ await client.cancel_training_pipeline(request=request)
# [END aiplatform_generated_aiplatform_v1_PipelineService_CancelTrainingPipeline_async]
diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1_pipeline_service_cancel_training_pipeline_sync.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1_pipeline_service_cancel_training_pipeline_sync.py
index 5c1dee977d..2cfd8c5a29 100644
--- a/samples/generated_samples/aiplatform_generated_aiplatform_v1_pipeline_service_cancel_training_pipeline_sync.py
+++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1_pipeline_service_cancel_training_pipeline_sync.py
@@ -28,18 +28,16 @@
def sample_cancel_training_pipeline():
- """Snippet for cancel_training_pipeline"""
-
# Create a client
client = aiplatform_v1.PipelineServiceClient()
# Initialize request argument(s)
request = aiplatform_v1.CancelTrainingPipelineRequest(
- name="projects/{project}/locations/{location}/trainingPipelines/{training_pipeline}",
+ name="name_value",
)
# Make the request
- response = client.cancel_training_pipeline(request=request)
+ client.cancel_training_pipeline(request=request)
# [END aiplatform_generated_aiplatform_v1_PipelineService_CancelTrainingPipeline_sync]
diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1_pipeline_service_create_pipeline_job_async.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1_pipeline_service_create_pipeline_job_async.py
index c407bbc520..0ea8f783ef 100644
--- a/samples/generated_samples/aiplatform_generated_aiplatform_v1_pipeline_service_create_pipeline_job_async.py
+++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1_pipeline_service_create_pipeline_job_async.py
@@ -28,20 +28,18 @@
async def sample_create_pipeline_job():
- """Snippet for create_pipeline_job"""
-
# Create a client
client = aiplatform_v1.PipelineServiceAsyncClient()
# Initialize request argument(s)
request = aiplatform_v1.CreatePipelineJobRequest(
- parent="projects/{project}/locations/{location}",
+ parent="parent_value",
)
# Make the request
response = await client.create_pipeline_job(request=request)
- # Handle response
+ # Handle the response
print(response)
# [END aiplatform_generated_aiplatform_v1_PipelineService_CreatePipelineJob_async]
diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1_pipeline_service_create_pipeline_job_sync.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1_pipeline_service_create_pipeline_job_sync.py
index 68b42cb9f1..4c40a7a3d7 100644
--- a/samples/generated_samples/aiplatform_generated_aiplatform_v1_pipeline_service_create_pipeline_job_sync.py
+++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1_pipeline_service_create_pipeline_job_sync.py
@@ -28,20 +28,18 @@
def sample_create_pipeline_job():
- """Snippet for create_pipeline_job"""
-
# Create a client
client = aiplatform_v1.PipelineServiceClient()
# Initialize request argument(s)
request = aiplatform_v1.CreatePipelineJobRequest(
- parent="projects/{project}/locations/{location}",
+ parent="parent_value",
)
# Make the request
response = client.create_pipeline_job(request=request)
- # Handle response
+ # Handle the response
print(response)
# [END aiplatform_generated_aiplatform_v1_PipelineService_CreatePipelineJob_sync]
diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1_pipeline_service_create_training_pipeline_async.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1_pipeline_service_create_training_pipeline_async.py
index 63b74a58bc..0278845dc9 100644
--- a/samples/generated_samples/aiplatform_generated_aiplatform_v1_pipeline_service_create_training_pipeline_async.py
+++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1_pipeline_service_create_training_pipeline_async.py
@@ -28,8 +28,6 @@
async def sample_create_training_pipeline():
- """Snippet for create_training_pipeline"""
-
# Create a client
client = aiplatform_v1.PipelineServiceAsyncClient()
@@ -40,14 +38,14 @@ async def sample_create_training_pipeline():
training_pipeline.training_task_inputs.null_value = "NULL_VALUE"
request = aiplatform_v1.CreateTrainingPipelineRequest(
- parent="projects/{project}/locations/{location}",
+ parent="parent_value",
training_pipeline=training_pipeline,
)
# Make the request
response = await client.create_training_pipeline(request=request)
- # Handle response
+ # Handle the response
print(response)
# [END aiplatform_generated_aiplatform_v1_PipelineService_CreateTrainingPipeline_async]
diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1_pipeline_service_create_training_pipeline_sync.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1_pipeline_service_create_training_pipeline_sync.py
index 15721f56e2..ff4ab02c66 100644
--- a/samples/generated_samples/aiplatform_generated_aiplatform_v1_pipeline_service_create_training_pipeline_sync.py
+++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1_pipeline_service_create_training_pipeline_sync.py
@@ -28,8 +28,6 @@
def sample_create_training_pipeline():
- """Snippet for create_training_pipeline"""
-
# Create a client
client = aiplatform_v1.PipelineServiceClient()
@@ -40,14 +38,14 @@ def sample_create_training_pipeline():
training_pipeline.training_task_inputs.null_value = "NULL_VALUE"
request = aiplatform_v1.CreateTrainingPipelineRequest(
- parent="projects/{project}/locations/{location}",
+ parent="parent_value",
training_pipeline=training_pipeline,
)
# Make the request
response = client.create_training_pipeline(request=request)
- # Handle response
+ # Handle the response
print(response)
# [END aiplatform_generated_aiplatform_v1_PipelineService_CreateTrainingPipeline_sync]
diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1_pipeline_service_delete_pipeline_job_async.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1_pipeline_service_delete_pipeline_job_async.py
index 3db0435663..fc29d57550 100644
--- a/samples/generated_samples/aiplatform_generated_aiplatform_v1_pipeline_service_delete_pipeline_job_async.py
+++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1_pipeline_service_delete_pipeline_job_async.py
@@ -28,14 +28,12 @@
async def sample_delete_pipeline_job():
- """Snippet for delete_pipeline_job"""
-
# Create a client
client = aiplatform_v1.PipelineServiceAsyncClient()
# Initialize request argument(s)
request = aiplatform_v1.DeletePipelineJobRequest(
- name="projects/{project}/locations/{location}/pipelineJobs/{pipeline_job}",
+ name="name_value",
)
# Make the request
@@ -44,6 +42,8 @@ async def sample_delete_pipeline_job():
print("Waiting for operation to complete...")
response = await operation.result()
+
+ # Handle the response
print(response)
# [END aiplatform_generated_aiplatform_v1_PipelineService_DeletePipelineJob_async]
diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1_pipeline_service_delete_pipeline_job_sync.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1_pipeline_service_delete_pipeline_job_sync.py
index 7433442a0f..48404a7bde 100644
--- a/samples/generated_samples/aiplatform_generated_aiplatform_v1_pipeline_service_delete_pipeline_job_sync.py
+++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1_pipeline_service_delete_pipeline_job_sync.py
@@ -28,14 +28,12 @@
def sample_delete_pipeline_job():
- """Snippet for delete_pipeline_job"""
-
# Create a client
client = aiplatform_v1.PipelineServiceClient()
# Initialize request argument(s)
request = aiplatform_v1.DeletePipelineJobRequest(
- name="projects/{project}/locations/{location}/pipelineJobs/{pipeline_job}",
+ name="name_value",
)
# Make the request
@@ -44,6 +42,8 @@ def sample_delete_pipeline_job():
print("Waiting for operation to complete...")
response = operation.result()
+
+ # Handle the response
print(response)
# [END aiplatform_generated_aiplatform_v1_PipelineService_DeletePipelineJob_sync]
diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1_pipeline_service_delete_training_pipeline_async.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1_pipeline_service_delete_training_pipeline_async.py
index 717870e689..0a778f00ed 100644
--- a/samples/generated_samples/aiplatform_generated_aiplatform_v1_pipeline_service_delete_training_pipeline_async.py
+++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1_pipeline_service_delete_training_pipeline_async.py
@@ -28,14 +28,12 @@
async def sample_delete_training_pipeline():
- """Snippet for delete_training_pipeline"""
-
# Create a client
client = aiplatform_v1.PipelineServiceAsyncClient()
# Initialize request argument(s)
request = aiplatform_v1.DeleteTrainingPipelineRequest(
- name="projects/{project}/locations/{location}/trainingPipelines/{training_pipeline}",
+ name="name_value",
)
# Make the request
@@ -44,6 +42,8 @@ async def sample_delete_training_pipeline():
print("Waiting for operation to complete...")
response = await operation.result()
+
+ # Handle the response
print(response)
# [END aiplatform_generated_aiplatform_v1_PipelineService_DeleteTrainingPipeline_async]
diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1_pipeline_service_delete_training_pipeline_sync.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1_pipeline_service_delete_training_pipeline_sync.py
index 41ffbbe6f1..9f7fcfa042 100644
--- a/samples/generated_samples/aiplatform_generated_aiplatform_v1_pipeline_service_delete_training_pipeline_sync.py
+++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1_pipeline_service_delete_training_pipeline_sync.py
@@ -28,14 +28,12 @@
def sample_delete_training_pipeline():
- """Snippet for delete_training_pipeline"""
-
# Create a client
client = aiplatform_v1.PipelineServiceClient()
# Initialize request argument(s)
request = aiplatform_v1.DeleteTrainingPipelineRequest(
- name="projects/{project}/locations/{location}/trainingPipelines/{training_pipeline}",
+ name="name_value",
)
# Make the request
@@ -44,6 +42,8 @@ def sample_delete_training_pipeline():
print("Waiting for operation to complete...")
response = operation.result()
+
+ # Handle the response
print(response)
# [END aiplatform_generated_aiplatform_v1_PipelineService_DeleteTrainingPipeline_sync]
diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1_pipeline_service_get_pipeline_job_async.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1_pipeline_service_get_pipeline_job_async.py
index bc4bdc0e2f..d848a3dc17 100644
--- a/samples/generated_samples/aiplatform_generated_aiplatform_v1_pipeline_service_get_pipeline_job_async.py
+++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1_pipeline_service_get_pipeline_job_async.py
@@ -28,20 +28,18 @@
async def sample_get_pipeline_job():
- """Snippet for get_pipeline_job"""
-
# Create a client
client = aiplatform_v1.PipelineServiceAsyncClient()
# Initialize request argument(s)
request = aiplatform_v1.GetPipelineJobRequest(
- name="projects/{project}/locations/{location}/pipelineJobs/{pipeline_job}",
+ name="name_value",
)
# Make the request
response = await client.get_pipeline_job(request=request)
- # Handle response
+ # Handle the response
print(response)
# [END aiplatform_generated_aiplatform_v1_PipelineService_GetPipelineJob_async]
diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1_pipeline_service_get_pipeline_job_sync.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1_pipeline_service_get_pipeline_job_sync.py
index c07d487a59..63a4297594 100644
--- a/samples/generated_samples/aiplatform_generated_aiplatform_v1_pipeline_service_get_pipeline_job_sync.py
+++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1_pipeline_service_get_pipeline_job_sync.py
@@ -28,20 +28,18 @@
def sample_get_pipeline_job():
- """Snippet for get_pipeline_job"""
-
# Create a client
client = aiplatform_v1.PipelineServiceClient()
# Initialize request argument(s)
request = aiplatform_v1.GetPipelineJobRequest(
- name="projects/{project}/locations/{location}/pipelineJobs/{pipeline_job}",
+ name="name_value",
)
# Make the request
response = client.get_pipeline_job(request=request)
- # Handle response
+ # Handle the response
print(response)
# [END aiplatform_generated_aiplatform_v1_PipelineService_GetPipelineJob_sync]
diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1_pipeline_service_get_training_pipeline_async.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1_pipeline_service_get_training_pipeline_async.py
index 3ac5e49c38..41b88979e6 100644
--- a/samples/generated_samples/aiplatform_generated_aiplatform_v1_pipeline_service_get_training_pipeline_async.py
+++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1_pipeline_service_get_training_pipeline_async.py
@@ -28,20 +28,18 @@
async def sample_get_training_pipeline():
- """Snippet for get_training_pipeline"""
-
# Create a client
client = aiplatform_v1.PipelineServiceAsyncClient()
# Initialize request argument(s)
request = aiplatform_v1.GetTrainingPipelineRequest(
- name="projects/{project}/locations/{location}/trainingPipelines/{training_pipeline}",
+ name="name_value",
)
# Make the request
response = await client.get_training_pipeline(request=request)
- # Handle response
+ # Handle the response
print(response)
# [END aiplatform_generated_aiplatform_v1_PipelineService_GetTrainingPipeline_async]
diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1_pipeline_service_get_training_pipeline_sync.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1_pipeline_service_get_training_pipeline_sync.py
index 21854206ba..ab97bf6546 100644
--- a/samples/generated_samples/aiplatform_generated_aiplatform_v1_pipeline_service_get_training_pipeline_sync.py
+++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1_pipeline_service_get_training_pipeline_sync.py
@@ -28,20 +28,18 @@
def sample_get_training_pipeline():
- """Snippet for get_training_pipeline"""
-
# Create a client
client = aiplatform_v1.PipelineServiceClient()
# Initialize request argument(s)
request = aiplatform_v1.GetTrainingPipelineRequest(
- name="projects/{project}/locations/{location}/trainingPipelines/{training_pipeline}",
+ name="name_value",
)
# Make the request
response = client.get_training_pipeline(request=request)
- # Handle response
+ # Handle the response
print(response)
# [END aiplatform_generated_aiplatform_v1_PipelineService_GetTrainingPipeline_sync]
diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1_pipeline_service_list_pipeline_jobs_async.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1_pipeline_service_list_pipeline_jobs_async.py
index e11a96fa97..c177f976f3 100644
--- a/samples/generated_samples/aiplatform_generated_aiplatform_v1_pipeline_service_list_pipeline_jobs_async.py
+++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1_pipeline_service_list_pipeline_jobs_async.py
@@ -28,18 +28,18 @@
async def sample_list_pipeline_jobs():
- """Snippet for list_pipeline_jobs"""
-
# Create a client
client = aiplatform_v1.PipelineServiceAsyncClient()
# Initialize request argument(s)
request = aiplatform_v1.ListPipelineJobsRequest(
- parent="projects/{project}/locations/{location}",
+ parent="parent_value",
)
# Make the request
page_result = client.list_pipeline_jobs(request=request)
+
+ # Handle the response
async for response in page_result:
print(response)
diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1_pipeline_service_list_pipeline_jobs_sync.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1_pipeline_service_list_pipeline_jobs_sync.py
index e3b8b666cf..8a0853fcbb 100644
--- a/samples/generated_samples/aiplatform_generated_aiplatform_v1_pipeline_service_list_pipeline_jobs_sync.py
+++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1_pipeline_service_list_pipeline_jobs_sync.py
@@ -28,18 +28,18 @@
def sample_list_pipeline_jobs():
- """Snippet for list_pipeline_jobs"""
-
# Create a client
client = aiplatform_v1.PipelineServiceClient()
# Initialize request argument(s)
request = aiplatform_v1.ListPipelineJobsRequest(
- parent="projects/{project}/locations/{location}",
+ parent="parent_value",
)
# Make the request
page_result = client.list_pipeline_jobs(request=request)
+
+ # Handle the response
for response in page_result:
print(response)
diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1_pipeline_service_list_training_pipelines_async.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1_pipeline_service_list_training_pipelines_async.py
index d12d8e8d35..785927f3c2 100644
--- a/samples/generated_samples/aiplatform_generated_aiplatform_v1_pipeline_service_list_training_pipelines_async.py
+++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1_pipeline_service_list_training_pipelines_async.py
@@ -28,18 +28,18 @@
async def sample_list_training_pipelines():
- """Snippet for list_training_pipelines"""
-
# Create a client
client = aiplatform_v1.PipelineServiceAsyncClient()
# Initialize request argument(s)
request = aiplatform_v1.ListTrainingPipelinesRequest(
- parent="projects/{project}/locations/{location}",
+ parent="parent_value",
)
# Make the request
page_result = client.list_training_pipelines(request=request)
+
+ # Handle the response
async for response in page_result:
print(response)
diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1_pipeline_service_list_training_pipelines_sync.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1_pipeline_service_list_training_pipelines_sync.py
index 7f2c26b875..e752a91599 100644
--- a/samples/generated_samples/aiplatform_generated_aiplatform_v1_pipeline_service_list_training_pipelines_sync.py
+++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1_pipeline_service_list_training_pipelines_sync.py
@@ -28,18 +28,18 @@
def sample_list_training_pipelines():
- """Snippet for list_training_pipelines"""
-
# Create a client
client = aiplatform_v1.PipelineServiceClient()
# Initialize request argument(s)
request = aiplatform_v1.ListTrainingPipelinesRequest(
- parent="projects/{project}/locations/{location}",
+ parent="parent_value",
)
# Make the request
page_result = client.list_training_pipelines(request=request)
+
+ # Handle the response
for response in page_result:
print(response)
diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1_prediction_service_explain_async.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1_prediction_service_explain_async.py
index 8cf002070f..8a75e04569 100644
--- a/samples/generated_samples/aiplatform_generated_aiplatform_v1_prediction_service_explain_async.py
+++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1_prediction_service_explain_async.py
@@ -28,8 +28,6 @@
async def sample_explain():
- """Snippet for explain"""
-
# Create a client
client = aiplatform_v1.PredictionServiceAsyncClient()
@@ -38,14 +36,14 @@ async def sample_explain():
instances.null_value = "NULL_VALUE"
request = aiplatform_v1.ExplainRequest(
- endpoint="projects/{project}/locations/{location}/endpoints/{endpoint}",
+ endpoint="endpoint_value",
instances=instances,
)
# Make the request
response = await client.explain(request=request)
- # Handle response
+ # Handle the response
print(response)
# [END aiplatform_generated_aiplatform_v1_PredictionService_Explain_async]
diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1_prediction_service_explain_sync.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1_prediction_service_explain_sync.py
index 1a43475a6c..2a46d842b2 100644
--- a/samples/generated_samples/aiplatform_generated_aiplatform_v1_prediction_service_explain_sync.py
+++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1_prediction_service_explain_sync.py
@@ -28,8 +28,6 @@
def sample_explain():
- """Snippet for explain"""
-
# Create a client
client = aiplatform_v1.PredictionServiceClient()
@@ -38,14 +36,14 @@ def sample_explain():
instances.null_value = "NULL_VALUE"
request = aiplatform_v1.ExplainRequest(
- endpoint="projects/{project}/locations/{location}/endpoints/{endpoint}",
+ endpoint="endpoint_value",
instances=instances,
)
# Make the request
response = client.explain(request=request)
- # Handle response
+ # Handle the response
print(response)
# [END aiplatform_generated_aiplatform_v1_PredictionService_Explain_sync]
diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1_prediction_service_predict_async.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1_prediction_service_predict_async.py
index 6575d35bb6..c255068ae6 100644
--- a/samples/generated_samples/aiplatform_generated_aiplatform_v1_prediction_service_predict_async.py
+++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1_prediction_service_predict_async.py
@@ -28,8 +28,6 @@
async def sample_predict():
- """Snippet for predict"""
-
# Create a client
client = aiplatform_v1.PredictionServiceAsyncClient()
@@ -38,14 +36,14 @@ async def sample_predict():
instances.null_value = "NULL_VALUE"
request = aiplatform_v1.PredictRequest(
- endpoint="projects/{project}/locations/{location}/endpoints/{endpoint}",
+ endpoint="endpoint_value",
instances=instances,
)
# Make the request
response = await client.predict(request=request)
- # Handle response
+ # Handle the response
print(response)
# [END aiplatform_generated_aiplatform_v1_PredictionService_Predict_async]
diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1_prediction_service_predict_sync.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1_prediction_service_predict_sync.py
index 70a93a4a53..268eeb8d3b 100644
--- a/samples/generated_samples/aiplatform_generated_aiplatform_v1_prediction_service_predict_sync.py
+++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1_prediction_service_predict_sync.py
@@ -28,8 +28,6 @@
def sample_predict():
- """Snippet for predict"""
-
# Create a client
client = aiplatform_v1.PredictionServiceClient()
@@ -38,14 +36,14 @@ def sample_predict():
instances.null_value = "NULL_VALUE"
request = aiplatform_v1.PredictRequest(
- endpoint="projects/{project}/locations/{location}/endpoints/{endpoint}",
+ endpoint="endpoint_value",
instances=instances,
)
# Make the request
response = client.predict(request=request)
- # Handle response
+ # Handle the response
print(response)
# [END aiplatform_generated_aiplatform_v1_PredictionService_Predict_sync]
diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1_prediction_service_raw_predict_async.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1_prediction_service_raw_predict_async.py
index 052e7862ac..6733db3d6c 100644
--- a/samples/generated_samples/aiplatform_generated_aiplatform_v1_prediction_service_raw_predict_async.py
+++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1_prediction_service_raw_predict_async.py
@@ -28,20 +28,18 @@
async def sample_raw_predict():
- """Snippet for raw_predict"""
-
# Create a client
client = aiplatform_v1.PredictionServiceAsyncClient()
# Initialize request argument(s)
request = aiplatform_v1.RawPredictRequest(
- endpoint="projects/{project}/locations/{location}/endpoints/{endpoint}",
+ endpoint="endpoint_value",
)
# Make the request
response = await client.raw_predict(request=request)
- # Handle response
+ # Handle the response
print(response)
# [END aiplatform_generated_aiplatform_v1_PredictionService_RawPredict_async]
diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1_prediction_service_raw_predict_sync.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1_prediction_service_raw_predict_sync.py
index da8833c07f..ee69a2fef3 100644
--- a/samples/generated_samples/aiplatform_generated_aiplatform_v1_prediction_service_raw_predict_sync.py
+++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1_prediction_service_raw_predict_sync.py
@@ -28,20 +28,18 @@
def sample_raw_predict():
- """Snippet for raw_predict"""
-
# Create a client
client = aiplatform_v1.PredictionServiceClient()
# Initialize request argument(s)
request = aiplatform_v1.RawPredictRequest(
- endpoint="projects/{project}/locations/{location}/endpoints/{endpoint}",
+ endpoint="endpoint_value",
)
# Make the request
response = client.raw_predict(request=request)
- # Handle response
+ # Handle the response
print(response)
# [END aiplatform_generated_aiplatform_v1_PredictionService_RawPredict_sync]
diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1_specialist_pool_service_create_specialist_pool_async.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1_specialist_pool_service_create_specialist_pool_async.py
index 6f988a8525..b229a9bc71 100644
--- a/samples/generated_samples/aiplatform_generated_aiplatform_v1_specialist_pool_service_create_specialist_pool_async.py
+++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1_specialist_pool_service_create_specialist_pool_async.py
@@ -28,8 +28,6 @@
async def sample_create_specialist_pool():
- """Snippet for create_specialist_pool"""
-
# Create a client
client = aiplatform_v1.SpecialistPoolServiceAsyncClient()
@@ -39,7 +37,7 @@ async def sample_create_specialist_pool():
specialist_pool.display_name = "display_name_value"
request = aiplatform_v1.CreateSpecialistPoolRequest(
- parent="projects/{project}/locations/{location}",
+ parent="parent_value",
specialist_pool=specialist_pool,
)
@@ -49,6 +47,8 @@ async def sample_create_specialist_pool():
print("Waiting for operation to complete...")
response = await operation.result()
+
+ # Handle the response
print(response)
# [END aiplatform_generated_aiplatform_v1_SpecialistPoolService_CreateSpecialistPool_async]
diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1_specialist_pool_service_create_specialist_pool_sync.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1_specialist_pool_service_create_specialist_pool_sync.py
index fcbfc051d5..74dc071a48 100644
--- a/samples/generated_samples/aiplatform_generated_aiplatform_v1_specialist_pool_service_create_specialist_pool_sync.py
+++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1_specialist_pool_service_create_specialist_pool_sync.py
@@ -28,8 +28,6 @@
def sample_create_specialist_pool():
- """Snippet for create_specialist_pool"""
-
# Create a client
client = aiplatform_v1.SpecialistPoolServiceClient()
@@ -39,7 +37,7 @@ def sample_create_specialist_pool():
specialist_pool.display_name = "display_name_value"
request = aiplatform_v1.CreateSpecialistPoolRequest(
- parent="projects/{project}/locations/{location}",
+ parent="parent_value",
specialist_pool=specialist_pool,
)
@@ -49,6 +47,8 @@ def sample_create_specialist_pool():
print("Waiting for operation to complete...")
response = operation.result()
+
+ # Handle the response
print(response)
# [END aiplatform_generated_aiplatform_v1_SpecialistPoolService_CreateSpecialistPool_sync]
diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1_specialist_pool_service_delete_specialist_pool_async.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1_specialist_pool_service_delete_specialist_pool_async.py
index 703711426e..558ea7ed13 100644
--- a/samples/generated_samples/aiplatform_generated_aiplatform_v1_specialist_pool_service_delete_specialist_pool_async.py
+++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1_specialist_pool_service_delete_specialist_pool_async.py
@@ -28,14 +28,12 @@
async def sample_delete_specialist_pool():
- """Snippet for delete_specialist_pool"""
-
# Create a client
client = aiplatform_v1.SpecialistPoolServiceAsyncClient()
# Initialize request argument(s)
request = aiplatform_v1.DeleteSpecialistPoolRequest(
- name="projects/{project}/locations/{location}/specialistPools/{specialist_pool}",
+ name="name_value",
)
# Make the request
@@ -44,6 +42,8 @@ async def sample_delete_specialist_pool():
print("Waiting for operation to complete...")
response = await operation.result()
+
+ # Handle the response
print(response)
# [END aiplatform_generated_aiplatform_v1_SpecialistPoolService_DeleteSpecialistPool_async]
diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1_specialist_pool_service_delete_specialist_pool_sync.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1_specialist_pool_service_delete_specialist_pool_sync.py
index 1eef978d9f..69ce2ddd2b 100644
--- a/samples/generated_samples/aiplatform_generated_aiplatform_v1_specialist_pool_service_delete_specialist_pool_sync.py
+++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1_specialist_pool_service_delete_specialist_pool_sync.py
@@ -28,14 +28,12 @@
def sample_delete_specialist_pool():
- """Snippet for delete_specialist_pool"""
-
# Create a client
client = aiplatform_v1.SpecialistPoolServiceClient()
# Initialize request argument(s)
request = aiplatform_v1.DeleteSpecialistPoolRequest(
- name="projects/{project}/locations/{location}/specialistPools/{specialist_pool}",
+ name="name_value",
)
# Make the request
@@ -44,6 +42,8 @@ def sample_delete_specialist_pool():
print("Waiting for operation to complete...")
response = operation.result()
+
+ # Handle the response
print(response)
# [END aiplatform_generated_aiplatform_v1_SpecialistPoolService_DeleteSpecialistPool_sync]
diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1_specialist_pool_service_get_specialist_pool_async.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1_specialist_pool_service_get_specialist_pool_async.py
index 5a4099ad9d..0c07bd6c58 100644
--- a/samples/generated_samples/aiplatform_generated_aiplatform_v1_specialist_pool_service_get_specialist_pool_async.py
+++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1_specialist_pool_service_get_specialist_pool_async.py
@@ -28,20 +28,18 @@
async def sample_get_specialist_pool():
- """Snippet for get_specialist_pool"""
-
# Create a client
client = aiplatform_v1.SpecialistPoolServiceAsyncClient()
# Initialize request argument(s)
request = aiplatform_v1.GetSpecialistPoolRequest(
- name="projects/{project}/locations/{location}/specialistPools/{specialist_pool}",
+ name="name_value",
)
# Make the request
response = await client.get_specialist_pool(request=request)
- # Handle response
+ # Handle the response
print(response)
# [END aiplatform_generated_aiplatform_v1_SpecialistPoolService_GetSpecialistPool_async]
diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1_specialist_pool_service_get_specialist_pool_sync.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1_specialist_pool_service_get_specialist_pool_sync.py
index 20cb00be23..41408bc876 100644
--- a/samples/generated_samples/aiplatform_generated_aiplatform_v1_specialist_pool_service_get_specialist_pool_sync.py
+++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1_specialist_pool_service_get_specialist_pool_sync.py
@@ -28,20 +28,18 @@
def sample_get_specialist_pool():
- """Snippet for get_specialist_pool"""
-
# Create a client
client = aiplatform_v1.SpecialistPoolServiceClient()
# Initialize request argument(s)
request = aiplatform_v1.GetSpecialistPoolRequest(
- name="projects/{project}/locations/{location}/specialistPools/{specialist_pool}",
+ name="name_value",
)
# Make the request
response = client.get_specialist_pool(request=request)
- # Handle response
+ # Handle the response
print(response)
# [END aiplatform_generated_aiplatform_v1_SpecialistPoolService_GetSpecialistPool_sync]
diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1_specialist_pool_service_list_specialist_pools_async.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1_specialist_pool_service_list_specialist_pools_async.py
index 8b9737d282..53b6537503 100644
--- a/samples/generated_samples/aiplatform_generated_aiplatform_v1_specialist_pool_service_list_specialist_pools_async.py
+++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1_specialist_pool_service_list_specialist_pools_async.py
@@ -28,18 +28,18 @@
async def sample_list_specialist_pools():
- """Snippet for list_specialist_pools"""
-
# Create a client
client = aiplatform_v1.SpecialistPoolServiceAsyncClient()
# Initialize request argument(s)
request = aiplatform_v1.ListSpecialistPoolsRequest(
- parent="projects/{project}/locations/{location}",
+ parent="parent_value",
)
# Make the request
page_result = client.list_specialist_pools(request=request)
+
+ # Handle the response
async for response in page_result:
print(response)
diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1_specialist_pool_service_list_specialist_pools_sync.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1_specialist_pool_service_list_specialist_pools_sync.py
index 0a8385abd1..8b6d07fa0f 100644
--- a/samples/generated_samples/aiplatform_generated_aiplatform_v1_specialist_pool_service_list_specialist_pools_sync.py
+++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1_specialist_pool_service_list_specialist_pools_sync.py
@@ -28,18 +28,18 @@
def sample_list_specialist_pools():
- """Snippet for list_specialist_pools"""
-
# Create a client
client = aiplatform_v1.SpecialistPoolServiceClient()
# Initialize request argument(s)
request = aiplatform_v1.ListSpecialistPoolsRequest(
- parent="projects/{project}/locations/{location}",
+ parent="parent_value",
)
# Make the request
page_result = client.list_specialist_pools(request=request)
+
+ # Handle the response
for response in page_result:
print(response)
diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1_specialist_pool_service_update_specialist_pool_async.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1_specialist_pool_service_update_specialist_pool_async.py
index ce231b0bc0..a7a37310cc 100644
--- a/samples/generated_samples/aiplatform_generated_aiplatform_v1_specialist_pool_service_update_specialist_pool_async.py
+++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1_specialist_pool_service_update_specialist_pool_async.py
@@ -28,8 +28,6 @@
async def sample_update_specialist_pool():
- """Snippet for update_specialist_pool"""
-
# Create a client
client = aiplatform_v1.SpecialistPoolServiceAsyncClient()
@@ -48,6 +46,8 @@ async def sample_update_specialist_pool():
print("Waiting for operation to complete...")
response = await operation.result()
+
+ # Handle the response
print(response)
# [END aiplatform_generated_aiplatform_v1_SpecialistPoolService_UpdateSpecialistPool_async]
diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1_specialist_pool_service_update_specialist_pool_sync.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1_specialist_pool_service_update_specialist_pool_sync.py
index e66193b62b..c49446bb60 100644
--- a/samples/generated_samples/aiplatform_generated_aiplatform_v1_specialist_pool_service_update_specialist_pool_sync.py
+++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1_specialist_pool_service_update_specialist_pool_sync.py
@@ -28,8 +28,6 @@
def sample_update_specialist_pool():
- """Snippet for update_specialist_pool"""
-
# Create a client
client = aiplatform_v1.SpecialistPoolServiceClient()
@@ -48,6 +46,8 @@ def sample_update_specialist_pool():
print("Waiting for operation to complete...")
response = operation.result()
+
+ # Handle the response
print(response)
# [END aiplatform_generated_aiplatform_v1_SpecialistPoolService_UpdateSpecialistPool_sync]
diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1_tensorboard_service_batch_create_tensorboard_runs_async.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1_tensorboard_service_batch_create_tensorboard_runs_async.py
new file mode 100644
index 0000000000..71a29f27dc
--- /dev/null
+++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1_tensorboard_service_batch_create_tensorboard_runs_async.py
@@ -0,0 +1,51 @@
+# -*- coding: utf-8 -*-
+# Copyright 2020 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+# Generated code. DO NOT EDIT!
+#
+# Snippet for BatchCreateTensorboardRuns
+# NOTE: This snippet has been automatically generated for illustrative purposes only.
+# It may require modifications to work in your environment.
+
+# To install the latest published package dependency, execute the following:
+# python3 -m pip install google-cloud-aiplatform
+
+
+# [START aiplatform_generated_aiplatform_v1_TensorboardService_BatchCreateTensorboardRuns_async]
+from google.cloud import aiplatform_v1
+
+
+async def sample_batch_create_tensorboard_runs():
+ # Create a client
+ client = aiplatform_v1.TensorboardServiceAsyncClient()
+
+ # Initialize request argument(s)
+ requests = aiplatform_v1.CreateTensorboardRunRequest()
+ requests.parent = "parent_value"
+ requests.tensorboard_run.display_name = "display_name_value"
+ requests.tensorboard_run_id = "tensorboard_run_id_value"
+
+ request = aiplatform_v1.BatchCreateTensorboardRunsRequest(
+ parent="parent_value",
+ requests=requests,
+ )
+
+ # Make the request
+ response = await client.batch_create_tensorboard_runs(request=request)
+
+ # Handle the response
+ print(response)
+
+# [END aiplatform_generated_aiplatform_v1_TensorboardService_BatchCreateTensorboardRuns_async]
diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1_tensorboard_service_batch_create_tensorboard_runs_sync.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1_tensorboard_service_batch_create_tensorboard_runs_sync.py
new file mode 100644
index 0000000000..3dd77f1d3d
--- /dev/null
+++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1_tensorboard_service_batch_create_tensorboard_runs_sync.py
@@ -0,0 +1,51 @@
+# -*- coding: utf-8 -*-
+# Copyright 2020 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+# Generated code. DO NOT EDIT!
+#
+# Snippet for BatchCreateTensorboardRuns
+# NOTE: This snippet has been automatically generated for illustrative purposes only.
+# It may require modifications to work in your environment.
+
+# To install the latest published package dependency, execute the following:
+# python3 -m pip install google-cloud-aiplatform
+
+
+# [START aiplatform_generated_aiplatform_v1_TensorboardService_BatchCreateTensorboardRuns_sync]
+from google.cloud import aiplatform_v1
+
+
+def sample_batch_create_tensorboard_runs():
+ # Create a client
+ client = aiplatform_v1.TensorboardServiceClient()
+
+ # Initialize request argument(s)
+ requests = aiplatform_v1.CreateTensorboardRunRequest()
+ requests.parent = "parent_value"
+ requests.tensorboard_run.display_name = "display_name_value"
+ requests.tensorboard_run_id = "tensorboard_run_id_value"
+
+ request = aiplatform_v1.BatchCreateTensorboardRunsRequest(
+ parent="parent_value",
+ requests=requests,
+ )
+
+ # Make the request
+ response = client.batch_create_tensorboard_runs(request=request)
+
+ # Handle the response
+ print(response)
+
+# [END aiplatform_generated_aiplatform_v1_TensorboardService_BatchCreateTensorboardRuns_sync]
diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1_tensorboard_service_batch_create_tensorboard_time_series_async.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1_tensorboard_service_batch_create_tensorboard_time_series_async.py
new file mode 100644
index 0000000000..e653039e53
--- /dev/null
+++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1_tensorboard_service_batch_create_tensorboard_time_series_async.py
@@ -0,0 +1,51 @@
+# -*- coding: utf-8 -*-
+# Copyright 2020 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+# Generated code. DO NOT EDIT!
+#
+# Snippet for BatchCreateTensorboardTimeSeries
+# NOTE: This snippet has been automatically generated for illustrative purposes only.
+# It may require modifications to work in your environment.
+
+# To install the latest published package dependency, execute the following:
+# python3 -m pip install google-cloud-aiplatform
+
+
+# [START aiplatform_generated_aiplatform_v1_TensorboardService_BatchCreateTensorboardTimeSeries_async]
+from google.cloud import aiplatform_v1
+
+
+async def sample_batch_create_tensorboard_time_series():
+ # Create a client
+ client = aiplatform_v1.TensorboardServiceAsyncClient()
+
+ # Initialize request argument(s)
+ requests = aiplatform_v1.CreateTensorboardTimeSeriesRequest()
+ requests.parent = "parent_value"
+ requests.tensorboard_time_series.display_name = "display_name_value"
+ requests.tensorboard_time_series.value_type = "BLOB_SEQUENCE"
+
+ request = aiplatform_v1.BatchCreateTensorboardTimeSeriesRequest(
+ parent="parent_value",
+ requests=requests,
+ )
+
+ # Make the request
+ response = await client.batch_create_tensorboard_time_series(request=request)
+
+ # Handle the response
+ print(response)
+
+# [END aiplatform_generated_aiplatform_v1_TensorboardService_BatchCreateTensorboardTimeSeries_async]
diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1_tensorboard_service_batch_create_tensorboard_time_series_sync.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1_tensorboard_service_batch_create_tensorboard_time_series_sync.py
new file mode 100644
index 0000000000..b0e438caa3
--- /dev/null
+++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1_tensorboard_service_batch_create_tensorboard_time_series_sync.py
@@ -0,0 +1,51 @@
+# -*- coding: utf-8 -*-
+# Copyright 2020 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+# Generated code. DO NOT EDIT!
+#
+# Snippet for BatchCreateTensorboardTimeSeries
+# NOTE: This snippet has been automatically generated for illustrative purposes only.
+# It may require modifications to work in your environment.
+
+# To install the latest published package dependency, execute the following:
+# python3 -m pip install google-cloud-aiplatform
+
+
+# [START aiplatform_generated_aiplatform_v1_TensorboardService_BatchCreateTensorboardTimeSeries_sync]
+from google.cloud import aiplatform_v1
+
+
+def sample_batch_create_tensorboard_time_series():
+ # Create a client
+ client = aiplatform_v1.TensorboardServiceClient()
+
+ # Initialize request argument(s)
+ requests = aiplatform_v1.CreateTensorboardTimeSeriesRequest()
+ requests.parent = "parent_value"
+ requests.tensorboard_time_series.display_name = "display_name_value"
+ requests.tensorboard_time_series.value_type = "BLOB_SEQUENCE"
+
+ request = aiplatform_v1.BatchCreateTensorboardTimeSeriesRequest(
+ parent="parent_value",
+ requests=requests,
+ )
+
+ # Make the request
+ response = client.batch_create_tensorboard_time_series(request=request)
+
+ # Handle the response
+ print(response)
+
+# [END aiplatform_generated_aiplatform_v1_TensorboardService_BatchCreateTensorboardTimeSeries_sync]
diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1_tensorboard_service_batch_read_tensorboard_time_series_data_async.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1_tensorboard_service_batch_read_tensorboard_time_series_data_async.py
new file mode 100644
index 0000000000..9313b76440
--- /dev/null
+++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1_tensorboard_service_batch_read_tensorboard_time_series_data_async.py
@@ -0,0 +1,46 @@
+# -*- coding: utf-8 -*-
+# Copyright 2020 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+# Generated code. DO NOT EDIT!
+#
+# Snippet for BatchReadTensorboardTimeSeriesData
+# NOTE: This snippet has been automatically generated for illustrative purposes only.
+# It may require modifications to work in your environment.
+
+# To install the latest published package dependency, execute the following:
+# python3 -m pip install google-cloud-aiplatform
+
+
+# [START aiplatform_generated_aiplatform_v1_TensorboardService_BatchReadTensorboardTimeSeriesData_async]
+from google.cloud import aiplatform_v1
+
+
+async def sample_batch_read_tensorboard_time_series_data():
+ # Create a client
+ client = aiplatform_v1.TensorboardServiceAsyncClient()
+
+ # Initialize request argument(s)
+ request = aiplatform_v1.BatchReadTensorboardTimeSeriesDataRequest(
+ tensorboard="tensorboard_value",
+ time_series=['time_series_value_1', 'time_series_value_2'],
+ )
+
+ # Make the request
+ response = await client.batch_read_tensorboard_time_series_data(request=request)
+
+ # Handle the response
+ print(response)
+
+# [END aiplatform_generated_aiplatform_v1_TensorboardService_BatchReadTensorboardTimeSeriesData_async]
diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1_tensorboard_service_batch_read_tensorboard_time_series_data_sync.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1_tensorboard_service_batch_read_tensorboard_time_series_data_sync.py
new file mode 100644
index 0000000000..916b2fe0ac
--- /dev/null
+++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1_tensorboard_service_batch_read_tensorboard_time_series_data_sync.py
@@ -0,0 +1,46 @@
+# -*- coding: utf-8 -*-
+# Copyright 2020 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+# Generated code. DO NOT EDIT!
+#
+# Snippet for BatchReadTensorboardTimeSeriesData
+# NOTE: This snippet has been automatically generated for illustrative purposes only.
+# It may require modifications to work in your environment.
+
+# To install the latest published package dependency, execute the following:
+# python3 -m pip install google-cloud-aiplatform
+
+
+# [START aiplatform_generated_aiplatform_v1_TensorboardService_BatchReadTensorboardTimeSeriesData_sync]
+from google.cloud import aiplatform_v1
+
+
+def sample_batch_read_tensorboard_time_series_data():
+ # Create a client
+ client = aiplatform_v1.TensorboardServiceClient()
+
+ # Initialize request argument(s)
+ request = aiplatform_v1.BatchReadTensorboardTimeSeriesDataRequest(
+ tensorboard="tensorboard_value",
+ time_series=['time_series_value_1', 'time_series_value_2'],
+ )
+
+ # Make the request
+ response = client.batch_read_tensorboard_time_series_data(request=request)
+
+ # Handle the response
+ print(response)
+
+# [END aiplatform_generated_aiplatform_v1_TensorboardService_BatchReadTensorboardTimeSeriesData_sync]
diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1_tensorboard_service_create_tensorboard_async.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1_tensorboard_service_create_tensorboard_async.py
new file mode 100644
index 0000000000..69325d83bc
--- /dev/null
+++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1_tensorboard_service_create_tensorboard_async.py
@@ -0,0 +1,53 @@
+# -*- coding: utf-8 -*-
+# Copyright 2020 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+# Generated code. DO NOT EDIT!
+#
+# Snippet for CreateTensorboard
+# NOTE: This snippet has been automatically generated for illustrative purposes only.
+# It may require modifications to work in your environment.
+
+# To install the latest published package dependency, execute the following:
+# python3 -m pip install google-cloud-aiplatform
+
+
+# [START aiplatform_generated_aiplatform_v1_TensorboardService_CreateTensorboard_async]
+from google.cloud import aiplatform_v1
+
+
+async def sample_create_tensorboard():
+ # Create a client
+ client = aiplatform_v1.TensorboardServiceAsyncClient()
+
+ # Initialize request argument(s)
+ tensorboard = aiplatform_v1.Tensorboard()
+ tensorboard.display_name = "display_name_value"
+
+ request = aiplatform_v1.CreateTensorboardRequest(
+ parent="parent_value",
+ tensorboard=tensorboard,
+ )
+
+ # Make the request
+ operation = client.create_tensorboard(request=request)
+
+ print("Waiting for operation to complete...")
+
+ response = await operation.result()
+
+ # Handle the response
+ print(response)
+
+# [END aiplatform_generated_aiplatform_v1_TensorboardService_CreateTensorboard_async]
diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1_tensorboard_service_create_tensorboard_experiment_async.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1_tensorboard_service_create_tensorboard_experiment_async.py
new file mode 100644
index 0000000000..65cb1f8338
--- /dev/null
+++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1_tensorboard_service_create_tensorboard_experiment_async.py
@@ -0,0 +1,46 @@
+# -*- coding: utf-8 -*-
+# Copyright 2020 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+# Generated code. DO NOT EDIT!
+#
+# Snippet for CreateTensorboardExperiment
+# NOTE: This snippet has been automatically generated for illustrative purposes only.
+# It may require modifications to work in your environment.
+
+# To install the latest published package dependency, execute the following:
+# python3 -m pip install google-cloud-aiplatform
+
+
+# [START aiplatform_generated_aiplatform_v1_TensorboardService_CreateTensorboardExperiment_async]
+from google.cloud import aiplatform_v1
+
+
+async def sample_create_tensorboard_experiment():
+ # Create a client
+ client = aiplatform_v1.TensorboardServiceAsyncClient()
+
+ # Initialize request argument(s)
+ request = aiplatform_v1.CreateTensorboardExperimentRequest(
+ parent="parent_value",
+ tensorboard_experiment_id="tensorboard_experiment_id_value",
+ )
+
+ # Make the request
+ response = await client.create_tensorboard_experiment(request=request)
+
+ # Handle the response
+ print(response)
+
+# [END aiplatform_generated_aiplatform_v1_TensorboardService_CreateTensorboardExperiment_async]
diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1_tensorboard_service_create_tensorboard_experiment_sync.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1_tensorboard_service_create_tensorboard_experiment_sync.py
new file mode 100644
index 0000000000..1a41e2342a
--- /dev/null
+++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1_tensorboard_service_create_tensorboard_experiment_sync.py
@@ -0,0 +1,46 @@
+# -*- coding: utf-8 -*-
+# Copyright 2020 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+# Generated code. DO NOT EDIT!
+#
+# Snippet for CreateTensorboardExperiment
+# NOTE: This snippet has been automatically generated for illustrative purposes only.
+# It may require modifications to work in your environment.
+
+# To install the latest published package dependency, execute the following:
+# python3 -m pip install google-cloud-aiplatform
+
+
+# [START aiplatform_generated_aiplatform_v1_TensorboardService_CreateTensorboardExperiment_sync]
+from google.cloud import aiplatform_v1
+
+
+def sample_create_tensorboard_experiment():
+ # Create a client
+ client = aiplatform_v1.TensorboardServiceClient()
+
+ # Initialize request argument(s)
+ request = aiplatform_v1.CreateTensorboardExperimentRequest(
+ parent="parent_value",
+ tensorboard_experiment_id="tensorboard_experiment_id_value",
+ )
+
+ # Make the request
+ response = client.create_tensorboard_experiment(request=request)
+
+ # Handle the response
+ print(response)
+
+# [END aiplatform_generated_aiplatform_v1_TensorboardService_CreateTensorboardExperiment_sync]
diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1_tensorboard_service_create_tensorboard_run_async.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1_tensorboard_service_create_tensorboard_run_async.py
new file mode 100644
index 0000000000..943526a805
--- /dev/null
+++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1_tensorboard_service_create_tensorboard_run_async.py
@@ -0,0 +1,50 @@
+# -*- coding: utf-8 -*-
+# Copyright 2020 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+# Generated code. DO NOT EDIT!
+#
+# Snippet for CreateTensorboardRun
+# NOTE: This snippet has been automatically generated for illustrative purposes only.
+# It may require modifications to work in your environment.
+
+# To install the latest published package dependency, execute the following:
+# python3 -m pip install google-cloud-aiplatform
+
+
+# [START aiplatform_generated_aiplatform_v1_TensorboardService_CreateTensorboardRun_async]
+from google.cloud import aiplatform_v1
+
+
+async def sample_create_tensorboard_run():
+ # Create a client
+ client = aiplatform_v1.TensorboardServiceAsyncClient()
+
+ # Initialize request argument(s)
+ tensorboard_run = aiplatform_v1.TensorboardRun()
+ tensorboard_run.display_name = "display_name_value"
+
+ request = aiplatform_v1.CreateTensorboardRunRequest(
+ parent="parent_value",
+ tensorboard_run=tensorboard_run,
+ tensorboard_run_id="tensorboard_run_id_value",
+ )
+
+ # Make the request
+ response = await client.create_tensorboard_run(request=request)
+
+ # Handle the response
+ print(response)
+
+# [END aiplatform_generated_aiplatform_v1_TensorboardService_CreateTensorboardRun_async]
diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1_tensorboard_service_create_tensorboard_run_sync.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1_tensorboard_service_create_tensorboard_run_sync.py
new file mode 100644
index 0000000000..009bb07540
--- /dev/null
+++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1_tensorboard_service_create_tensorboard_run_sync.py
@@ -0,0 +1,50 @@
+# -*- coding: utf-8 -*-
+# Copyright 2020 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+# Generated code. DO NOT EDIT!
+#
+# Snippet for CreateTensorboardRun
+# NOTE: This snippet has been automatically generated for illustrative purposes only.
+# It may require modifications to work in your environment.
+
+# To install the latest published package dependency, execute the following:
+# python3 -m pip install google-cloud-aiplatform
+
+
+# [START aiplatform_generated_aiplatform_v1_TensorboardService_CreateTensorboardRun_sync]
+from google.cloud import aiplatform_v1
+
+
+def sample_create_tensorboard_run():
+ # Create a client
+ client = aiplatform_v1.TensorboardServiceClient()
+
+ # Initialize request argument(s)
+ tensorboard_run = aiplatform_v1.TensorboardRun()
+ tensorboard_run.display_name = "display_name_value"
+
+ request = aiplatform_v1.CreateTensorboardRunRequest(
+ parent="parent_value",
+ tensorboard_run=tensorboard_run,
+ tensorboard_run_id="tensorboard_run_id_value",
+ )
+
+ # Make the request
+ response = client.create_tensorboard_run(request=request)
+
+ # Handle the response
+ print(response)
+
+# [END aiplatform_generated_aiplatform_v1_TensorboardService_CreateTensorboardRun_sync]
diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1_tensorboard_service_create_tensorboard_sync.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1_tensorboard_service_create_tensorboard_sync.py
new file mode 100644
index 0000000000..2f73d76774
--- /dev/null
+++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1_tensorboard_service_create_tensorboard_sync.py
@@ -0,0 +1,53 @@
+# -*- coding: utf-8 -*-
+# Copyright 2020 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+# Generated code. DO NOT EDIT!
+#
+# Snippet for CreateTensorboard
+# NOTE: This snippet has been automatically generated for illustrative purposes only.
+# It may require modifications to work in your environment.
+
+# To install the latest published package dependency, execute the following:
+# python3 -m pip install google-cloud-aiplatform
+
+
+# [START aiplatform_generated_aiplatform_v1_TensorboardService_CreateTensorboard_sync]
+from google.cloud import aiplatform_v1
+
+
+def sample_create_tensorboard():
+ # Create a client
+ client = aiplatform_v1.TensorboardServiceClient()
+
+ # Initialize request argument(s)
+ tensorboard = aiplatform_v1.Tensorboard()
+ tensorboard.display_name = "display_name_value"
+
+ request = aiplatform_v1.CreateTensorboardRequest(
+ parent="parent_value",
+ tensorboard=tensorboard,
+ )
+
+ # Make the request
+ operation = client.create_tensorboard(request=request)
+
+ print("Waiting for operation to complete...")
+
+ response = operation.result()
+
+ # Handle the response
+ print(response)
+
+# [END aiplatform_generated_aiplatform_v1_TensorboardService_CreateTensorboard_sync]
diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1_tensorboard_service_create_tensorboard_time_series_async.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1_tensorboard_service_create_tensorboard_time_series_async.py
new file mode 100644
index 0000000000..9996992c76
--- /dev/null
+++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1_tensorboard_service_create_tensorboard_time_series_async.py
@@ -0,0 +1,50 @@
+# -*- coding: utf-8 -*-
+# Copyright 2020 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+# Generated code. DO NOT EDIT!
+#
+# Snippet for CreateTensorboardTimeSeries
+# NOTE: This snippet has been automatically generated for illustrative purposes only.
+# It may require modifications to work in your environment.
+
+# To install the latest published package dependency, execute the following:
+# python3 -m pip install google-cloud-aiplatform
+
+
+# [START aiplatform_generated_aiplatform_v1_TensorboardService_CreateTensorboardTimeSeries_async]
+from google.cloud import aiplatform_v1
+
+
+async def sample_create_tensorboard_time_series():
+ # Create a client
+ client = aiplatform_v1.TensorboardServiceAsyncClient()
+
+ # Initialize request argument(s)
+ tensorboard_time_series = aiplatform_v1.TensorboardTimeSeries()
+ tensorboard_time_series.display_name = "display_name_value"
+ tensorboard_time_series.value_type = "BLOB_SEQUENCE"
+
+ request = aiplatform_v1.CreateTensorboardTimeSeriesRequest(
+ parent="parent_value",
+ tensorboard_time_series=tensorboard_time_series,
+ )
+
+ # Make the request
+ response = await client.create_tensorboard_time_series(request=request)
+
+ # Handle the response
+ print(response)
+
+# [END aiplatform_generated_aiplatform_v1_TensorboardService_CreateTensorboardTimeSeries_async]
diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1_tensorboard_service_create_tensorboard_time_series_sync.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1_tensorboard_service_create_tensorboard_time_series_sync.py
new file mode 100644
index 0000000000..935c268b60
--- /dev/null
+++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1_tensorboard_service_create_tensorboard_time_series_sync.py
@@ -0,0 +1,50 @@
+# -*- coding: utf-8 -*-
+# Copyright 2020 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+# Generated code. DO NOT EDIT!
+#
+# Snippet for CreateTensorboardTimeSeries
+# NOTE: This snippet has been automatically generated for illustrative purposes only.
+# It may require modifications to work in your environment.
+
+# To install the latest published package dependency, execute the following:
+# python3 -m pip install google-cloud-aiplatform
+
+
+# [START aiplatform_generated_aiplatform_v1_TensorboardService_CreateTensorboardTimeSeries_sync]
+from google.cloud import aiplatform_v1
+
+
+def sample_create_tensorboard_time_series():
+ # Create a client
+ client = aiplatform_v1.TensorboardServiceClient()
+
+ # Initialize request argument(s)
+ tensorboard_time_series = aiplatform_v1.TensorboardTimeSeries()
+ tensorboard_time_series.display_name = "display_name_value"
+ tensorboard_time_series.value_type = "BLOB_SEQUENCE"
+
+ request = aiplatform_v1.CreateTensorboardTimeSeriesRequest(
+ parent="parent_value",
+ tensorboard_time_series=tensorboard_time_series,
+ )
+
+ # Make the request
+ response = client.create_tensorboard_time_series(request=request)
+
+ # Handle the response
+ print(response)
+
+# [END aiplatform_generated_aiplatform_v1_TensorboardService_CreateTensorboardTimeSeries_sync]
diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1_tensorboard_service_delete_tensorboard_async.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1_tensorboard_service_delete_tensorboard_async.py
new file mode 100644
index 0000000000..c77bcde03c
--- /dev/null
+++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1_tensorboard_service_delete_tensorboard_async.py
@@ -0,0 +1,49 @@
+# -*- coding: utf-8 -*-
+# Copyright 2020 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+# Generated code. DO NOT EDIT!
+#
+# Snippet for DeleteTensorboard
+# NOTE: This snippet has been automatically generated for illustrative purposes only.
+# It may require modifications to work in your environment.
+
+# To install the latest published package dependency, execute the following:
+# python3 -m pip install google-cloud-aiplatform
+
+
+# [START aiplatform_generated_aiplatform_v1_TensorboardService_DeleteTensorboard_async]
+from google.cloud import aiplatform_v1
+
+
+async def sample_delete_tensorboard():
+ # Create a client
+ client = aiplatform_v1.TensorboardServiceAsyncClient()
+
+ # Initialize request argument(s)
+ request = aiplatform_v1.DeleteTensorboardRequest(
+ name="name_value",
+ )
+
+ # Make the request
+ operation = client.delete_tensorboard(request=request)
+
+ print("Waiting for operation to complete...")
+
+ response = await operation.result()
+
+ # Handle the response
+ print(response)
+
+# [END aiplatform_generated_aiplatform_v1_TensorboardService_DeleteTensorboard_async]
diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1_tensorboard_service_delete_tensorboard_experiment_async.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1_tensorboard_service_delete_tensorboard_experiment_async.py
new file mode 100644
index 0000000000..54d8dd34db
--- /dev/null
+++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1_tensorboard_service_delete_tensorboard_experiment_async.py
@@ -0,0 +1,49 @@
+# -*- coding: utf-8 -*-
+# Copyright 2020 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+# Generated code. DO NOT EDIT!
+#
+# Snippet for DeleteTensorboardExperiment
+# NOTE: This snippet has been automatically generated for illustrative purposes only.
+# It may require modifications to work in your environment.
+
+# To install the latest published package dependency, execute the following:
+# python3 -m pip install google-cloud-aiplatform
+
+
+# [START aiplatform_generated_aiplatform_v1_TensorboardService_DeleteTensorboardExperiment_async]
+from google.cloud import aiplatform_v1
+
+
+async def sample_delete_tensorboard_experiment():
+ # Create a client
+ client = aiplatform_v1.TensorboardServiceAsyncClient()
+
+ # Initialize request argument(s)
+ request = aiplatform_v1.DeleteTensorboardExperimentRequest(
+ name="name_value",
+ )
+
+ # Make the request
+ operation = client.delete_tensorboard_experiment(request=request)
+
+ print("Waiting for operation to complete...")
+
+ response = await operation.result()
+
+ # Handle the response
+ print(response)
+
+# [END aiplatform_generated_aiplatform_v1_TensorboardService_DeleteTensorboardExperiment_async]
diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1_tensorboard_service_delete_tensorboard_experiment_sync.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1_tensorboard_service_delete_tensorboard_experiment_sync.py
new file mode 100644
index 0000000000..d24488c736
--- /dev/null
+++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1_tensorboard_service_delete_tensorboard_experiment_sync.py
@@ -0,0 +1,49 @@
+# -*- coding: utf-8 -*-
+# Copyright 2020 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+# Generated code. DO NOT EDIT!
+#
+# Snippet for DeleteTensorboardExperiment
+# NOTE: This snippet has been automatically generated for illustrative purposes only.
+# It may require modifications to work in your environment.
+
+# To install the latest published package dependency, execute the following:
+# python3 -m pip install google-cloud-aiplatform
+
+
+# [START aiplatform_generated_aiplatform_v1_TensorboardService_DeleteTensorboardExperiment_sync]
+from google.cloud import aiplatform_v1
+
+
+def sample_delete_tensorboard_experiment():
+ # Create a client
+ client = aiplatform_v1.TensorboardServiceClient()
+
+ # Initialize request argument(s)
+ request = aiplatform_v1.DeleteTensorboardExperimentRequest(
+ name="name_value",
+ )
+
+ # Make the request
+ operation = client.delete_tensorboard_experiment(request=request)
+
+ print("Waiting for operation to complete...")
+
+ response = operation.result()
+
+ # Handle the response
+ print(response)
+
+# [END aiplatform_generated_aiplatform_v1_TensorboardService_DeleteTensorboardExperiment_sync]
diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1_tensorboard_service_delete_tensorboard_run_async.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1_tensorboard_service_delete_tensorboard_run_async.py
new file mode 100644
index 0000000000..b99937d851
--- /dev/null
+++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1_tensorboard_service_delete_tensorboard_run_async.py
@@ -0,0 +1,49 @@
+# -*- coding: utf-8 -*-
+# Copyright 2020 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+# Generated code. DO NOT EDIT!
+#
+# Snippet for DeleteTensorboardRun
+# NOTE: This snippet has been automatically generated for illustrative purposes only.
+# It may require modifications to work in your environment.
+
+# To install the latest published package dependency, execute the following:
+# python3 -m pip install google-cloud-aiplatform
+
+
+# [START aiplatform_generated_aiplatform_v1_TensorboardService_DeleteTensorboardRun_async]
+from google.cloud import aiplatform_v1
+
+
+async def sample_delete_tensorboard_run():
+ # Create a client
+ client = aiplatform_v1.TensorboardServiceAsyncClient()
+
+ # Initialize request argument(s)
+ request = aiplatform_v1.DeleteTensorboardRunRequest(
+ name="name_value",
+ )
+
+ # Make the request
+ operation = client.delete_tensorboard_run(request=request)
+
+ print("Waiting for operation to complete...")
+
+ response = await operation.result()
+
+ # Handle the response
+ print(response)
+
+# [END aiplatform_generated_aiplatform_v1_TensorboardService_DeleteTensorboardRun_async]
diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1_tensorboard_service_delete_tensorboard_run_sync.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1_tensorboard_service_delete_tensorboard_run_sync.py
new file mode 100644
index 0000000000..61f28f0748
--- /dev/null
+++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1_tensorboard_service_delete_tensorboard_run_sync.py
@@ -0,0 +1,49 @@
+# -*- coding: utf-8 -*-
+# Copyright 2020 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+# Generated code. DO NOT EDIT!
+#
+# Snippet for DeleteTensorboardRun
+# NOTE: This snippet has been automatically generated for illustrative purposes only.
+# It may require modifications to work in your environment.
+
+# To install the latest published package dependency, execute the following:
+# python3 -m pip install google-cloud-aiplatform
+
+
+# [START aiplatform_generated_aiplatform_v1_TensorboardService_DeleteTensorboardRun_sync]
+from google.cloud import aiplatform_v1
+
+
+def sample_delete_tensorboard_run():
+ # Create a client
+ client = aiplatform_v1.TensorboardServiceClient()
+
+ # Initialize request argument(s)
+ request = aiplatform_v1.DeleteTensorboardRunRequest(
+ name="name_value",
+ )
+
+ # Make the request
+ operation = client.delete_tensorboard_run(request=request)
+
+ print("Waiting for operation to complete...")
+
+ response = operation.result()
+
+ # Handle the response
+ print(response)
+
+# [END aiplatform_generated_aiplatform_v1_TensorboardService_DeleteTensorboardRun_sync]
diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1_tensorboard_service_delete_tensorboard_sync.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1_tensorboard_service_delete_tensorboard_sync.py
new file mode 100644
index 0000000000..13c9e69ebd
--- /dev/null
+++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1_tensorboard_service_delete_tensorboard_sync.py
@@ -0,0 +1,49 @@
+# -*- coding: utf-8 -*-
+# Copyright 2020 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+# Generated code. DO NOT EDIT!
+#
+# Snippet for DeleteTensorboard
+# NOTE: This snippet has been automatically generated for illustrative purposes only.
+# It may require modifications to work in your environment.
+
+# To install the latest published package dependency, execute the following:
+# python3 -m pip install google-cloud-aiplatform
+
+
+# [START aiplatform_generated_aiplatform_v1_TensorboardService_DeleteTensorboard_sync]
+from google.cloud import aiplatform_v1
+
+
+def sample_delete_tensorboard():
+ # Create a client
+ client = aiplatform_v1.TensorboardServiceClient()
+
+ # Initialize request argument(s)
+ request = aiplatform_v1.DeleteTensorboardRequest(
+ name="name_value",
+ )
+
+ # Make the request
+ operation = client.delete_tensorboard(request=request)
+
+ print("Waiting for operation to complete...")
+
+ response = operation.result()
+
+ # Handle the response
+ print(response)
+
+# [END aiplatform_generated_aiplatform_v1_TensorboardService_DeleteTensorboard_sync]
diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1_tensorboard_service_delete_tensorboard_time_series_async.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1_tensorboard_service_delete_tensorboard_time_series_async.py
new file mode 100644
index 0000000000..4c235bf17f
--- /dev/null
+++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1_tensorboard_service_delete_tensorboard_time_series_async.py
@@ -0,0 +1,49 @@
+# -*- coding: utf-8 -*-
+# Copyright 2020 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+# Generated code. DO NOT EDIT!
+#
+# Snippet for DeleteTensorboardTimeSeries
+# NOTE: This snippet has been automatically generated for illustrative purposes only.
+# It may require modifications to work in your environment.
+
+# To install the latest published package dependency, execute the following:
+# python3 -m pip install google-cloud-aiplatform
+
+
+# [START aiplatform_generated_aiplatform_v1_TensorboardService_DeleteTensorboardTimeSeries_async]
+from google.cloud import aiplatform_v1
+
+
+async def sample_delete_tensorboard_time_series():
+ # Create a client
+ client = aiplatform_v1.TensorboardServiceAsyncClient()
+
+ # Initialize request argument(s)
+ request = aiplatform_v1.DeleteTensorboardTimeSeriesRequest(
+ name="name_value",
+ )
+
+ # Make the request
+ operation = client.delete_tensorboard_time_series(request=request)
+
+ print("Waiting for operation to complete...")
+
+ response = await operation.result()
+
+ # Handle the response
+ print(response)
+
+# [END aiplatform_generated_aiplatform_v1_TensorboardService_DeleteTensorboardTimeSeries_async]
diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1_tensorboard_service_delete_tensorboard_time_series_sync.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1_tensorboard_service_delete_tensorboard_time_series_sync.py
new file mode 100644
index 0000000000..5cae0cc839
--- /dev/null
+++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1_tensorboard_service_delete_tensorboard_time_series_sync.py
@@ -0,0 +1,49 @@
+# -*- coding: utf-8 -*-
+# Copyright 2020 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+# Generated code. DO NOT EDIT!
+#
+# Snippet for DeleteTensorboardTimeSeries
+# NOTE: This snippet has been automatically generated for illustrative purposes only.
+# It may require modifications to work in your environment.
+
+# To install the latest published package dependency, execute the following:
+# python3 -m pip install google-cloud-aiplatform
+
+
+# [START aiplatform_generated_aiplatform_v1_TensorboardService_DeleteTensorboardTimeSeries_sync]
+from google.cloud import aiplatform_v1
+
+
+def sample_delete_tensorboard_time_series():
+ # Create a client
+ client = aiplatform_v1.TensorboardServiceClient()
+
+ # Initialize request argument(s)
+ request = aiplatform_v1.DeleteTensorboardTimeSeriesRequest(
+ name="name_value",
+ )
+
+ # Make the request
+ operation = client.delete_tensorboard_time_series(request=request)
+
+ print("Waiting for operation to complete...")
+
+ response = operation.result()
+
+ # Handle the response
+ print(response)
+
+# [END aiplatform_generated_aiplatform_v1_TensorboardService_DeleteTensorboardTimeSeries_sync]
diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1_tensorboard_service_export_tensorboard_time_series_data_async.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1_tensorboard_service_export_tensorboard_time_series_data_async.py
new file mode 100644
index 0000000000..19d50ba50e
--- /dev/null
+++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1_tensorboard_service_export_tensorboard_time_series_data_async.py
@@ -0,0 +1,46 @@
+# -*- coding: utf-8 -*-
+# Copyright 2020 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+# Generated code. DO NOT EDIT!
+#
+# Snippet for ExportTensorboardTimeSeriesData
+# NOTE: This snippet has been automatically generated for illustrative purposes only.
+# It may require modifications to work in your environment.
+
+# To install the latest published package dependency, execute the following:
+# python3 -m pip install google-cloud-aiplatform
+
+
+# [START aiplatform_generated_aiplatform_v1_TensorboardService_ExportTensorboardTimeSeriesData_async]
+from google.cloud import aiplatform_v1
+
+
+async def sample_export_tensorboard_time_series_data():
+ # Create a client
+ client = aiplatform_v1.TensorboardServiceAsyncClient()
+
+ # Initialize request argument(s)
+ request = aiplatform_v1.ExportTensorboardTimeSeriesDataRequest(
+ tensorboard_time_series="tensorboard_time_series_value",
+ )
+
+ # Make the request
+ page_result = client.export_tensorboard_time_series_data(request=request)
+
+ # Handle the response
+ async for response in page_result:
+ print(response)
+
+# [END aiplatform_generated_aiplatform_v1_TensorboardService_ExportTensorboardTimeSeriesData_async]
diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1_tensorboard_service_export_tensorboard_time_series_data_sync.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1_tensorboard_service_export_tensorboard_time_series_data_sync.py
new file mode 100644
index 0000000000..0513c1d6ab
--- /dev/null
+++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1_tensorboard_service_export_tensorboard_time_series_data_sync.py
@@ -0,0 +1,46 @@
+# -*- coding: utf-8 -*-
+# Copyright 2020 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+# Generated code. DO NOT EDIT!
+#
+# Snippet for ExportTensorboardTimeSeriesData
+# NOTE: This snippet has been automatically generated for illustrative purposes only.
+# It may require modifications to work in your environment.
+
+# To install the latest published package dependency, execute the following:
+# python3 -m pip install google-cloud-aiplatform
+
+
+# [START aiplatform_generated_aiplatform_v1_TensorboardService_ExportTensorboardTimeSeriesData_sync]
+from google.cloud import aiplatform_v1
+
+
+def sample_export_tensorboard_time_series_data():
+ # Create a client
+ client = aiplatform_v1.TensorboardServiceClient()
+
+ # Initialize request argument(s)
+ request = aiplatform_v1.ExportTensorboardTimeSeriesDataRequest(
+ tensorboard_time_series="tensorboard_time_series_value",
+ )
+
+ # Make the request
+ page_result = client.export_tensorboard_time_series_data(request=request)
+
+ # Handle the response
+ for response in page_result:
+ print(response)
+
+# [END aiplatform_generated_aiplatform_v1_TensorboardService_ExportTensorboardTimeSeriesData_sync]
diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1_tensorboard_service_get_tensorboard_async.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1_tensorboard_service_get_tensorboard_async.py
new file mode 100644
index 0000000000..0dffe60e2d
--- /dev/null
+++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1_tensorboard_service_get_tensorboard_async.py
@@ -0,0 +1,45 @@
+# -*- coding: utf-8 -*-
+# Copyright 2020 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+# Generated code. DO NOT EDIT!
+#
+# Snippet for GetTensorboard
+# NOTE: This snippet has been automatically generated for illustrative purposes only.
+# It may require modifications to work in your environment.
+
+# To install the latest published package dependency, execute the following:
+# python3 -m pip install google-cloud-aiplatform
+
+
+# [START aiplatform_generated_aiplatform_v1_TensorboardService_GetTensorboard_async]
+from google.cloud import aiplatform_v1
+
+
+async def sample_get_tensorboard():
+ # Create a client
+ client = aiplatform_v1.TensorboardServiceAsyncClient()
+
+ # Initialize request argument(s)
+ request = aiplatform_v1.GetTensorboardRequest(
+ name="name_value",
+ )
+
+ # Make the request
+ response = await client.get_tensorboard(request=request)
+
+ # Handle the response
+ print(response)
+
+# [END aiplatform_generated_aiplatform_v1_TensorboardService_GetTensorboard_async]
diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1_tensorboard_service_get_tensorboard_experiment_async.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1_tensorboard_service_get_tensorboard_experiment_async.py
new file mode 100644
index 0000000000..51df65ff18
--- /dev/null
+++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1_tensorboard_service_get_tensorboard_experiment_async.py
@@ -0,0 +1,45 @@
+# -*- coding: utf-8 -*-
+# Copyright 2020 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+# Generated code. DO NOT EDIT!
+#
+# Snippet for GetTensorboardExperiment
+# NOTE: This snippet has been automatically generated for illustrative purposes only.
+# It may require modifications to work in your environment.
+
+# To install the latest published package dependency, execute the following:
+# python3 -m pip install google-cloud-aiplatform
+
+
+# [START aiplatform_generated_aiplatform_v1_TensorboardService_GetTensorboardExperiment_async]
+from google.cloud import aiplatform_v1
+
+
+async def sample_get_tensorboard_experiment():
+ # Create a client
+ client = aiplatform_v1.TensorboardServiceAsyncClient()
+
+ # Initialize request argument(s)
+ request = aiplatform_v1.GetTensorboardExperimentRequest(
+ name="name_value",
+ )
+
+ # Make the request
+ response = await client.get_tensorboard_experiment(request=request)
+
+ # Handle the response
+ print(response)
+
+# [END aiplatform_generated_aiplatform_v1_TensorboardService_GetTensorboardExperiment_async]
diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1_tensorboard_service_get_tensorboard_experiment_sync.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1_tensorboard_service_get_tensorboard_experiment_sync.py
new file mode 100644
index 0000000000..99d8b725e9
--- /dev/null
+++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1_tensorboard_service_get_tensorboard_experiment_sync.py
@@ -0,0 +1,45 @@
+# -*- coding: utf-8 -*-
+# Copyright 2020 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+# Generated code. DO NOT EDIT!
+#
+# Snippet for GetTensorboardExperiment
+# NOTE: This snippet has been automatically generated for illustrative purposes only.
+# It may require modifications to work in your environment.
+
+# To install the latest published package dependency, execute the following:
+# python3 -m pip install google-cloud-aiplatform
+
+
+# [START aiplatform_generated_aiplatform_v1_TensorboardService_GetTensorboardExperiment_sync]
+from google.cloud import aiplatform_v1
+
+
+def sample_get_tensorboard_experiment():
+ # Create a client
+ client = aiplatform_v1.TensorboardServiceClient()
+
+ # Initialize request argument(s)
+ request = aiplatform_v1.GetTensorboardExperimentRequest(
+ name="name_value",
+ )
+
+ # Make the request
+ response = client.get_tensorboard_experiment(request=request)
+
+ # Handle the response
+ print(response)
+
+# [END aiplatform_generated_aiplatform_v1_TensorboardService_GetTensorboardExperiment_sync]
diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1_tensorboard_service_get_tensorboard_run_async.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1_tensorboard_service_get_tensorboard_run_async.py
new file mode 100644
index 0000000000..7a504549d1
--- /dev/null
+++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1_tensorboard_service_get_tensorboard_run_async.py
@@ -0,0 +1,45 @@
+# -*- coding: utf-8 -*-
+# Copyright 2020 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+# Generated code. DO NOT EDIT!
+#
+# Snippet for GetTensorboardRun
+# NOTE: This snippet has been automatically generated for illustrative purposes only.
+# It may require modifications to work in your environment.
+
+# To install the latest published package dependency, execute the following:
+# python3 -m pip install google-cloud-aiplatform
+
+
+# [START aiplatform_generated_aiplatform_v1_TensorboardService_GetTensorboardRun_async]
+from google.cloud import aiplatform_v1
+
+
+async def sample_get_tensorboard_run():
+ # Create a client
+ client = aiplatform_v1.TensorboardServiceAsyncClient()
+
+ # Initialize request argument(s)
+ request = aiplatform_v1.GetTensorboardRunRequest(
+ name="name_value",
+ )
+
+ # Make the request
+ response = await client.get_tensorboard_run(request=request)
+
+ # Handle the response
+ print(response)
+
+# [END aiplatform_generated_aiplatform_v1_TensorboardService_GetTensorboardRun_async]
diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1_tensorboard_service_get_tensorboard_run_sync.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1_tensorboard_service_get_tensorboard_run_sync.py
new file mode 100644
index 0000000000..47558e19ba
--- /dev/null
+++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1_tensorboard_service_get_tensorboard_run_sync.py
@@ -0,0 +1,45 @@
+# -*- coding: utf-8 -*-
+# Copyright 2020 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+# Generated code. DO NOT EDIT!
+#
+# Snippet for GetTensorboardRun
+# NOTE: This snippet has been automatically generated for illustrative purposes only.
+# It may require modifications to work in your environment.
+
+# To install the latest published package dependency, execute the following:
+# python3 -m pip install google-cloud-aiplatform
+
+
+# [START aiplatform_generated_aiplatform_v1_TensorboardService_GetTensorboardRun_sync]
+from google.cloud import aiplatform_v1
+
+
+def sample_get_tensorboard_run():
+ # Create a client
+ client = aiplatform_v1.TensorboardServiceClient()
+
+ # Initialize request argument(s)
+ request = aiplatform_v1.GetTensorboardRunRequest(
+ name="name_value",
+ )
+
+ # Make the request
+ response = client.get_tensorboard_run(request=request)
+
+ # Handle the response
+ print(response)
+
+# [END aiplatform_generated_aiplatform_v1_TensorboardService_GetTensorboardRun_sync]
diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1_tensorboard_service_get_tensorboard_sync.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1_tensorboard_service_get_tensorboard_sync.py
new file mode 100644
index 0000000000..5d84ccbe1f
--- /dev/null
+++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1_tensorboard_service_get_tensorboard_sync.py
@@ -0,0 +1,45 @@
+# -*- coding: utf-8 -*-
+# Copyright 2020 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+# Generated code. DO NOT EDIT!
+#
+# Snippet for GetTensorboard
+# NOTE: This snippet has been automatically generated for illustrative purposes only.
+# It may require modifications to work in your environment.
+
+# To install the latest published package dependency, execute the following:
+# python3 -m pip install google-cloud-aiplatform
+
+
+# [START aiplatform_generated_aiplatform_v1_TensorboardService_GetTensorboard_sync]
+from google.cloud import aiplatform_v1
+
+
+def sample_get_tensorboard():
+ # Create a client
+ client = aiplatform_v1.TensorboardServiceClient()
+
+ # Initialize request argument(s)
+ request = aiplatform_v1.GetTensorboardRequest(
+ name="name_value",
+ )
+
+ # Make the request
+ response = client.get_tensorboard(request=request)
+
+ # Handle the response
+ print(response)
+
+# [END aiplatform_generated_aiplatform_v1_TensorboardService_GetTensorboard_sync]
diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1_tensorboard_service_get_tensorboard_time_series_async.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1_tensorboard_service_get_tensorboard_time_series_async.py
new file mode 100644
index 0000000000..67220f3fac
--- /dev/null
+++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1_tensorboard_service_get_tensorboard_time_series_async.py
@@ -0,0 +1,45 @@
+# -*- coding: utf-8 -*-
+# Copyright 2020 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+# Generated code. DO NOT EDIT!
+#
+# Snippet for GetTensorboardTimeSeries
+# NOTE: This snippet has been automatically generated for illustrative purposes only.
+# It may require modifications to work in your environment.
+
+# To install the latest published package dependency, execute the following:
+# python3 -m pip install google-cloud-aiplatform
+
+
+# [START aiplatform_generated_aiplatform_v1_TensorboardService_GetTensorboardTimeSeries_async]
+from google.cloud import aiplatform_v1
+
+
+async def sample_get_tensorboard_time_series():
+ # Create a client
+ client = aiplatform_v1.TensorboardServiceAsyncClient()
+
+ # Initialize request argument(s)
+ request = aiplatform_v1.GetTensorboardTimeSeriesRequest(
+ name="name_value",
+ )
+
+ # Make the request
+ response = await client.get_tensorboard_time_series(request=request)
+
+ # Handle the response
+ print(response)
+
+# [END aiplatform_generated_aiplatform_v1_TensorboardService_GetTensorboardTimeSeries_async]
diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1_tensorboard_service_get_tensorboard_time_series_sync.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1_tensorboard_service_get_tensorboard_time_series_sync.py
new file mode 100644
index 0000000000..463c9f08c0
--- /dev/null
+++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1_tensorboard_service_get_tensorboard_time_series_sync.py
@@ -0,0 +1,45 @@
+# -*- coding: utf-8 -*-
+# Copyright 2020 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+# Generated code. DO NOT EDIT!
+#
+# Snippet for GetTensorboardTimeSeries
+# NOTE: This snippet has been automatically generated for illustrative purposes only.
+# It may require modifications to work in your environment.
+
+# To install the latest published package dependency, execute the following:
+# python3 -m pip install google-cloud-aiplatform
+
+
+# [START aiplatform_generated_aiplatform_v1_TensorboardService_GetTensorboardTimeSeries_sync]
+from google.cloud import aiplatform_v1
+
+
+def sample_get_tensorboard_time_series():
+ # Create a client
+ client = aiplatform_v1.TensorboardServiceClient()
+
+ # Initialize request argument(s)
+ request = aiplatform_v1.GetTensorboardTimeSeriesRequest(
+ name="name_value",
+ )
+
+ # Make the request
+ response = client.get_tensorboard_time_series(request=request)
+
+ # Handle the response
+ print(response)
+
+# [END aiplatform_generated_aiplatform_v1_TensorboardService_GetTensorboardTimeSeries_sync]
diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1_tensorboard_service_list_tensorboard_experiments_async.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1_tensorboard_service_list_tensorboard_experiments_async.py
new file mode 100644
index 0000000000..69356691fa
--- /dev/null
+++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1_tensorboard_service_list_tensorboard_experiments_async.py
@@ -0,0 +1,46 @@
+# -*- coding: utf-8 -*-
+# Copyright 2020 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+# Generated code. DO NOT EDIT!
+#
+# Snippet for ListTensorboardExperiments
+# NOTE: This snippet has been automatically generated for illustrative purposes only.
+# It may require modifications to work in your environment.
+
+# To install the latest published package dependency, execute the following:
+# python3 -m pip install google-cloud-aiplatform
+
+
+# [START aiplatform_generated_aiplatform_v1_TensorboardService_ListTensorboardExperiments_async]
+from google.cloud import aiplatform_v1
+
+
+async def sample_list_tensorboard_experiments():
+ # Create a client
+ client = aiplatform_v1.TensorboardServiceAsyncClient()
+
+ # Initialize request argument(s)
+ request = aiplatform_v1.ListTensorboardExperimentsRequest(
+ parent="parent_value",
+ )
+
+ # Make the request
+    page_result = await client.list_tensorboard_experiments(request=request)
+
+ # Handle the response
+ async for response in page_result:
+ print(response)
+
+# [END aiplatform_generated_aiplatform_v1_TensorboardService_ListTensorboardExperiments_async]
diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1_tensorboard_service_list_tensorboard_experiments_sync.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1_tensorboard_service_list_tensorboard_experiments_sync.py
new file mode 100644
index 0000000000..34605714e4
--- /dev/null
+++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1_tensorboard_service_list_tensorboard_experiments_sync.py
@@ -0,0 +1,46 @@
+# -*- coding: utf-8 -*-
+# Copyright 2020 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+# Generated code. DO NOT EDIT!
+#
+# Snippet for ListTensorboardExperiments
+# NOTE: This snippet has been automatically generated for illustrative purposes only.
+# It may require modifications to work in your environment.
+
+# To install the latest published package dependency, execute the following:
+# python3 -m pip install google-cloud-aiplatform
+
+
+# [START aiplatform_generated_aiplatform_v1_TensorboardService_ListTensorboardExperiments_sync]
+from google.cloud import aiplatform_v1
+
+
+def sample_list_tensorboard_experiments():
+ # Create a client
+ client = aiplatform_v1.TensorboardServiceClient()
+
+ # Initialize request argument(s)
+ request = aiplatform_v1.ListTensorboardExperimentsRequest(
+ parent="parent_value",
+ )
+
+ # Make the request
+ page_result = client.list_tensorboard_experiments(request=request)
+
+ # Handle the response
+ for response in page_result:
+ print(response)
+
+# [END aiplatform_generated_aiplatform_v1_TensorboardService_ListTensorboardExperiments_sync]
diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1_tensorboard_service_list_tensorboard_runs_async.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1_tensorboard_service_list_tensorboard_runs_async.py
new file mode 100644
index 0000000000..4737c272b5
--- /dev/null
+++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1_tensorboard_service_list_tensorboard_runs_async.py
@@ -0,0 +1,46 @@
+# -*- coding: utf-8 -*-
+# Copyright 2020 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+# Generated code. DO NOT EDIT!
+#
+# Snippet for ListTensorboardRuns
+# NOTE: This snippet has been automatically generated for illustrative purposes only.
+# It may require modifications to work in your environment.
+
+# To install the latest published package dependency, execute the following:
+# python3 -m pip install google-cloud-aiplatform
+
+
+# [START aiplatform_generated_aiplatform_v1_TensorboardService_ListTensorboardRuns_async]
+from google.cloud import aiplatform_v1
+
+
+async def sample_list_tensorboard_runs():
+ # Create a client
+ client = aiplatform_v1.TensorboardServiceAsyncClient()
+
+ # Initialize request argument(s)
+ request = aiplatform_v1.ListTensorboardRunsRequest(
+ parent="parent_value",
+ )
+
+ # Make the request
+    page_result = await client.list_tensorboard_runs(request=request)
+
+ # Handle the response
+ async for response in page_result:
+ print(response)
+
+# [END aiplatform_generated_aiplatform_v1_TensorboardService_ListTensorboardRuns_async]
diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1_tensorboard_service_list_tensorboard_runs_sync.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1_tensorboard_service_list_tensorboard_runs_sync.py
new file mode 100644
index 0000000000..15fb4eaaf3
--- /dev/null
+++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1_tensorboard_service_list_tensorboard_runs_sync.py
@@ -0,0 +1,46 @@
+# -*- coding: utf-8 -*-
+# Copyright 2020 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+# Generated code. DO NOT EDIT!
+#
+# Snippet for ListTensorboardRuns
+# NOTE: This snippet has been automatically generated for illustrative purposes only.
+# It may require modifications to work in your environment.
+
+# To install the latest published package dependency, execute the following:
+# python3 -m pip install google-cloud-aiplatform
+
+
+# [START aiplatform_generated_aiplatform_v1_TensorboardService_ListTensorboardRuns_sync]
+from google.cloud import aiplatform_v1
+
+
+def sample_list_tensorboard_runs():
+ # Create a client
+ client = aiplatform_v1.TensorboardServiceClient()
+
+ # Initialize request argument(s)
+ request = aiplatform_v1.ListTensorboardRunsRequest(
+ parent="parent_value",
+ )
+
+ # Make the request
+ page_result = client.list_tensorboard_runs(request=request)
+
+ # Handle the response
+ for response in page_result:
+ print(response)
+
+# [END aiplatform_generated_aiplatform_v1_TensorboardService_ListTensorboardRuns_sync]
diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1_tensorboard_service_list_tensorboard_time_series_async.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1_tensorboard_service_list_tensorboard_time_series_async.py
new file mode 100644
index 0000000000..2193c537e2
--- /dev/null
+++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1_tensorboard_service_list_tensorboard_time_series_async.py
@@ -0,0 +1,46 @@
+# -*- coding: utf-8 -*-
+# Copyright 2020 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+# Generated code. DO NOT EDIT!
+#
+# Snippet for ListTensorboardTimeSeries
+# NOTE: This snippet has been automatically generated for illustrative purposes only.
+# It may require modifications to work in your environment.
+
+# To install the latest published package dependency, execute the following:
+# python3 -m pip install google-cloud-aiplatform
+
+
+# [START aiplatform_generated_aiplatform_v1_TensorboardService_ListTensorboardTimeSeries_async]
+from google.cloud import aiplatform_v1
+
+
+async def sample_list_tensorboard_time_series():
+ # Create a client
+ client = aiplatform_v1.TensorboardServiceAsyncClient()
+
+ # Initialize request argument(s)
+ request = aiplatform_v1.ListTensorboardTimeSeriesRequest(
+ parent="parent_value",
+ )
+
+ # Make the request
+    page_result = await client.list_tensorboard_time_series(request=request)
+
+ # Handle the response
+ async for response in page_result:
+ print(response)
+
+# [END aiplatform_generated_aiplatform_v1_TensorboardService_ListTensorboardTimeSeries_async]
diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1_tensorboard_service_list_tensorboard_time_series_sync.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1_tensorboard_service_list_tensorboard_time_series_sync.py
new file mode 100644
index 0000000000..030e03cfc9
--- /dev/null
+++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1_tensorboard_service_list_tensorboard_time_series_sync.py
@@ -0,0 +1,46 @@
+# -*- coding: utf-8 -*-
+# Copyright 2020 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+# Generated code. DO NOT EDIT!
+#
+# Snippet for ListTensorboardTimeSeries
+# NOTE: This snippet has been automatically generated for illustrative purposes only.
+# It may require modifications to work in your environment.
+
+# To install the latest published package dependency, execute the following:
+# python3 -m pip install google-cloud-aiplatform
+
+
+# [START aiplatform_generated_aiplatform_v1_TensorboardService_ListTensorboardTimeSeries_sync]
+from google.cloud import aiplatform_v1
+
+
+def sample_list_tensorboard_time_series():
+ # Create a client
+ client = aiplatform_v1.TensorboardServiceClient()
+
+ # Initialize request argument(s)
+ request = aiplatform_v1.ListTensorboardTimeSeriesRequest(
+ parent="parent_value",
+ )
+
+ # Make the request
+ page_result = client.list_tensorboard_time_series(request=request)
+
+ # Handle the response
+ for response in page_result:
+ print(response)
+
+# [END aiplatform_generated_aiplatform_v1_TensorboardService_ListTensorboardTimeSeries_sync]
diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1_tensorboard_service_list_tensorboards_async.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1_tensorboard_service_list_tensorboards_async.py
new file mode 100644
index 0000000000..5266200c44
--- /dev/null
+++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1_tensorboard_service_list_tensorboards_async.py
@@ -0,0 +1,46 @@
+# -*- coding: utf-8 -*-
+# Copyright 2020 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+# Generated code. DO NOT EDIT!
+#
+# Snippet for ListTensorboards
+# NOTE: This snippet has been automatically generated for illustrative purposes only.
+# It may require modifications to work in your environment.
+
+# To install the latest published package dependency, execute the following:
+# python3 -m pip install google-cloud-aiplatform
+
+
+# [START aiplatform_generated_aiplatform_v1_TensorboardService_ListTensorboards_async]
+from google.cloud import aiplatform_v1
+
+
+async def sample_list_tensorboards():
+ # Create a client
+ client = aiplatform_v1.TensorboardServiceAsyncClient()
+
+ # Initialize request argument(s)
+ request = aiplatform_v1.ListTensorboardsRequest(
+ parent="parent_value",
+ )
+
+ # Make the request
+    page_result = await client.list_tensorboards(request=request)
+
+ # Handle the response
+ async for response in page_result:
+ print(response)
+
+# [END aiplatform_generated_aiplatform_v1_TensorboardService_ListTensorboards_async]
diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1_tensorboard_service_list_tensorboards_sync.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1_tensorboard_service_list_tensorboards_sync.py
new file mode 100644
index 0000000000..09c97d3155
--- /dev/null
+++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1_tensorboard_service_list_tensorboards_sync.py
@@ -0,0 +1,46 @@
+# -*- coding: utf-8 -*-
+# Copyright 2020 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+# Generated code. DO NOT EDIT!
+#
+# Snippet for ListTensorboards
+# NOTE: This snippet has been automatically generated for illustrative purposes only.
+# It may require modifications to work in your environment.
+
+# To install the latest published package dependency, execute the following:
+# python3 -m pip install google-cloud-aiplatform
+
+
+# [START aiplatform_generated_aiplatform_v1_TensorboardService_ListTensorboards_sync]
+from google.cloud import aiplatform_v1
+
+
+def sample_list_tensorboards():
+ # Create a client
+ client = aiplatform_v1.TensorboardServiceClient()
+
+ # Initialize request argument(s)
+ request = aiplatform_v1.ListTensorboardsRequest(
+ parent="parent_value",
+ )
+
+ # Make the request
+ page_result = client.list_tensorboards(request=request)
+
+ # Handle the response
+ for response in page_result:
+ print(response)
+
+# [END aiplatform_generated_aiplatform_v1_TensorboardService_ListTensorboards_sync]
diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1_tensorboard_service_read_tensorboard_blob_data_async.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1_tensorboard_service_read_tensorboard_blob_data_async.py
new file mode 100644
index 0000000000..6d9ff861a3
--- /dev/null
+++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1_tensorboard_service_read_tensorboard_blob_data_async.py
@@ -0,0 +1,46 @@
+# -*- coding: utf-8 -*-
+# Copyright 2020 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+# Generated code. DO NOT EDIT!
+#
+# Snippet for ReadTensorboardBlobData
+# NOTE: This snippet has been automatically generated for illustrative purposes only.
+# It may require modifications to work in your environment.
+
+# To install the latest published package dependency, execute the following:
+# python3 -m pip install google-cloud-aiplatform
+
+
+# [START aiplatform_generated_aiplatform_v1_TensorboardService_ReadTensorboardBlobData_async]
+from google.cloud import aiplatform_v1
+
+
+async def sample_read_tensorboard_blob_data():
+ # Create a client
+ client = aiplatform_v1.TensorboardServiceAsyncClient()
+
+ # Initialize request argument(s)
+ request = aiplatform_v1.ReadTensorboardBlobDataRequest(
+ time_series="time_series_value",
+ )
+
+ # Make the request
+ stream = await client.read_tensorboard_blob_data(request=request)
+
+ # Handle the response
+ async for response in stream:
+ print(response)
+
+# [END aiplatform_generated_aiplatform_v1_TensorboardService_ReadTensorboardBlobData_async]
diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1_tensorboard_service_read_tensorboard_blob_data_sync.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1_tensorboard_service_read_tensorboard_blob_data_sync.py
new file mode 100644
index 0000000000..6f45a63423
--- /dev/null
+++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1_tensorboard_service_read_tensorboard_blob_data_sync.py
@@ -0,0 +1,46 @@
+# -*- coding: utf-8 -*-
+# Copyright 2020 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+# Generated code. DO NOT EDIT!
+#
+# Snippet for ReadTensorboardBlobData
+# NOTE: This snippet has been automatically generated for illustrative purposes only.
+# It may require modifications to work in your environment.
+
+# To install the latest published package dependency, execute the following:
+# python3 -m pip install google-cloud-aiplatform
+
+
+# [START aiplatform_generated_aiplatform_v1_TensorboardService_ReadTensorboardBlobData_sync]
+from google.cloud import aiplatform_v1
+
+
+def sample_read_tensorboard_blob_data():
+ # Create a client
+ client = aiplatform_v1.TensorboardServiceClient()
+
+ # Initialize request argument(s)
+ request = aiplatform_v1.ReadTensorboardBlobDataRequest(
+ time_series="time_series_value",
+ )
+
+ # Make the request
+ stream = client.read_tensorboard_blob_data(request=request)
+
+ # Handle the response
+ for response in stream:
+ print(response)
+
+# [END aiplatform_generated_aiplatform_v1_TensorboardService_ReadTensorboardBlobData_sync]
diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1_tensorboard_service_read_tensorboard_time_series_data_async.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1_tensorboard_service_read_tensorboard_time_series_data_async.py
new file mode 100644
index 0000000000..4d8f1970e4
--- /dev/null
+++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1_tensorboard_service_read_tensorboard_time_series_data_async.py
@@ -0,0 +1,45 @@
+# -*- coding: utf-8 -*-
+# Copyright 2020 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+# Generated code. DO NOT EDIT!
+#
+# Snippet for ReadTensorboardTimeSeriesData
+# NOTE: This snippet has been automatically generated for illustrative purposes only.
+# It may require modifications to work in your environment.
+
+# To install the latest published package dependency, execute the following:
+# python3 -m pip install google-cloud-aiplatform
+
+
+# [START aiplatform_generated_aiplatform_v1_TensorboardService_ReadTensorboardTimeSeriesData_async]
+from google.cloud import aiplatform_v1
+
+
+async def sample_read_tensorboard_time_series_data():
+ # Create a client
+ client = aiplatform_v1.TensorboardServiceAsyncClient()
+
+ # Initialize request argument(s)
+ request = aiplatform_v1.ReadTensorboardTimeSeriesDataRequest(
+ tensorboard_time_series="tensorboard_time_series_value",
+ )
+
+ # Make the request
+ response = await client.read_tensorboard_time_series_data(request=request)
+
+ # Handle the response
+ print(response)
+
+# [END aiplatform_generated_aiplatform_v1_TensorboardService_ReadTensorboardTimeSeriesData_async]
diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1_tensorboard_service_read_tensorboard_time_series_data_sync.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1_tensorboard_service_read_tensorboard_time_series_data_sync.py
new file mode 100644
index 0000000000..c22c96855b
--- /dev/null
+++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1_tensorboard_service_read_tensorboard_time_series_data_sync.py
@@ -0,0 +1,45 @@
+# -*- coding: utf-8 -*-
+# Copyright 2020 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+# Generated code. DO NOT EDIT!
+#
+# Snippet for ReadTensorboardTimeSeriesData
+# NOTE: This snippet has been automatically generated for illustrative purposes only.
+# It may require modifications to work in your environment.
+
+# To install the latest published package dependency, execute the following:
+# python3 -m pip install google-cloud-aiplatform
+
+
+# [START aiplatform_generated_aiplatform_v1_TensorboardService_ReadTensorboardTimeSeriesData_sync]
+from google.cloud import aiplatform_v1
+
+
+def sample_read_tensorboard_time_series_data():
+ # Create a client
+ client = aiplatform_v1.TensorboardServiceClient()
+
+ # Initialize request argument(s)
+ request = aiplatform_v1.ReadTensorboardTimeSeriesDataRequest(
+ tensorboard_time_series="tensorboard_time_series_value",
+ )
+
+ # Make the request
+ response = client.read_tensorboard_time_series_data(request=request)
+
+ # Handle the response
+ print(response)
+
+# [END aiplatform_generated_aiplatform_v1_TensorboardService_ReadTensorboardTimeSeriesData_sync]
diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1_tensorboard_service_update_tensorboard_async.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1_tensorboard_service_update_tensorboard_async.py
new file mode 100644
index 0000000000..21b41292af
--- /dev/null
+++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1_tensorboard_service_update_tensorboard_async.py
@@ -0,0 +1,52 @@
+# -*- coding: utf-8 -*-
+# Copyright 2020 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+# Generated code. DO NOT EDIT!
+#
+# Snippet for UpdateTensorboard
+# NOTE: This snippet has been automatically generated for illustrative purposes only.
+# It may require modifications to work in your environment.
+
+# To install the latest published package dependency, execute the following:
+# python3 -m pip install google-cloud-aiplatform
+
+
+# [START aiplatform_generated_aiplatform_v1_TensorboardService_UpdateTensorboard_async]
+from google.cloud import aiplatform_v1
+
+
+async def sample_update_tensorboard():
+ # Create a client
+ client = aiplatform_v1.TensorboardServiceAsyncClient()
+
+ # Initialize request argument(s)
+ tensorboard = aiplatform_v1.Tensorboard()
+ tensorboard.display_name = "display_name_value"
+
+ request = aiplatform_v1.UpdateTensorboardRequest(
+ tensorboard=tensorboard,
+ )
+
+ # Make the request
+ operation = client.update_tensorboard(request=request)
+
+ print("Waiting for operation to complete...")
+
+ response = await operation.result()
+
+ # Handle the response
+ print(response)
+
+# [END aiplatform_generated_aiplatform_v1_TensorboardService_UpdateTensorboard_async]
diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1_tensorboard_service_update_tensorboard_experiment_async.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1_tensorboard_service_update_tensorboard_experiment_async.py
new file mode 100644
index 0000000000..e4ccc12a0a
--- /dev/null
+++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1_tensorboard_service_update_tensorboard_experiment_async.py
@@ -0,0 +1,44 @@
+# -*- coding: utf-8 -*-
+# Copyright 2020 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+# Generated code. DO NOT EDIT!
+#
+# Snippet for UpdateTensorboardExperiment
+# NOTE: This snippet has been automatically generated for illustrative purposes only.
+# It may require modifications to work in your environment.
+
+# To install the latest published package dependency, execute the following:
+# python3 -m pip install google-cloud-aiplatform
+
+
+# [START aiplatform_generated_aiplatform_v1_TensorboardService_UpdateTensorboardExperiment_async]
+from google.cloud import aiplatform_v1
+
+
+async def sample_update_tensorboard_experiment():
+ # Create a client
+ client = aiplatform_v1.TensorboardServiceAsyncClient()
+
+ # Initialize request argument(s)
+ request = aiplatform_v1.UpdateTensorboardExperimentRequest(
+ )
+
+ # Make the request
+ response = await client.update_tensorboard_experiment(request=request)
+
+ # Handle the response
+ print(response)
+
+# [END aiplatform_generated_aiplatform_v1_TensorboardService_UpdateTensorboardExperiment_async]
diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1_tensorboard_service_update_tensorboard_experiment_sync.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1_tensorboard_service_update_tensorboard_experiment_sync.py
new file mode 100644
index 0000000000..a914538833
--- /dev/null
+++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1_tensorboard_service_update_tensorboard_experiment_sync.py
@@ -0,0 +1,44 @@
+# -*- coding: utf-8 -*-
+# Copyright 2020 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+# Generated code. DO NOT EDIT!
+#
+# Snippet for UpdateTensorboardExperiment
+# NOTE: This snippet has been automatically generated for illustrative purposes only.
+# It may require modifications to work in your environment.
+
+# To install the latest published package dependency, execute the following:
+# python3 -m pip install google-cloud-aiplatform
+
+
+# [START aiplatform_generated_aiplatform_v1_TensorboardService_UpdateTensorboardExperiment_sync]
+from google.cloud import aiplatform_v1
+
+
+def sample_update_tensorboard_experiment():
+ # Create a client
+ client = aiplatform_v1.TensorboardServiceClient()
+
+ # Initialize request argument(s)
+ request = aiplatform_v1.UpdateTensorboardExperimentRequest(
+ )
+
+ # Make the request
+ response = client.update_tensorboard_experiment(request=request)
+
+ # Handle the response
+ print(response)
+
+# [END aiplatform_generated_aiplatform_v1_TensorboardService_UpdateTensorboardExperiment_sync]
diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1_tensorboard_service_update_tensorboard_run_async.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1_tensorboard_service_update_tensorboard_run_async.py
new file mode 100644
index 0000000000..c847c93f84
--- /dev/null
+++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1_tensorboard_service_update_tensorboard_run_async.py
@@ -0,0 +1,48 @@
+# -*- coding: utf-8 -*-
+# Copyright 2020 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+# Generated code. DO NOT EDIT!
+#
+# Snippet for UpdateTensorboardRun
+# NOTE: This snippet has been automatically generated for illustrative purposes only.
+# It may require modifications to work in your environment.
+
+# To install the latest published package dependency, execute the following:
+# python3 -m pip install google-cloud-aiplatform
+
+
+# [START aiplatform_generated_aiplatform_v1_TensorboardService_UpdateTensorboardRun_async]
+from google.cloud import aiplatform_v1
+
+
+async def sample_update_tensorboard_run():
+ # Create a client
+ client = aiplatform_v1.TensorboardServiceAsyncClient()
+
+ # Initialize request argument(s)
+ tensorboard_run = aiplatform_v1.TensorboardRun()
+ tensorboard_run.display_name = "display_name_value"
+
+ request = aiplatform_v1.UpdateTensorboardRunRequest(
+ tensorboard_run=tensorboard_run,
+ )
+
+ # Make the request
+ response = await client.update_tensorboard_run(request=request)
+
+ # Handle the response
+ print(response)
+
+# [END aiplatform_generated_aiplatform_v1_TensorboardService_UpdateTensorboardRun_async]
diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1_tensorboard_service_update_tensorboard_run_sync.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1_tensorboard_service_update_tensorboard_run_sync.py
new file mode 100644
index 0000000000..0333b8997a
--- /dev/null
+++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1_tensorboard_service_update_tensorboard_run_sync.py
@@ -0,0 +1,48 @@
+# -*- coding: utf-8 -*-
+# Copyright 2020 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+# Generated code. DO NOT EDIT!
+#
+# Snippet for UpdateTensorboardRun
+# NOTE: This snippet has been automatically generated for illustrative purposes only.
+# It may require modifications to work in your environment.
+
+# To install the latest published package dependency, execute the following:
+# python3 -m pip install google-cloud-aiplatform
+
+
+# [START aiplatform_generated_aiplatform_v1_TensorboardService_UpdateTensorboardRun_sync]
+from google.cloud import aiplatform_v1
+
+
+def sample_update_tensorboard_run():
+ # Create a client
+ client = aiplatform_v1.TensorboardServiceClient()
+
+ # Initialize request argument(s)
+ tensorboard_run = aiplatform_v1.TensorboardRun()
+ tensorboard_run.display_name = "display_name_value"
+
+ request = aiplatform_v1.UpdateTensorboardRunRequest(
+ tensorboard_run=tensorboard_run,
+ )
+
+ # Make the request
+ response = client.update_tensorboard_run(request=request)
+
+ # Handle the response
+ print(response)
+
+# [END aiplatform_generated_aiplatform_v1_TensorboardService_UpdateTensorboardRun_sync]
diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1_tensorboard_service_update_tensorboard_sync.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1_tensorboard_service_update_tensorboard_sync.py
new file mode 100644
index 0000000000..5c95bc1313
--- /dev/null
+++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1_tensorboard_service_update_tensorboard_sync.py
@@ -0,0 +1,52 @@
+# -*- coding: utf-8 -*-
+# Copyright 2020 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+# Generated code. DO NOT EDIT!
+#
+# Snippet for UpdateTensorboard
+# NOTE: This snippet has been automatically generated for illustrative purposes only.
+# It may require modifications to work in your environment.
+
+# To install the latest published package dependency, execute the following:
+# python3 -m pip install google-cloud-aiplatform
+
+
+# [START aiplatform_generated_aiplatform_v1_TensorboardService_UpdateTensorboard_sync]
+from google.cloud import aiplatform_v1
+
+
+def sample_update_tensorboard():
+ # Create a client
+ client = aiplatform_v1.TensorboardServiceClient()
+
+ # Initialize request argument(s)
+ tensorboard = aiplatform_v1.Tensorboard()
+ tensorboard.display_name = "display_name_value"
+
+ request = aiplatform_v1.UpdateTensorboardRequest(
+ tensorboard=tensorboard,
+ )
+
+ # Make the request
+ operation = client.update_tensorboard(request=request)
+
+ print("Waiting for operation to complete...")
+
+ response = operation.result()
+
+ # Handle the response
+ print(response)
+
+# [END aiplatform_generated_aiplatform_v1_TensorboardService_UpdateTensorboard_sync]
diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1_tensorboard_service_update_tensorboard_time_series_async.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1_tensorboard_service_update_tensorboard_time_series_async.py
new file mode 100644
index 0000000000..88cc10bbfc
--- /dev/null
+++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1_tensorboard_service_update_tensorboard_time_series_async.py
@@ -0,0 +1,49 @@
+# -*- coding: utf-8 -*-
+# Copyright 2020 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+# Generated code. DO NOT EDIT!
+#
+# Snippet for UpdateTensorboardTimeSeries
+# NOTE: This snippet has been automatically generated for illustrative purposes only.
+# It may require modifications to work in your environment.
+
+# To install the latest published package dependency, execute the following:
+# python3 -m pip install google-cloud-aiplatform
+
+
+# [START aiplatform_generated_aiplatform_v1_TensorboardService_UpdateTensorboardTimeSeries_async]
+from google.cloud import aiplatform_v1
+
+
+async def sample_update_tensorboard_time_series():
+ # Create a client
+ client = aiplatform_v1.TensorboardServiceAsyncClient()
+
+ # Initialize request argument(s)
+ tensorboard_time_series = aiplatform_v1.TensorboardTimeSeries()
+ tensorboard_time_series.display_name = "display_name_value"
+ tensorboard_time_series.value_type = "BLOB_SEQUENCE"
+
+ request = aiplatform_v1.UpdateTensorboardTimeSeriesRequest(
+ tensorboard_time_series=tensorboard_time_series,
+ )
+
+ # Make the request
+ response = await client.update_tensorboard_time_series(request=request)
+
+ # Handle the response
+ print(response)
+
+# [END aiplatform_generated_aiplatform_v1_TensorboardService_UpdateTensorboardTimeSeries_async]
diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1_tensorboard_service_update_tensorboard_time_series_sync.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1_tensorboard_service_update_tensorboard_time_series_sync.py
new file mode 100644
index 0000000000..312becd505
--- /dev/null
+++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1_tensorboard_service_update_tensorboard_time_series_sync.py
@@ -0,0 +1,49 @@
+# -*- coding: utf-8 -*-
+# Copyright 2020 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+# Generated code. DO NOT EDIT!
+#
+# Snippet for UpdateTensorboardTimeSeries
+# NOTE: This snippet has been automatically generated for illustrative purposes only.
+# It may require modifications to work in your environment.
+
+# To install the latest published package dependency, execute the following:
+# python3 -m pip install google-cloud-aiplatform
+
+
+# [START aiplatform_generated_aiplatform_v1_TensorboardService_UpdateTensorboardTimeSeries_sync]
+from google.cloud import aiplatform_v1
+
+
+def sample_update_tensorboard_time_series():
+ # Create a client
+ client = aiplatform_v1.TensorboardServiceClient()
+
+ # Initialize request argument(s)
+ tensorboard_time_series = aiplatform_v1.TensorboardTimeSeries()
+ tensorboard_time_series.display_name = "display_name_value"
+ tensorboard_time_series.value_type = "BLOB_SEQUENCE"
+
+ request = aiplatform_v1.UpdateTensorboardTimeSeriesRequest(
+ tensorboard_time_series=tensorboard_time_series,
+ )
+
+ # Make the request
+ response = client.update_tensorboard_time_series(request=request)
+
+ # Handle the response
+ print(response)
+
+# [END aiplatform_generated_aiplatform_v1_TensorboardService_UpdateTensorboardTimeSeries_sync]
diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1_tensorboard_service_write_tensorboard_experiment_data_async.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1_tensorboard_service_write_tensorboard_experiment_data_async.py
new file mode 100644
index 0000000000..df22a35021
--- /dev/null
+++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1_tensorboard_service_write_tensorboard_experiment_data_async.py
@@ -0,0 +1,51 @@
+# -*- coding: utf-8 -*-
+# Copyright 2020 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+# Generated code. DO NOT EDIT!
+#
+# Snippet for WriteTensorboardExperimentData
+# NOTE: This snippet has been automatically generated for illustrative purposes only.
+# It may require modifications to work in your environment.
+
+# To install the latest published package dependency, execute the following:
+# python3 -m pip install google-cloud-aiplatform
+
+
+# [START aiplatform_generated_aiplatform_v1_TensorboardService_WriteTensorboardExperimentData_async]
+from google.cloud import aiplatform_v1
+
+
+async def sample_write_tensorboard_experiment_data():
+ # Create a client
+ client = aiplatform_v1.TensorboardServiceAsyncClient()
+
+ # Initialize request argument(s)
+ write_run_data_requests = aiplatform_v1.WriteTensorboardRunDataRequest()
+ write_run_data_requests.tensorboard_run = "tensorboard_run_value"
+ write_run_data_requests.time_series_data.tensorboard_time_series_id = "tensorboard_time_series_id_value"
+ write_run_data_requests.time_series_data.value_type = "BLOB_SEQUENCE"
+
+ request = aiplatform_v1.WriteTensorboardExperimentDataRequest(
+ tensorboard_experiment="tensorboard_experiment_value",
+ write_run_data_requests=write_run_data_requests,
+ )
+
+ # Make the request
+ response = await client.write_tensorboard_experiment_data(request=request)
+
+ # Handle the response
+ print(response)
+
+# [END aiplatform_generated_aiplatform_v1_TensorboardService_WriteTensorboardExperimentData_async]
diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1_tensorboard_service_write_tensorboard_experiment_data_sync.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1_tensorboard_service_write_tensorboard_experiment_data_sync.py
new file mode 100644
index 0000000000..2f40a16f87
--- /dev/null
+++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1_tensorboard_service_write_tensorboard_experiment_data_sync.py
@@ -0,0 +1,51 @@
+# -*- coding: utf-8 -*-
+# Copyright 2020 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+# Generated code. DO NOT EDIT!
+#
+# Snippet for WriteTensorboardExperimentData
+# NOTE: This snippet has been automatically generated for illustrative purposes only.
+# It may require modifications to work in your environment.
+
+# To install the latest published package dependency, execute the following:
+# python3 -m pip install google-cloud-aiplatform
+
+
+# [START aiplatform_generated_aiplatform_v1_TensorboardService_WriteTensorboardExperimentData_sync]
+from google.cloud import aiplatform_v1
+
+
+def sample_write_tensorboard_experiment_data():
+ # Create a client
+ client = aiplatform_v1.TensorboardServiceClient()
+
+ # Initialize request argument(s)
+ write_run_data_requests = aiplatform_v1.WriteTensorboardRunDataRequest()
+ write_run_data_requests.tensorboard_run = "tensorboard_run_value"
+ write_run_data_requests.time_series_data.tensorboard_time_series_id = "tensorboard_time_series_id_value"
+ write_run_data_requests.time_series_data.value_type = "BLOB_SEQUENCE"
+
+ request = aiplatform_v1.WriteTensorboardExperimentDataRequest(
+ tensorboard_experiment="tensorboard_experiment_value",
+ write_run_data_requests=write_run_data_requests,
+ )
+
+ # Make the request
+ response = client.write_tensorboard_experiment_data(request=request)
+
+ # Handle the response
+ print(response)
+
+# [END aiplatform_generated_aiplatform_v1_TensorboardService_WriteTensorboardExperimentData_sync]
diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1_tensorboard_service_write_tensorboard_run_data_async.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1_tensorboard_service_write_tensorboard_run_data_async.py
new file mode 100644
index 0000000000..c4b4680f7a
--- /dev/null
+++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1_tensorboard_service_write_tensorboard_run_data_async.py
@@ -0,0 +1,50 @@
+# -*- coding: utf-8 -*-
+# Copyright 2020 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+# Generated code. DO NOT EDIT!
+#
+# Snippet for WriteTensorboardRunData
+# NOTE: This snippet has been automatically generated for illustrative purposes only.
+# It may require modifications to work in your environment.
+
+# To install the latest published package dependency, execute the following:
+# python3 -m pip install google-cloud-aiplatform
+
+
+# [START aiplatform_generated_aiplatform_v1_TensorboardService_WriteTensorboardRunData_async]
+from google.cloud import aiplatform_v1
+
+
+async def sample_write_tensorboard_run_data():
+ # Create a client
+ client = aiplatform_v1.TensorboardServiceAsyncClient()
+
+ # Initialize request argument(s)
+ time_series_data = aiplatform_v1.TimeSeriesData()
+ time_series_data.tensorboard_time_series_id = "tensorboard_time_series_id_value"
+ time_series_data.value_type = "BLOB_SEQUENCE"
+
+ request = aiplatform_v1.WriteTensorboardRunDataRequest(
+ tensorboard_run="tensorboard_run_value",
+ time_series_data=time_series_data,
+ )
+
+ # Make the request
+ response = await client.write_tensorboard_run_data(request=request)
+
+ # Handle the response
+ print(response)
+
+# [END aiplatform_generated_aiplatform_v1_TensorboardService_WriteTensorboardRunData_async]
diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1_tensorboard_service_write_tensorboard_run_data_sync.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1_tensorboard_service_write_tensorboard_run_data_sync.py
new file mode 100644
index 0000000000..b29147d3fc
--- /dev/null
+++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1_tensorboard_service_write_tensorboard_run_data_sync.py
@@ -0,0 +1,50 @@
+# -*- coding: utf-8 -*-
+# Copyright 2020 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+# Generated code. DO NOT EDIT!
+#
+# Snippet for WriteTensorboardRunData
+# NOTE: This snippet has been automatically generated for illustrative purposes only.
+# It may require modifications to work in your environment.
+
+# To install the latest published package dependency, execute the following:
+# python3 -m pip install google-cloud-aiplatform
+
+
+# [START aiplatform_generated_aiplatform_v1_TensorboardService_WriteTensorboardRunData_sync]
+from google.cloud import aiplatform_v1
+
+
+def sample_write_tensorboard_run_data():
+ # Create a client
+ client = aiplatform_v1.TensorboardServiceClient()
+
+ # Initialize request argument(s)
+ time_series_data = aiplatform_v1.TimeSeriesData()
+ time_series_data.tensorboard_time_series_id = "tensorboard_time_series_id_value"
+ time_series_data.value_type = "BLOB_SEQUENCE"
+
+ request = aiplatform_v1.WriteTensorboardRunDataRequest(
+ tensorboard_run="tensorboard_run_value",
+ time_series_data=time_series_data,
+ )
+
+ # Make the request
+ response = client.write_tensorboard_run_data(request=request)
+
+ # Handle the response
+ print(response)
+
+# [END aiplatform_generated_aiplatform_v1_TensorboardService_WriteTensorboardRunData_sync]
diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1_vizier_service_add_trial_measurement_async.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1_vizier_service_add_trial_measurement_async.py
index 91c2918b0b..bba7fee36d 100644
--- a/samples/generated_samples/aiplatform_generated_aiplatform_v1_vizier_service_add_trial_measurement_async.py
+++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1_vizier_service_add_trial_measurement_async.py
@@ -28,20 +28,18 @@
async def sample_add_trial_measurement():
- """Snippet for add_trial_measurement"""
-
# Create a client
client = aiplatform_v1.VizierServiceAsyncClient()
# Initialize request argument(s)
request = aiplatform_v1.AddTrialMeasurementRequest(
- trial_name="projects/{project}/locations/{location}/studies/{study}/trials/{trial}",
+ trial_name="trial_name_value",
)
# Make the request
response = await client.add_trial_measurement(request=request)
- # Handle response
+ # Handle the response
print(response)
# [END aiplatform_generated_aiplatform_v1_VizierService_AddTrialMeasurement_async]
diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1_vizier_service_add_trial_measurement_sync.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1_vizier_service_add_trial_measurement_sync.py
index d4758676fb..f507fe23e1 100644
--- a/samples/generated_samples/aiplatform_generated_aiplatform_v1_vizier_service_add_trial_measurement_sync.py
+++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1_vizier_service_add_trial_measurement_sync.py
@@ -28,20 +28,18 @@
def sample_add_trial_measurement():
- """Snippet for add_trial_measurement"""
-
# Create a client
client = aiplatform_v1.VizierServiceClient()
# Initialize request argument(s)
request = aiplatform_v1.AddTrialMeasurementRequest(
- trial_name="projects/{project}/locations/{location}/studies/{study}/trials/{trial}",
+ trial_name="trial_name_value",
)
# Make the request
response = client.add_trial_measurement(request=request)
- # Handle response
+ # Handle the response
print(response)
# [END aiplatform_generated_aiplatform_v1_VizierService_AddTrialMeasurement_sync]
diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1_vizier_service_check_trial_early_stopping_state_async.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1_vizier_service_check_trial_early_stopping_state_async.py
index d04fbc894b..b6e5c4a5aa 100644
--- a/samples/generated_samples/aiplatform_generated_aiplatform_v1_vizier_service_check_trial_early_stopping_state_async.py
+++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1_vizier_service_check_trial_early_stopping_state_async.py
@@ -28,14 +28,12 @@
async def sample_check_trial_early_stopping_state():
- """Snippet for check_trial_early_stopping_state"""
-
# Create a client
client = aiplatform_v1.VizierServiceAsyncClient()
# Initialize request argument(s)
request = aiplatform_v1.CheckTrialEarlyStoppingStateRequest(
- trial_name="projects/{project}/locations/{location}/studies/{study}/trials/{trial}",
+ trial_name="trial_name_value",
)
# Make the request
@@ -44,6 +42,8 @@ async def sample_check_trial_early_stopping_state():
print("Waiting for operation to complete...")
response = await operation.result()
+
+ # Handle the response
print(response)
# [END aiplatform_generated_aiplatform_v1_VizierService_CheckTrialEarlyStoppingState_async]
diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1_vizier_service_check_trial_early_stopping_state_sync.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1_vizier_service_check_trial_early_stopping_state_sync.py
index dc797321c3..b3dad28e79 100644
--- a/samples/generated_samples/aiplatform_generated_aiplatform_v1_vizier_service_check_trial_early_stopping_state_sync.py
+++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1_vizier_service_check_trial_early_stopping_state_sync.py
@@ -28,14 +28,12 @@
def sample_check_trial_early_stopping_state():
- """Snippet for check_trial_early_stopping_state"""
-
# Create a client
client = aiplatform_v1.VizierServiceClient()
# Initialize request argument(s)
request = aiplatform_v1.CheckTrialEarlyStoppingStateRequest(
- trial_name="projects/{project}/locations/{location}/studies/{study}/trials/{trial}",
+ trial_name="trial_name_value",
)
# Make the request
@@ -44,6 +42,8 @@ def sample_check_trial_early_stopping_state():
print("Waiting for operation to complete...")
response = operation.result()
+
+ # Handle the response
print(response)
# [END aiplatform_generated_aiplatform_v1_VizierService_CheckTrialEarlyStoppingState_sync]
diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1_vizier_service_complete_trial_async.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1_vizier_service_complete_trial_async.py
index ef6517e653..681460ce51 100644
--- a/samples/generated_samples/aiplatform_generated_aiplatform_v1_vizier_service_complete_trial_async.py
+++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1_vizier_service_complete_trial_async.py
@@ -28,20 +28,18 @@
async def sample_complete_trial():
- """Snippet for complete_trial"""
-
# Create a client
client = aiplatform_v1.VizierServiceAsyncClient()
# Initialize request argument(s)
request = aiplatform_v1.CompleteTrialRequest(
- name="projects/{project}/locations/{location}/studies/{study}/trials/{trial}",
+ name="name_value",
)
# Make the request
response = await client.complete_trial(request=request)
- # Handle response
+ # Handle the response
print(response)
# [END aiplatform_generated_aiplatform_v1_VizierService_CompleteTrial_async]
diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1_vizier_service_complete_trial_sync.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1_vizier_service_complete_trial_sync.py
index 94ee89c693..846dbe337e 100644
--- a/samples/generated_samples/aiplatform_generated_aiplatform_v1_vizier_service_complete_trial_sync.py
+++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1_vizier_service_complete_trial_sync.py
@@ -28,20 +28,18 @@
def sample_complete_trial():
- """Snippet for complete_trial"""
-
# Create a client
client = aiplatform_v1.VizierServiceClient()
# Initialize request argument(s)
request = aiplatform_v1.CompleteTrialRequest(
- name="projects/{project}/locations/{location}/studies/{study}/trials/{trial}",
+ name="name_value",
)
# Make the request
response = client.complete_trial(request=request)
- # Handle response
+ # Handle the response
print(response)
# [END aiplatform_generated_aiplatform_v1_VizierService_CompleteTrial_sync]
diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1_vizier_service_create_study_async.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1_vizier_service_create_study_async.py
index 50aa16e9be..f4b17517af 100644
--- a/samples/generated_samples/aiplatform_generated_aiplatform_v1_vizier_service_create_study_async.py
+++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1_vizier_service_create_study_async.py
@@ -28,8 +28,6 @@
async def sample_create_study():
- """Snippet for create_study"""
-
# Create a client
client = aiplatform_v1.VizierServiceAsyncClient()
@@ -43,14 +41,14 @@ async def sample_create_study():
study.study_spec.parameters.parameter_id = "parameter_id_value"
request = aiplatform_v1.CreateStudyRequest(
- parent="projects/{project}/locations/{location}",
+ parent="parent_value",
study=study,
)
# Make the request
response = await client.create_study(request=request)
- # Handle response
+ # Handle the response
print(response)
# [END aiplatform_generated_aiplatform_v1_VizierService_CreateStudy_async]
diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1_vizier_service_create_study_sync.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1_vizier_service_create_study_sync.py
index eb5ea1a4dd..18ab422f10 100644
--- a/samples/generated_samples/aiplatform_generated_aiplatform_v1_vizier_service_create_study_sync.py
+++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1_vizier_service_create_study_sync.py
@@ -28,8 +28,6 @@
def sample_create_study():
- """Snippet for create_study"""
-
# Create a client
client = aiplatform_v1.VizierServiceClient()
@@ -43,14 +41,14 @@ def sample_create_study():
study.study_spec.parameters.parameter_id = "parameter_id_value"
request = aiplatform_v1.CreateStudyRequest(
- parent="projects/{project}/locations/{location}",
+ parent="parent_value",
study=study,
)
# Make the request
response = client.create_study(request=request)
- # Handle response
+ # Handle the response
print(response)
# [END aiplatform_generated_aiplatform_v1_VizierService_CreateStudy_sync]
diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1_vizier_service_create_trial_async.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1_vizier_service_create_trial_async.py
index 72a03cb5e8..c3c33b93e5 100644
--- a/samples/generated_samples/aiplatform_generated_aiplatform_v1_vizier_service_create_trial_async.py
+++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1_vizier_service_create_trial_async.py
@@ -28,20 +28,18 @@
async def sample_create_trial():
- """Snippet for create_trial"""
-
# Create a client
client = aiplatform_v1.VizierServiceAsyncClient()
# Initialize request argument(s)
request = aiplatform_v1.CreateTrialRequest(
- parent="projects/{project}/locations/{location}/studies/{study}",
+ parent="parent_value",
)
# Make the request
response = await client.create_trial(request=request)
- # Handle response
+ # Handle the response
print(response)
# [END aiplatform_generated_aiplatform_v1_VizierService_CreateTrial_async]
diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1_vizier_service_create_trial_sync.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1_vizier_service_create_trial_sync.py
index ccce18f48e..d6ca08c7be 100644
--- a/samples/generated_samples/aiplatform_generated_aiplatform_v1_vizier_service_create_trial_sync.py
+++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1_vizier_service_create_trial_sync.py
@@ -28,20 +28,18 @@
def sample_create_trial():
- """Snippet for create_trial"""
-
# Create a client
client = aiplatform_v1.VizierServiceClient()
# Initialize request argument(s)
request = aiplatform_v1.CreateTrialRequest(
- parent="projects/{project}/locations/{location}/studies/{study}",
+ parent="parent_value",
)
# Make the request
response = client.create_trial(request=request)
- # Handle response
+ # Handle the response
print(response)
# [END aiplatform_generated_aiplatform_v1_VizierService_CreateTrial_sync]
diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1_vizier_service_delete_study_async.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1_vizier_service_delete_study_async.py
index 12c33b557c..39c48de3df 100644
--- a/samples/generated_samples/aiplatform_generated_aiplatform_v1_vizier_service_delete_study_async.py
+++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1_vizier_service_delete_study_async.py
@@ -28,18 +28,16 @@
async def sample_delete_study():
- """Snippet for delete_study"""
-
# Create a client
client = aiplatform_v1.VizierServiceAsyncClient()
# Initialize request argument(s)
request = aiplatform_v1.DeleteStudyRequest(
- name="projects/{project}/locations/{location}/studies/{study}",
+ name="name_value",
)
# Make the request
- response = await client.delete_study(request=request)
+ await client.delete_study(request=request)
# [END aiplatform_generated_aiplatform_v1_VizierService_DeleteStudy_async]
diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1_vizier_service_delete_study_sync.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1_vizier_service_delete_study_sync.py
index 941781e58c..46f7929357 100644
--- a/samples/generated_samples/aiplatform_generated_aiplatform_v1_vizier_service_delete_study_sync.py
+++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1_vizier_service_delete_study_sync.py
@@ -28,18 +28,16 @@
def sample_delete_study():
- """Snippet for delete_study"""
-
# Create a client
client = aiplatform_v1.VizierServiceClient()
# Initialize request argument(s)
request = aiplatform_v1.DeleteStudyRequest(
- name="projects/{project}/locations/{location}/studies/{study}",
+ name="name_value",
)
# Make the request
- response = client.delete_study(request=request)
+ client.delete_study(request=request)
# [END aiplatform_generated_aiplatform_v1_VizierService_DeleteStudy_sync]
diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1_vizier_service_delete_trial_async.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1_vizier_service_delete_trial_async.py
index eaf0987e3d..052f351c55 100644
--- a/samples/generated_samples/aiplatform_generated_aiplatform_v1_vizier_service_delete_trial_async.py
+++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1_vizier_service_delete_trial_async.py
@@ -28,18 +28,16 @@
async def sample_delete_trial():
- """Snippet for delete_trial"""
-
# Create a client
client = aiplatform_v1.VizierServiceAsyncClient()
# Initialize request argument(s)
request = aiplatform_v1.DeleteTrialRequest(
- name="projects/{project}/locations/{location}/studies/{study}/trials/{trial}",
+ name="name_value",
)
# Make the request
- response = await client.delete_trial(request=request)
+ await client.delete_trial(request=request)
# [END aiplatform_generated_aiplatform_v1_VizierService_DeleteTrial_async]
diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1_vizier_service_delete_trial_sync.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1_vizier_service_delete_trial_sync.py
index fe815c635e..1c7b2a85b2 100644
--- a/samples/generated_samples/aiplatform_generated_aiplatform_v1_vizier_service_delete_trial_sync.py
+++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1_vizier_service_delete_trial_sync.py
@@ -28,18 +28,16 @@
def sample_delete_trial():
- """Snippet for delete_trial"""
-
# Create a client
client = aiplatform_v1.VizierServiceClient()
# Initialize request argument(s)
request = aiplatform_v1.DeleteTrialRequest(
- name="projects/{project}/locations/{location}/studies/{study}/trials/{trial}",
+ name="name_value",
)
# Make the request
- response = client.delete_trial(request=request)
+ client.delete_trial(request=request)
# [END aiplatform_generated_aiplatform_v1_VizierService_DeleteTrial_sync]
diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1_vizier_service_get_study_async.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1_vizier_service_get_study_async.py
index 22d82bdc37..4b23e34234 100644
--- a/samples/generated_samples/aiplatform_generated_aiplatform_v1_vizier_service_get_study_async.py
+++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1_vizier_service_get_study_async.py
@@ -28,20 +28,18 @@
async def sample_get_study():
- """Snippet for get_study"""
-
# Create a client
client = aiplatform_v1.VizierServiceAsyncClient()
# Initialize request argument(s)
request = aiplatform_v1.GetStudyRequest(
- name="projects/{project}/locations/{location}/studies/{study}",
+ name="name_value",
)
# Make the request
response = await client.get_study(request=request)
- # Handle response
+ # Handle the response
print(response)
# [END aiplatform_generated_aiplatform_v1_VizierService_GetStudy_async]
diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1_vizier_service_get_study_sync.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1_vizier_service_get_study_sync.py
index f476ff9cb1..f7566610b3 100644
--- a/samples/generated_samples/aiplatform_generated_aiplatform_v1_vizier_service_get_study_sync.py
+++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1_vizier_service_get_study_sync.py
@@ -28,20 +28,18 @@
def sample_get_study():
- """Snippet for get_study"""
-
# Create a client
client = aiplatform_v1.VizierServiceClient()
# Initialize request argument(s)
request = aiplatform_v1.GetStudyRequest(
- name="projects/{project}/locations/{location}/studies/{study}",
+ name="name_value",
)
# Make the request
response = client.get_study(request=request)
- # Handle response
+ # Handle the response
print(response)
# [END aiplatform_generated_aiplatform_v1_VizierService_GetStudy_sync]
diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1_vizier_service_get_trial_async.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1_vizier_service_get_trial_async.py
index 33eb6ac418..61e9c6be57 100644
--- a/samples/generated_samples/aiplatform_generated_aiplatform_v1_vizier_service_get_trial_async.py
+++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1_vizier_service_get_trial_async.py
@@ -28,20 +28,18 @@
async def sample_get_trial():
- """Snippet for get_trial"""
-
# Create a client
client = aiplatform_v1.VizierServiceAsyncClient()
# Initialize request argument(s)
request = aiplatform_v1.GetTrialRequest(
- name="projects/{project}/locations/{location}/studies/{study}/trials/{trial}",
+ name="name_value",
)
# Make the request
response = await client.get_trial(request=request)
- # Handle response
+ # Handle the response
print(response)
# [END aiplatform_generated_aiplatform_v1_VizierService_GetTrial_async]
diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1_vizier_service_get_trial_sync.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1_vizier_service_get_trial_sync.py
index bee36c91a9..1eb2d2717b 100644
--- a/samples/generated_samples/aiplatform_generated_aiplatform_v1_vizier_service_get_trial_sync.py
+++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1_vizier_service_get_trial_sync.py
@@ -28,20 +28,18 @@
def sample_get_trial():
- """Snippet for get_trial"""
-
# Create a client
client = aiplatform_v1.VizierServiceClient()
# Initialize request argument(s)
request = aiplatform_v1.GetTrialRequest(
- name="projects/{project}/locations/{location}/studies/{study}/trials/{trial}",
+ name="name_value",
)
# Make the request
response = client.get_trial(request=request)
- # Handle response
+ # Handle the response
print(response)
# [END aiplatform_generated_aiplatform_v1_VizierService_GetTrial_sync]
diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1_vizier_service_list_optimal_trials_async.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1_vizier_service_list_optimal_trials_async.py
index fadb8558c6..e8f5c108ad 100644
--- a/samples/generated_samples/aiplatform_generated_aiplatform_v1_vizier_service_list_optimal_trials_async.py
+++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1_vizier_service_list_optimal_trials_async.py
@@ -28,20 +28,18 @@
async def sample_list_optimal_trials():
- """Snippet for list_optimal_trials"""
-
# Create a client
client = aiplatform_v1.VizierServiceAsyncClient()
# Initialize request argument(s)
request = aiplatform_v1.ListOptimalTrialsRequest(
- parent="projects/{project}/locations/{location}/studies/{study}",
+ parent="parent_value",
)
# Make the request
response = await client.list_optimal_trials(request=request)
- # Handle response
+ # Handle the response
print(response)
# [END aiplatform_generated_aiplatform_v1_VizierService_ListOptimalTrials_async]
diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1_vizier_service_list_optimal_trials_sync.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1_vizier_service_list_optimal_trials_sync.py
index 4081d09cb4..c6745dff6d 100644
--- a/samples/generated_samples/aiplatform_generated_aiplatform_v1_vizier_service_list_optimal_trials_sync.py
+++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1_vizier_service_list_optimal_trials_sync.py
@@ -28,20 +28,18 @@
def sample_list_optimal_trials():
- """Snippet for list_optimal_trials"""
-
# Create a client
client = aiplatform_v1.VizierServiceClient()
# Initialize request argument(s)
request = aiplatform_v1.ListOptimalTrialsRequest(
- parent="projects/{project}/locations/{location}/studies/{study}",
+ parent="parent_value",
)
# Make the request
response = client.list_optimal_trials(request=request)
- # Handle response
+ # Handle the response
print(response)
# [END aiplatform_generated_aiplatform_v1_VizierService_ListOptimalTrials_sync]
diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1_vizier_service_list_studies_async.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1_vizier_service_list_studies_async.py
index 640fe430f6..a207937ebf 100644
--- a/samples/generated_samples/aiplatform_generated_aiplatform_v1_vizier_service_list_studies_async.py
+++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1_vizier_service_list_studies_async.py
@@ -28,18 +28,18 @@
async def sample_list_studies():
- """Snippet for list_studies"""
-
# Create a client
client = aiplatform_v1.VizierServiceAsyncClient()
# Initialize request argument(s)
request = aiplatform_v1.ListStudiesRequest(
- parent="projects/{project}/locations/{location}",
+ parent="parent_value",
)
# Make the request
page_result = client.list_studies(request=request)
+
+ # Handle the response
async for response in page_result:
print(response)
diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1_vizier_service_list_studies_sync.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1_vizier_service_list_studies_sync.py
index dae70b8d30..6d164d05a6 100644
--- a/samples/generated_samples/aiplatform_generated_aiplatform_v1_vizier_service_list_studies_sync.py
+++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1_vizier_service_list_studies_sync.py
@@ -28,18 +28,18 @@
def sample_list_studies():
- """Snippet for list_studies"""
-
# Create a client
client = aiplatform_v1.VizierServiceClient()
# Initialize request argument(s)
request = aiplatform_v1.ListStudiesRequest(
- parent="projects/{project}/locations/{location}",
+ parent="parent_value",
)
# Make the request
page_result = client.list_studies(request=request)
+
+ # Handle the response
for response in page_result:
print(response)
diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1_vizier_service_list_trials_async.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1_vizier_service_list_trials_async.py
index c33444a0bc..bd24ba88c9 100644
--- a/samples/generated_samples/aiplatform_generated_aiplatform_v1_vizier_service_list_trials_async.py
+++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1_vizier_service_list_trials_async.py
@@ -28,18 +28,18 @@
async def sample_list_trials():
- """Snippet for list_trials"""
-
# Create a client
client = aiplatform_v1.VizierServiceAsyncClient()
# Initialize request argument(s)
request = aiplatform_v1.ListTrialsRequest(
- parent="projects/{project}/locations/{location}/studies/{study}",
+ parent="parent_value",
)
# Make the request
page_result = client.list_trials(request=request)
+
+ # Handle the response
async for response in page_result:
print(response)
diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1_vizier_service_list_trials_sync.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1_vizier_service_list_trials_sync.py
index 3165a7fa76..24e2d84709 100644
--- a/samples/generated_samples/aiplatform_generated_aiplatform_v1_vizier_service_list_trials_sync.py
+++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1_vizier_service_list_trials_sync.py
@@ -28,18 +28,18 @@
def sample_list_trials():
- """Snippet for list_trials"""
-
# Create a client
client = aiplatform_v1.VizierServiceClient()
# Initialize request argument(s)
request = aiplatform_v1.ListTrialsRequest(
- parent="projects/{project}/locations/{location}/studies/{study}",
+ parent="parent_value",
)
# Make the request
page_result = client.list_trials(request=request)
+
+ # Handle the response
for response in page_result:
print(response)
diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1_vizier_service_lookup_study_async.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1_vizier_service_lookup_study_async.py
index fbcfd35a4e..4f87a235fd 100644
--- a/samples/generated_samples/aiplatform_generated_aiplatform_v1_vizier_service_lookup_study_async.py
+++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1_vizier_service_lookup_study_async.py
@@ -28,21 +28,19 @@
async def sample_lookup_study():
- """Snippet for lookup_study"""
-
# Create a client
client = aiplatform_v1.VizierServiceAsyncClient()
# Initialize request argument(s)
request = aiplatform_v1.LookupStudyRequest(
- parent="projects/{project}/locations/{location}",
+ parent="parent_value",
display_name="display_name_value",
)
# Make the request
response = await client.lookup_study(request=request)
- # Handle response
+ # Handle the response
print(response)
# [END aiplatform_generated_aiplatform_v1_VizierService_LookupStudy_async]
diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1_vizier_service_lookup_study_sync.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1_vizier_service_lookup_study_sync.py
index e003989f87..daed7351d1 100644
--- a/samples/generated_samples/aiplatform_generated_aiplatform_v1_vizier_service_lookup_study_sync.py
+++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1_vizier_service_lookup_study_sync.py
@@ -28,21 +28,19 @@
def sample_lookup_study():
- """Snippet for lookup_study"""
-
# Create a client
client = aiplatform_v1.VizierServiceClient()
# Initialize request argument(s)
request = aiplatform_v1.LookupStudyRequest(
- parent="projects/{project}/locations/{location}",
+ parent="parent_value",
display_name="display_name_value",
)
# Make the request
response = client.lookup_study(request=request)
- # Handle response
+ # Handle the response
print(response)
# [END aiplatform_generated_aiplatform_v1_VizierService_LookupStudy_sync]
diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1_vizier_service_stop_trial_async.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1_vizier_service_stop_trial_async.py
index aee4fcd57f..0a29c0e484 100644
--- a/samples/generated_samples/aiplatform_generated_aiplatform_v1_vizier_service_stop_trial_async.py
+++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1_vizier_service_stop_trial_async.py
@@ -28,20 +28,18 @@
async def sample_stop_trial():
- """Snippet for stop_trial"""
-
# Create a client
client = aiplatform_v1.VizierServiceAsyncClient()
# Initialize request argument(s)
request = aiplatform_v1.StopTrialRequest(
- name="projects/{project}/locations/{location}/studies/{study}/trials/{trial}",
+ name="name_value",
)
# Make the request
response = await client.stop_trial(request=request)
- # Handle response
+ # Handle the response
print(response)
# [END aiplatform_generated_aiplatform_v1_VizierService_StopTrial_async]
diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1_vizier_service_stop_trial_sync.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1_vizier_service_stop_trial_sync.py
index f9f7c1c27c..0d1c45a2e4 100644
--- a/samples/generated_samples/aiplatform_generated_aiplatform_v1_vizier_service_stop_trial_sync.py
+++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1_vizier_service_stop_trial_sync.py
@@ -28,20 +28,18 @@
def sample_stop_trial():
- """Snippet for stop_trial"""
-
# Create a client
client = aiplatform_v1.VizierServiceClient()
# Initialize request argument(s)
request = aiplatform_v1.StopTrialRequest(
- name="projects/{project}/locations/{location}/studies/{study}/trials/{trial}",
+ name="name_value",
)
# Make the request
response = client.stop_trial(request=request)
- # Handle response
+ # Handle the response
print(response)
# [END aiplatform_generated_aiplatform_v1_VizierService_StopTrial_sync]
diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1_vizier_service_suggest_trials_async.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1_vizier_service_suggest_trials_async.py
index 541ba7733f..5a604672c3 100644
--- a/samples/generated_samples/aiplatform_generated_aiplatform_v1_vizier_service_suggest_trials_async.py
+++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1_vizier_service_suggest_trials_async.py
@@ -28,14 +28,12 @@
async def sample_suggest_trials():
- """Snippet for suggest_trials"""
-
# Create a client
client = aiplatform_v1.VizierServiceAsyncClient()
# Initialize request argument(s)
request = aiplatform_v1.SuggestTrialsRequest(
- parent="projects/{project}/locations/{location}/studies/{study}",
+ parent="parent_value",
suggestion_count=1744,
client_id="client_id_value",
)
@@ -46,6 +44,8 @@ async def sample_suggest_trials():
print("Waiting for operation to complete...")
response = await operation.result()
+
+ # Handle the response
print(response)
# [END aiplatform_generated_aiplatform_v1_VizierService_SuggestTrials_async]
diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1_vizier_service_suggest_trials_sync.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1_vizier_service_suggest_trials_sync.py
index 2edc9fd07e..1b733b2f9f 100644
--- a/samples/generated_samples/aiplatform_generated_aiplatform_v1_vizier_service_suggest_trials_sync.py
+++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1_vizier_service_suggest_trials_sync.py
@@ -28,14 +28,12 @@
def sample_suggest_trials():
- """Snippet for suggest_trials"""
-
# Create a client
client = aiplatform_v1.VizierServiceClient()
# Initialize request argument(s)
request = aiplatform_v1.SuggestTrialsRequest(
- parent="projects/{project}/locations/{location}/studies/{study}",
+ parent="parent_value",
suggestion_count=1744,
client_id="client_id_value",
)
@@ -46,6 +44,8 @@ def sample_suggest_trials():
print("Waiting for operation to complete...")
response = operation.result()
+
+ # Handle the response
print(response)
# [END aiplatform_generated_aiplatform_v1_VizierService_SuggestTrials_sync]
diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_dataset_service_create_dataset_async.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_dataset_service_create_dataset_async.py
index 862051bdcb..8790a59835 100644
--- a/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_dataset_service_create_dataset_async.py
+++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_dataset_service_create_dataset_async.py
@@ -28,8 +28,6 @@
async def sample_create_dataset():
- """Snippet for create_dataset"""
-
# Create a client
client = aiplatform_v1beta1.DatasetServiceAsyncClient()
@@ -40,7 +38,7 @@ async def sample_create_dataset():
dataset.metadata.null_value = "NULL_VALUE"
request = aiplatform_v1beta1.CreateDatasetRequest(
- parent="projects/{project}/locations/{location}",
+ parent="parent_value",
dataset=dataset,
)
@@ -50,6 +48,8 @@ async def sample_create_dataset():
print("Waiting for operation to complete...")
response = await operation.result()
+
+ # Handle the response
print(response)
# [END aiplatform_generated_aiplatform_v1beta1_DatasetService_CreateDataset_async]
diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_dataset_service_create_dataset_sync.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_dataset_service_create_dataset_sync.py
index 7d5b0304f7..9dd9ca09e5 100644
--- a/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_dataset_service_create_dataset_sync.py
+++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_dataset_service_create_dataset_sync.py
@@ -28,8 +28,6 @@
def sample_create_dataset():
- """Snippet for create_dataset"""
-
# Create a client
client = aiplatform_v1beta1.DatasetServiceClient()
@@ -40,7 +38,7 @@ def sample_create_dataset():
dataset.metadata.null_value = "NULL_VALUE"
request = aiplatform_v1beta1.CreateDatasetRequest(
- parent="projects/{project}/locations/{location}",
+ parent="parent_value",
dataset=dataset,
)
@@ -50,6 +48,8 @@ def sample_create_dataset():
print("Waiting for operation to complete...")
response = operation.result()
+
+ # Handle the response
print(response)
# [END aiplatform_generated_aiplatform_v1beta1_DatasetService_CreateDataset_sync]
diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_dataset_service_delete_dataset_async.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_dataset_service_delete_dataset_async.py
index 1d7b11f434..1d048a5531 100644
--- a/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_dataset_service_delete_dataset_async.py
+++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_dataset_service_delete_dataset_async.py
@@ -28,14 +28,12 @@
async def sample_delete_dataset():
- """Snippet for delete_dataset"""
-
# Create a client
client = aiplatform_v1beta1.DatasetServiceAsyncClient()
# Initialize request argument(s)
request = aiplatform_v1beta1.DeleteDatasetRequest(
- name="projects/{project}/locations/{location}/datasets/{dataset}",
+ name="name_value",
)
# Make the request
@@ -44,6 +42,8 @@ async def sample_delete_dataset():
print("Waiting for operation to complete...")
response = await operation.result()
+
+ # Handle the response
print(response)
# [END aiplatform_generated_aiplatform_v1beta1_DatasetService_DeleteDataset_async]
diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_dataset_service_delete_dataset_sync.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_dataset_service_delete_dataset_sync.py
index 6b96507735..956a153c3e 100644
--- a/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_dataset_service_delete_dataset_sync.py
+++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_dataset_service_delete_dataset_sync.py
@@ -28,14 +28,12 @@
def sample_delete_dataset():
- """Snippet for delete_dataset"""
-
# Create a client
client = aiplatform_v1beta1.DatasetServiceClient()
# Initialize request argument(s)
request = aiplatform_v1beta1.DeleteDatasetRequest(
- name="projects/{project}/locations/{location}/datasets/{dataset}",
+ name="name_value",
)
# Make the request
@@ -44,6 +42,8 @@ def sample_delete_dataset():
print("Waiting for operation to complete...")
response = operation.result()
+
+ # Handle the response
print(response)
# [END aiplatform_generated_aiplatform_v1beta1_DatasetService_DeleteDataset_sync]
diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_dataset_service_export_data_async.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_dataset_service_export_data_async.py
index 3a4a305185..b439cc0712 100644
--- a/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_dataset_service_export_data_async.py
+++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_dataset_service_export_data_async.py
@@ -28,8 +28,6 @@
async def sample_export_data():
- """Snippet for export_data"""
-
# Create a client
client = aiplatform_v1beta1.DatasetServiceAsyncClient()
@@ -38,7 +36,7 @@ async def sample_export_data():
export_config.gcs_destination.output_uri_prefix = "output_uri_prefix_value"
request = aiplatform_v1beta1.ExportDataRequest(
- name="projects/{project}/locations/{location}/datasets/{dataset}",
+ name="name_value",
export_config=export_config,
)
@@ -48,6 +46,8 @@ async def sample_export_data():
print("Waiting for operation to complete...")
response = await operation.result()
+
+ # Handle the response
print(response)
# [END aiplatform_generated_aiplatform_v1beta1_DatasetService_ExportData_async]
diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_dataset_service_export_data_sync.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_dataset_service_export_data_sync.py
index 0c3911c5f8..abba28b612 100644
--- a/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_dataset_service_export_data_sync.py
+++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_dataset_service_export_data_sync.py
@@ -28,8 +28,6 @@
def sample_export_data():
- """Snippet for export_data"""
-
# Create a client
client = aiplatform_v1beta1.DatasetServiceClient()
@@ -38,7 +36,7 @@ def sample_export_data():
export_config.gcs_destination.output_uri_prefix = "output_uri_prefix_value"
request = aiplatform_v1beta1.ExportDataRequest(
- name="projects/{project}/locations/{location}/datasets/{dataset}",
+ name="name_value",
export_config=export_config,
)
@@ -48,6 +46,8 @@ def sample_export_data():
print("Waiting for operation to complete...")
response = operation.result()
+
+ # Handle the response
print(response)
# [END aiplatform_generated_aiplatform_v1beta1_DatasetService_ExportData_sync]
diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_dataset_service_get_annotation_spec_async.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_dataset_service_get_annotation_spec_async.py
index b6b72c77e1..66bc7074f9 100644
--- a/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_dataset_service_get_annotation_spec_async.py
+++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_dataset_service_get_annotation_spec_async.py
@@ -28,20 +28,18 @@
async def sample_get_annotation_spec():
- """Snippet for get_annotation_spec"""
-
# Create a client
client = aiplatform_v1beta1.DatasetServiceAsyncClient()
# Initialize request argument(s)
request = aiplatform_v1beta1.GetAnnotationSpecRequest(
- name="projects/{project}/locations/{location}/datasets/{dataset}/annotationSpecs/{annotation_spec}",
+ name="name_value",
)
# Make the request
response = await client.get_annotation_spec(request=request)
- # Handle response
+ # Handle the response
print(response)
# [END aiplatform_generated_aiplatform_v1beta1_DatasetService_GetAnnotationSpec_async]
diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_dataset_service_get_annotation_spec_sync.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_dataset_service_get_annotation_spec_sync.py
index 710e665530..b6ae993cea 100644
--- a/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_dataset_service_get_annotation_spec_sync.py
+++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_dataset_service_get_annotation_spec_sync.py
@@ -28,20 +28,18 @@
def sample_get_annotation_spec():
- """Snippet for get_annotation_spec"""
-
# Create a client
client = aiplatform_v1beta1.DatasetServiceClient()
# Initialize request argument(s)
request = aiplatform_v1beta1.GetAnnotationSpecRequest(
- name="projects/{project}/locations/{location}/datasets/{dataset}/annotationSpecs/{annotation_spec}",
+ name="name_value",
)
# Make the request
response = client.get_annotation_spec(request=request)
- # Handle response
+ # Handle the response
print(response)
# [END aiplatform_generated_aiplatform_v1beta1_DatasetService_GetAnnotationSpec_sync]
diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_dataset_service_get_dataset_async.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_dataset_service_get_dataset_async.py
index 8dddd5ddb5..ded49f4cdb 100644
--- a/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_dataset_service_get_dataset_async.py
+++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_dataset_service_get_dataset_async.py
@@ -28,20 +28,18 @@
async def sample_get_dataset():
- """Snippet for get_dataset"""
-
# Create a client
client = aiplatform_v1beta1.DatasetServiceAsyncClient()
# Initialize request argument(s)
request = aiplatform_v1beta1.GetDatasetRequest(
- name="projects/{project}/locations/{location}/datasets/{dataset}",
+ name="name_value",
)
# Make the request
response = await client.get_dataset(request=request)
- # Handle response
+ # Handle the response
print(response)
# [END aiplatform_generated_aiplatform_v1beta1_DatasetService_GetDataset_async]
diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_dataset_service_get_dataset_sync.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_dataset_service_get_dataset_sync.py
index 27cd2f3b15..2b13c32dc4 100644
--- a/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_dataset_service_get_dataset_sync.py
+++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_dataset_service_get_dataset_sync.py
@@ -28,20 +28,18 @@
def sample_get_dataset():
- """Snippet for get_dataset"""
-
# Create a client
client = aiplatform_v1beta1.DatasetServiceClient()
# Initialize request argument(s)
request = aiplatform_v1beta1.GetDatasetRequest(
- name="projects/{project}/locations/{location}/datasets/{dataset}",
+ name="name_value",
)
# Make the request
response = client.get_dataset(request=request)
- # Handle response
+ # Handle the response
print(response)
# [END aiplatform_generated_aiplatform_v1beta1_DatasetService_GetDataset_sync]
diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_dataset_service_import_data_async.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_dataset_service_import_data_async.py
index 61955a8ceb..fdfe38150e 100644
--- a/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_dataset_service_import_data_async.py
+++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_dataset_service_import_data_async.py
@@ -28,8 +28,6 @@
async def sample_import_data():
- """Snippet for import_data"""
-
# Create a client
client = aiplatform_v1beta1.DatasetServiceAsyncClient()
@@ -39,7 +37,7 @@ async def sample_import_data():
import_configs.import_schema_uri = "import_schema_uri_value"
request = aiplatform_v1beta1.ImportDataRequest(
- name="projects/{project}/locations/{location}/datasets/{dataset}",
+ name="name_value",
import_configs=import_configs,
)
@@ -49,6 +47,8 @@ async def sample_import_data():
print("Waiting for operation to complete...")
response = await operation.result()
+
+ # Handle the response
print(response)
# [END aiplatform_generated_aiplatform_v1beta1_DatasetService_ImportData_async]
diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_dataset_service_import_data_sync.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_dataset_service_import_data_sync.py
index c25c1ba4a1..241133d149 100644
--- a/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_dataset_service_import_data_sync.py
+++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_dataset_service_import_data_sync.py
@@ -28,8 +28,6 @@
def sample_import_data():
- """Snippet for import_data"""
-
# Create a client
client = aiplatform_v1beta1.DatasetServiceClient()
@@ -39,7 +37,7 @@ def sample_import_data():
import_configs.import_schema_uri = "import_schema_uri_value"
request = aiplatform_v1beta1.ImportDataRequest(
- name="projects/{project}/locations/{location}/datasets/{dataset}",
+ name="name_value",
import_configs=import_configs,
)
@@ -49,6 +47,8 @@ def sample_import_data():
print("Waiting for operation to complete...")
response = operation.result()
+
+ # Handle the response
print(response)
# [END aiplatform_generated_aiplatform_v1beta1_DatasetService_ImportData_sync]
diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_dataset_service_list_annotations_async.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_dataset_service_list_annotations_async.py
index 24b060c822..d138891541 100644
--- a/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_dataset_service_list_annotations_async.py
+++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_dataset_service_list_annotations_async.py
@@ -28,18 +28,18 @@
async def sample_list_annotations():
- """Snippet for list_annotations"""
-
# Create a client
client = aiplatform_v1beta1.DatasetServiceAsyncClient()
# Initialize request argument(s)
request = aiplatform_v1beta1.ListAnnotationsRequest(
- parent="projects/{project}/locations/{location}/datasets/{dataset}/dataItems/{data_item}",
+ parent="parent_value",
)
# Make the request
page_result = client.list_annotations(request=request)
+
+ # Handle the response
async for response in page_result:
print(response)
diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_dataset_service_list_annotations_sync.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_dataset_service_list_annotations_sync.py
index 88bbbaad0b..e73c42f8e8 100644
--- a/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_dataset_service_list_annotations_sync.py
+++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_dataset_service_list_annotations_sync.py
@@ -28,18 +28,18 @@
def sample_list_annotations():
- """Snippet for list_annotations"""
-
# Create a client
client = aiplatform_v1beta1.DatasetServiceClient()
# Initialize request argument(s)
request = aiplatform_v1beta1.ListAnnotationsRequest(
- parent="projects/{project}/locations/{location}/datasets/{dataset}/dataItems/{data_item}",
+ parent="parent_value",
)
# Make the request
page_result = client.list_annotations(request=request)
+
+ # Handle the response
for response in page_result:
print(response)
diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_dataset_service_list_data_items_async.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_dataset_service_list_data_items_async.py
index 815f28c89c..91e2690ef1 100644
--- a/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_dataset_service_list_data_items_async.py
+++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_dataset_service_list_data_items_async.py
@@ -28,18 +28,18 @@
async def sample_list_data_items():
- """Snippet for list_data_items"""
-
# Create a client
client = aiplatform_v1beta1.DatasetServiceAsyncClient()
# Initialize request argument(s)
request = aiplatform_v1beta1.ListDataItemsRequest(
- parent="projects/{project}/locations/{location}/datasets/{dataset}",
+ parent="parent_value",
)
# Make the request
page_result = client.list_data_items(request=request)
+
+ # Handle the response
async for response in page_result:
print(response)
diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_dataset_service_list_data_items_sync.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_dataset_service_list_data_items_sync.py
index 03fa0617ac..1f5063fcc0 100644
--- a/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_dataset_service_list_data_items_sync.py
+++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_dataset_service_list_data_items_sync.py
@@ -28,18 +28,18 @@
def sample_list_data_items():
- """Snippet for list_data_items"""
-
# Create a client
client = aiplatform_v1beta1.DatasetServiceClient()
# Initialize request argument(s)
request = aiplatform_v1beta1.ListDataItemsRequest(
- parent="projects/{project}/locations/{location}/datasets/{dataset}",
+ parent="parent_value",
)
# Make the request
page_result = client.list_data_items(request=request)
+
+ # Handle the response
for response in page_result:
print(response)
diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_dataset_service_list_datasets_async.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_dataset_service_list_datasets_async.py
index 24ee641527..fbce88bf42 100644
--- a/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_dataset_service_list_datasets_async.py
+++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_dataset_service_list_datasets_async.py
@@ -28,18 +28,18 @@
async def sample_list_datasets():
- """Snippet for list_datasets"""
-
# Create a client
client = aiplatform_v1beta1.DatasetServiceAsyncClient()
# Initialize request argument(s)
request = aiplatform_v1beta1.ListDatasetsRequest(
- parent="projects/{project}/locations/{location}",
+ parent="parent_value",
)
# Make the request
page_result = client.list_datasets(request=request)
+
+ # Handle the response
async for response in page_result:
print(response)
diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_dataset_service_list_datasets_sync.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_dataset_service_list_datasets_sync.py
index 085a436e39..4f4c1ff9fe 100644
--- a/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_dataset_service_list_datasets_sync.py
+++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_dataset_service_list_datasets_sync.py
@@ -28,18 +28,18 @@
def sample_list_datasets():
- """Snippet for list_datasets"""
-
# Create a client
client = aiplatform_v1beta1.DatasetServiceClient()
# Initialize request argument(s)
request = aiplatform_v1beta1.ListDatasetsRequest(
- parent="projects/{project}/locations/{location}",
+ parent="parent_value",
)
# Make the request
page_result = client.list_datasets(request=request)
+
+ # Handle the response
for response in page_result:
print(response)
diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_dataset_service_update_dataset_async.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_dataset_service_update_dataset_async.py
index d1914e107e..50420cea12 100644
--- a/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_dataset_service_update_dataset_async.py
+++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_dataset_service_update_dataset_async.py
@@ -28,8 +28,6 @@
async def sample_update_dataset():
- """Snippet for update_dataset"""
-
# Create a client
client = aiplatform_v1beta1.DatasetServiceAsyncClient()
@@ -46,7 +44,7 @@ async def sample_update_dataset():
# Make the request
response = await client.update_dataset(request=request)
- # Handle response
+ # Handle the response
print(response)
# [END aiplatform_generated_aiplatform_v1beta1_DatasetService_UpdateDataset_async]
diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_dataset_service_update_dataset_sync.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_dataset_service_update_dataset_sync.py
index 1a95e4c8f5..96bbc05459 100644
--- a/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_dataset_service_update_dataset_sync.py
+++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_dataset_service_update_dataset_sync.py
@@ -28,8 +28,6 @@
def sample_update_dataset():
- """Snippet for update_dataset"""
-
# Create a client
client = aiplatform_v1beta1.DatasetServiceClient()
@@ -46,7 +44,7 @@ def sample_update_dataset():
# Make the request
response = client.update_dataset(request=request)
- # Handle response
+ # Handle the response
print(response)
# [END aiplatform_generated_aiplatform_v1beta1_DatasetService_UpdateDataset_sync]
diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_endpoint_service_create_endpoint_async.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_endpoint_service_create_endpoint_async.py
index f3c2c2294c..1b93540a90 100644
--- a/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_endpoint_service_create_endpoint_async.py
+++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_endpoint_service_create_endpoint_async.py
@@ -28,8 +28,6 @@
async def sample_create_endpoint():
- """Snippet for create_endpoint"""
-
# Create a client
client = aiplatform_v1beta1.EndpointServiceAsyncClient()
@@ -38,7 +36,7 @@ async def sample_create_endpoint():
endpoint.display_name = "display_name_value"
request = aiplatform_v1beta1.CreateEndpointRequest(
- parent="projects/{project}/locations/{location}",
+ parent="parent_value",
endpoint=endpoint,
)
@@ -48,6 +46,8 @@ async def sample_create_endpoint():
print("Waiting for operation to complete...")
response = await operation.result()
+
+ # Handle the response
print(response)
# [END aiplatform_generated_aiplatform_v1beta1_EndpointService_CreateEndpoint_async]
diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_endpoint_service_create_endpoint_sync.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_endpoint_service_create_endpoint_sync.py
index 5117afde74..9c7b1693f4 100644
--- a/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_endpoint_service_create_endpoint_sync.py
+++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_endpoint_service_create_endpoint_sync.py
@@ -28,8 +28,6 @@
def sample_create_endpoint():
- """Snippet for create_endpoint"""
-
# Create a client
client = aiplatform_v1beta1.EndpointServiceClient()
@@ -38,7 +36,7 @@ def sample_create_endpoint():
endpoint.display_name = "display_name_value"
request = aiplatform_v1beta1.CreateEndpointRequest(
- parent="projects/{project}/locations/{location}",
+ parent="parent_value",
endpoint=endpoint,
)
@@ -48,6 +46,8 @@ def sample_create_endpoint():
print("Waiting for operation to complete...")
response = operation.result()
+
+ # Handle the response
print(response)
# [END aiplatform_generated_aiplatform_v1beta1_EndpointService_CreateEndpoint_sync]
diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_endpoint_service_delete_endpoint_async.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_endpoint_service_delete_endpoint_async.py
index 6ff301e8a0..0711fcf13a 100644
--- a/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_endpoint_service_delete_endpoint_async.py
+++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_endpoint_service_delete_endpoint_async.py
@@ -28,14 +28,12 @@
async def sample_delete_endpoint():
- """Snippet for delete_endpoint"""
-
# Create a client
client = aiplatform_v1beta1.EndpointServiceAsyncClient()
# Initialize request argument(s)
request = aiplatform_v1beta1.DeleteEndpointRequest(
- name="projects/{project}/locations/{location}/endpoints/{endpoint}",
+ name="name_value",
)
# Make the request
@@ -44,6 +42,8 @@ async def sample_delete_endpoint():
print("Waiting for operation to complete...")
response = await operation.result()
+
+ # Handle the response
print(response)
# [END aiplatform_generated_aiplatform_v1beta1_EndpointService_DeleteEndpoint_async]
diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_endpoint_service_delete_endpoint_sync.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_endpoint_service_delete_endpoint_sync.py
index cdd58737f6..c32aebd9e0 100644
--- a/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_endpoint_service_delete_endpoint_sync.py
+++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_endpoint_service_delete_endpoint_sync.py
@@ -28,14 +28,12 @@
def sample_delete_endpoint():
- """Snippet for delete_endpoint"""
-
# Create a client
client = aiplatform_v1beta1.EndpointServiceClient()
# Initialize request argument(s)
request = aiplatform_v1beta1.DeleteEndpointRequest(
- name="projects/{project}/locations/{location}/endpoints/{endpoint}",
+ name="name_value",
)
# Make the request
@@ -44,6 +42,8 @@ def sample_delete_endpoint():
print("Waiting for operation to complete...")
response = operation.result()
+
+ # Handle the response
print(response)
# [END aiplatform_generated_aiplatform_v1beta1_EndpointService_DeleteEndpoint_sync]
diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_endpoint_service_deploy_model_async.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_endpoint_service_deploy_model_async.py
index 61e4db72aa..6e47bd0d84 100644
--- a/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_endpoint_service_deploy_model_async.py
+++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_endpoint_service_deploy_model_async.py
@@ -28,18 +28,16 @@
async def sample_deploy_model():
- """Snippet for deploy_model"""
-
# Create a client
client = aiplatform_v1beta1.EndpointServiceAsyncClient()
# Initialize request argument(s)
deployed_model = aiplatform_v1beta1.DeployedModel()
deployed_model.dedicated_resources.min_replica_count = 1803
- deployed_model.model = "projects/{project}/locations/{location}/models/{model}"
+ deployed_model.model = "model_value"
request = aiplatform_v1beta1.DeployModelRequest(
- endpoint="projects/{project}/locations/{location}/endpoints/{endpoint}",
+ endpoint="endpoint_value",
deployed_model=deployed_model,
)
@@ -49,6 +47,8 @@ async def sample_deploy_model():
print("Waiting for operation to complete...")
response = await operation.result()
+
+ # Handle the response
print(response)
# [END aiplatform_generated_aiplatform_v1beta1_EndpointService_DeployModel_async]
diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_endpoint_service_deploy_model_sync.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_endpoint_service_deploy_model_sync.py
index 54afa0f44b..e42d116949 100644
--- a/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_endpoint_service_deploy_model_sync.py
+++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_endpoint_service_deploy_model_sync.py
@@ -28,18 +28,16 @@
def sample_deploy_model():
- """Snippet for deploy_model"""
-
# Create a client
client = aiplatform_v1beta1.EndpointServiceClient()
# Initialize request argument(s)
deployed_model = aiplatform_v1beta1.DeployedModel()
deployed_model.dedicated_resources.min_replica_count = 1803
- deployed_model.model = "projects/{project}/locations/{location}/models/{model}"
+ deployed_model.model = "model_value"
request = aiplatform_v1beta1.DeployModelRequest(
- endpoint="projects/{project}/locations/{location}/endpoints/{endpoint}",
+ endpoint="endpoint_value",
deployed_model=deployed_model,
)
@@ -49,6 +47,8 @@ def sample_deploy_model():
print("Waiting for operation to complete...")
response = operation.result()
+
+ # Handle the response
print(response)
# [END aiplatform_generated_aiplatform_v1beta1_EndpointService_DeployModel_sync]
diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_endpoint_service_get_endpoint_async.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_endpoint_service_get_endpoint_async.py
index e3b8df7381..7a9ecc6ff2 100644
--- a/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_endpoint_service_get_endpoint_async.py
+++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_endpoint_service_get_endpoint_async.py
@@ -28,20 +28,18 @@
async def sample_get_endpoint():
- """Snippet for get_endpoint"""
-
# Create a client
client = aiplatform_v1beta1.EndpointServiceAsyncClient()
# Initialize request argument(s)
request = aiplatform_v1beta1.GetEndpointRequest(
- name="projects/{project}/locations/{location}/endpoints/{endpoint}",
+ name="name_value",
)
# Make the request
response = await client.get_endpoint(request=request)
- # Handle response
+ # Handle the response
print(response)
# [END aiplatform_generated_aiplatform_v1beta1_EndpointService_GetEndpoint_async]
diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_endpoint_service_get_endpoint_sync.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_endpoint_service_get_endpoint_sync.py
index 98c7295fc7..89a2781f64 100644
--- a/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_endpoint_service_get_endpoint_sync.py
+++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_endpoint_service_get_endpoint_sync.py
@@ -28,20 +28,18 @@
def sample_get_endpoint():
- """Snippet for get_endpoint"""
-
# Create a client
client = aiplatform_v1beta1.EndpointServiceClient()
# Initialize request argument(s)
request = aiplatform_v1beta1.GetEndpointRequest(
- name="projects/{project}/locations/{location}/endpoints/{endpoint}",
+ name="name_value",
)
# Make the request
response = client.get_endpoint(request=request)
- # Handle response
+ # Handle the response
print(response)
# [END aiplatform_generated_aiplatform_v1beta1_EndpointService_GetEndpoint_sync]
diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_endpoint_service_list_endpoints_async.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_endpoint_service_list_endpoints_async.py
index 8ff7ae4f46..bc579602be 100644
--- a/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_endpoint_service_list_endpoints_async.py
+++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_endpoint_service_list_endpoints_async.py
@@ -28,18 +28,18 @@
async def sample_list_endpoints():
- """Snippet for list_endpoints"""
-
# Create a client
client = aiplatform_v1beta1.EndpointServiceAsyncClient()
# Initialize request argument(s)
request = aiplatform_v1beta1.ListEndpointsRequest(
- parent="projects/{project}/locations/{location}",
+ parent="parent_value",
)
# Make the request
page_result = client.list_endpoints(request=request)
+
+ # Handle the response
async for response in page_result:
print(response)
diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_endpoint_service_list_endpoints_sync.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_endpoint_service_list_endpoints_sync.py
index 4a290747a4..e4bd7f3250 100644
--- a/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_endpoint_service_list_endpoints_sync.py
+++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_endpoint_service_list_endpoints_sync.py
@@ -28,18 +28,18 @@
def sample_list_endpoints():
- """Snippet for list_endpoints"""
-
# Create a client
client = aiplatform_v1beta1.EndpointServiceClient()
# Initialize request argument(s)
request = aiplatform_v1beta1.ListEndpointsRequest(
- parent="projects/{project}/locations/{location}",
+ parent="parent_value",
)
# Make the request
page_result = client.list_endpoints(request=request)
+
+ # Handle the response
for response in page_result:
print(response)
diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_endpoint_service_undeploy_model_async.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_endpoint_service_undeploy_model_async.py
index 575953c20e..f4c87cd294 100644
--- a/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_endpoint_service_undeploy_model_async.py
+++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_endpoint_service_undeploy_model_async.py
@@ -28,14 +28,12 @@
async def sample_undeploy_model():
- """Snippet for undeploy_model"""
-
# Create a client
client = aiplatform_v1beta1.EndpointServiceAsyncClient()
# Initialize request argument(s)
request = aiplatform_v1beta1.UndeployModelRequest(
- endpoint="projects/{project}/locations/{location}/endpoints/{endpoint}",
+ endpoint="endpoint_value",
deployed_model_id="deployed_model_id_value",
)
@@ -45,6 +43,8 @@ async def sample_undeploy_model():
print("Waiting for operation to complete...")
response = await operation.result()
+
+ # Handle the response
print(response)
# [END aiplatform_generated_aiplatform_v1beta1_EndpointService_UndeployModel_async]
diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_endpoint_service_undeploy_model_sync.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_endpoint_service_undeploy_model_sync.py
index 1fd3e05320..9107fe1553 100644
--- a/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_endpoint_service_undeploy_model_sync.py
+++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_endpoint_service_undeploy_model_sync.py
@@ -28,14 +28,12 @@
def sample_undeploy_model():
- """Snippet for undeploy_model"""
-
# Create a client
client = aiplatform_v1beta1.EndpointServiceClient()
# Initialize request argument(s)
request = aiplatform_v1beta1.UndeployModelRequest(
- endpoint="projects/{project}/locations/{location}/endpoints/{endpoint}",
+ endpoint="endpoint_value",
deployed_model_id="deployed_model_id_value",
)
@@ -45,6 +43,8 @@ def sample_undeploy_model():
print("Waiting for operation to complete...")
response = operation.result()
+
+ # Handle the response
print(response)
# [END aiplatform_generated_aiplatform_v1beta1_EndpointService_UndeployModel_sync]
diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_endpoint_service_update_endpoint_async.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_endpoint_service_update_endpoint_async.py
index 4393b51b97..18325685dd 100644
--- a/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_endpoint_service_update_endpoint_async.py
+++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_endpoint_service_update_endpoint_async.py
@@ -28,8 +28,6 @@
async def sample_update_endpoint():
- """Snippet for update_endpoint"""
-
# Create a client
client = aiplatform_v1beta1.EndpointServiceAsyncClient()
@@ -44,7 +42,7 @@ async def sample_update_endpoint():
# Make the request
response = await client.update_endpoint(request=request)
- # Handle response
+ # Handle the response
print(response)
# [END aiplatform_generated_aiplatform_v1beta1_EndpointService_UpdateEndpoint_async]
diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_endpoint_service_update_endpoint_sync.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_endpoint_service_update_endpoint_sync.py
index 50b41f5a68..b9c7e60334 100644
--- a/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_endpoint_service_update_endpoint_sync.py
+++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_endpoint_service_update_endpoint_sync.py
@@ -28,8 +28,6 @@
def sample_update_endpoint():
- """Snippet for update_endpoint"""
-
# Create a client
client = aiplatform_v1beta1.EndpointServiceClient()
@@ -44,7 +42,7 @@ def sample_update_endpoint():
# Make the request
response = client.update_endpoint(request=request)
- # Handle response
+ # Handle the response
print(response)
# [END aiplatform_generated_aiplatform_v1beta1_EndpointService_UpdateEndpoint_sync]
diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_featurestore_online_serving_service_read_feature_values_async.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_featurestore_online_serving_service_read_feature_values_async.py
index c9e27df53c..695faa816a 100644
--- a/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_featurestore_online_serving_service_read_feature_values_async.py
+++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_featurestore_online_serving_service_read_feature_values_async.py
@@ -28,8 +28,6 @@
async def sample_read_feature_values():
- """Snippet for read_feature_values"""
-
# Create a client
client = aiplatform_v1beta1.FeaturestoreOnlineServingServiceAsyncClient()
@@ -38,7 +36,7 @@ async def sample_read_feature_values():
feature_selector.id_matcher.ids = ['ids_value_1', 'ids_value_2']
request = aiplatform_v1beta1.ReadFeatureValuesRequest(
- entity_type="projects/{project}/locations/{location}/featurestores/{featurestore}/entityTypes/{entity_type}",
+ entity_type="entity_type_value",
entity_id="entity_id_value",
feature_selector=feature_selector,
)
@@ -46,7 +44,7 @@ async def sample_read_feature_values():
# Make the request
response = await client.read_feature_values(request=request)
- # Handle response
+ # Handle the response
print(response)
# [END aiplatform_generated_aiplatform_v1beta1_FeaturestoreOnlineServingService_ReadFeatureValues_async]
diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_featurestore_online_serving_service_read_feature_values_sync.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_featurestore_online_serving_service_read_feature_values_sync.py
index 0bd04feacd..a5aa640a7f 100644
--- a/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_featurestore_online_serving_service_read_feature_values_sync.py
+++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_featurestore_online_serving_service_read_feature_values_sync.py
@@ -28,8 +28,6 @@
def sample_read_feature_values():
- """Snippet for read_feature_values"""
-
# Create a client
client = aiplatform_v1beta1.FeaturestoreOnlineServingServiceClient()
@@ -38,7 +36,7 @@ def sample_read_feature_values():
feature_selector.id_matcher.ids = ['ids_value_1', 'ids_value_2']
request = aiplatform_v1beta1.ReadFeatureValuesRequest(
- entity_type="projects/{project}/locations/{location}/featurestores/{featurestore}/entityTypes/{entity_type}",
+ entity_type="entity_type_value",
entity_id="entity_id_value",
feature_selector=feature_selector,
)
@@ -46,7 +44,7 @@ def sample_read_feature_values():
# Make the request
response = client.read_feature_values(request=request)
- # Handle response
+ # Handle the response
print(response)
# [END aiplatform_generated_aiplatform_v1beta1_FeaturestoreOnlineServingService_ReadFeatureValues_sync]
diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_featurestore_online_serving_service_streaming_read_feature_values_async.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_featurestore_online_serving_service_streaming_read_feature_values_async.py
index 00bc9590d4..f094c89a90 100644
--- a/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_featurestore_online_serving_service_streaming_read_feature_values_async.py
+++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_featurestore_online_serving_service_streaming_read_feature_values_async.py
@@ -28,8 +28,6 @@
async def sample_streaming_read_feature_values():
- """Snippet for streaming_read_feature_values"""
-
# Create a client
client = aiplatform_v1beta1.FeaturestoreOnlineServingServiceAsyncClient()
@@ -38,13 +36,15 @@ async def sample_streaming_read_feature_values():
feature_selector.id_matcher.ids = ['ids_value_1', 'ids_value_2']
request = aiplatform_v1beta1.StreamingReadFeatureValuesRequest(
- entity_type="projects/{project}/locations/{location}/featurestores/{featurestore}/entityTypes/{entity_type}",
+ entity_type="entity_type_value",
entity_ids=['entity_ids_value_1', 'entity_ids_value_2'],
feature_selector=feature_selector,
)
# Make the request
stream = await client.streaming_read_feature_values(request=request)
+
+ # Handle the response
async for response in stream:
print(response)
diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_featurestore_online_serving_service_streaming_read_feature_values_sync.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_featurestore_online_serving_service_streaming_read_feature_values_sync.py
index 4017328f36..1e434b1afc 100644
--- a/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_featurestore_online_serving_service_streaming_read_feature_values_sync.py
+++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_featurestore_online_serving_service_streaming_read_feature_values_sync.py
@@ -28,8 +28,6 @@
def sample_streaming_read_feature_values():
- """Snippet for streaming_read_feature_values"""
-
# Create a client
client = aiplatform_v1beta1.FeaturestoreOnlineServingServiceClient()
@@ -38,13 +36,15 @@ def sample_streaming_read_feature_values():
feature_selector.id_matcher.ids = ['ids_value_1', 'ids_value_2']
request = aiplatform_v1beta1.StreamingReadFeatureValuesRequest(
- entity_type="projects/{project}/locations/{location}/featurestores/{featurestore}/entityTypes/{entity_type}",
+ entity_type="entity_type_value",
entity_ids=['entity_ids_value_1', 'entity_ids_value_2'],
feature_selector=feature_selector,
)
# Make the request
stream = client.streaming_read_feature_values(request=request)
+
+ # Handle the response
for response in stream:
print(response)
diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_featurestore_service_batch_create_features_async.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_featurestore_service_batch_create_features_async.py
index 8f0ca17e95..27a1bc6243 100644
--- a/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_featurestore_service_batch_create_features_async.py
+++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_featurestore_service_batch_create_features_async.py
@@ -28,19 +28,17 @@
async def sample_batch_create_features():
- """Snippet for batch_create_features"""
-
# Create a client
client = aiplatform_v1beta1.FeaturestoreServiceAsyncClient()
# Initialize request argument(s)
requests = aiplatform_v1beta1.CreateFeatureRequest()
- requests.parent = "projects/{project}/locations/{location}/featurestores/{featurestore}/entityTypes/{entity_type}"
+ requests.parent = "parent_value"
requests.feature.value_type = "BYTES"
requests.feature_id = "feature_id_value"
request = aiplatform_v1beta1.BatchCreateFeaturesRequest(
- parent="projects/{project}/locations/{location}/featurestores/{featurestore}/entityTypes/{entity_type}",
+ parent="parent_value",
requests=requests,
)
@@ -50,6 +48,8 @@ async def sample_batch_create_features():
print("Waiting for operation to complete...")
response = await operation.result()
+
+ # Handle the response
print(response)
# [END aiplatform_generated_aiplatform_v1beta1_FeaturestoreService_BatchCreateFeatures_async]
diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_featurestore_service_batch_create_features_sync.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_featurestore_service_batch_create_features_sync.py
index d359e00aad..a6066d929f 100644
--- a/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_featurestore_service_batch_create_features_sync.py
+++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_featurestore_service_batch_create_features_sync.py
@@ -28,19 +28,17 @@
def sample_batch_create_features():
- """Snippet for batch_create_features"""
-
# Create a client
client = aiplatform_v1beta1.FeaturestoreServiceClient()
# Initialize request argument(s)
requests = aiplatform_v1beta1.CreateFeatureRequest()
- requests.parent = "projects/{project}/locations/{location}/featurestores/{featurestore}/entityTypes/{entity_type}"
+ requests.parent = "parent_value"
requests.feature.value_type = "BYTES"
requests.feature_id = "feature_id_value"
request = aiplatform_v1beta1.BatchCreateFeaturesRequest(
- parent="projects/{project}/locations/{location}/featurestores/{featurestore}/entityTypes/{entity_type}",
+ parent="parent_value",
requests=requests,
)
@@ -50,6 +48,8 @@ def sample_batch_create_features():
print("Waiting for operation to complete...")
response = operation.result()
+
+ # Handle the response
print(response)
# [END aiplatform_generated_aiplatform_v1beta1_FeaturestoreService_BatchCreateFeatures_sync]
diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_featurestore_service_batch_read_feature_values_async.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_featurestore_service_batch_read_feature_values_async.py
index 1e4684cf99..7e4569fe25 100644
--- a/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_featurestore_service_batch_read_feature_values_async.py
+++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_featurestore_service_batch_read_feature_values_async.py
@@ -28,8 +28,6 @@
async def sample_batch_read_feature_values():
- """Snippet for batch_read_feature_values"""
-
# Create a client
client = aiplatform_v1beta1.FeaturestoreServiceAsyncClient()
@@ -46,7 +44,7 @@ async def sample_batch_read_feature_values():
request = aiplatform_v1beta1.BatchReadFeatureValuesRequest(
csv_read_instances=csv_read_instances,
- featurestore="projects/{project}/locations/{location}/featurestores/{featurestore}",
+ featurestore="featurestore_value",
destination=destination,
entity_type_specs=entity_type_specs,
)
@@ -57,6 +55,8 @@ async def sample_batch_read_feature_values():
print("Waiting for operation to complete...")
response = await operation.result()
+
+ # Handle the response
print(response)
# [END aiplatform_generated_aiplatform_v1beta1_FeaturestoreService_BatchReadFeatureValues_async]
diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_featurestore_service_batch_read_feature_values_sync.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_featurestore_service_batch_read_feature_values_sync.py
index 0eee36ff37..47809efd99 100644
--- a/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_featurestore_service_batch_read_feature_values_sync.py
+++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_featurestore_service_batch_read_feature_values_sync.py
@@ -28,8 +28,6 @@
def sample_batch_read_feature_values():
- """Snippet for batch_read_feature_values"""
-
# Create a client
client = aiplatform_v1beta1.FeaturestoreServiceClient()
@@ -46,7 +44,7 @@ def sample_batch_read_feature_values():
request = aiplatform_v1beta1.BatchReadFeatureValuesRequest(
csv_read_instances=csv_read_instances,
- featurestore="projects/{project}/locations/{location}/featurestores/{featurestore}",
+ featurestore="featurestore_value",
destination=destination,
entity_type_specs=entity_type_specs,
)
@@ -57,6 +55,8 @@ def sample_batch_read_feature_values():
print("Waiting for operation to complete...")
response = operation.result()
+
+ # Handle the response
print(response)
# [END aiplatform_generated_aiplatform_v1beta1_FeaturestoreService_BatchReadFeatureValues_sync]
diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_featurestore_service_create_entity_type_async.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_featurestore_service_create_entity_type_async.py
index d169132c2b..b03e1032a4 100644
--- a/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_featurestore_service_create_entity_type_async.py
+++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_featurestore_service_create_entity_type_async.py
@@ -28,14 +28,12 @@
async def sample_create_entity_type():
- """Snippet for create_entity_type"""
-
# Create a client
client = aiplatform_v1beta1.FeaturestoreServiceAsyncClient()
# Initialize request argument(s)
request = aiplatform_v1beta1.CreateEntityTypeRequest(
- parent="projects/{project}/locations/{location}/featurestores/{featurestore}",
+ parent="parent_value",
entity_type_id="entity_type_id_value",
)
@@ -45,6 +43,8 @@ async def sample_create_entity_type():
print("Waiting for operation to complete...")
response = await operation.result()
+
+ # Handle the response
print(response)
# [END aiplatform_generated_aiplatform_v1beta1_FeaturestoreService_CreateEntityType_async]
diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_featurestore_service_create_entity_type_sync.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_featurestore_service_create_entity_type_sync.py
index 58a707e768..b22044fd75 100644
--- a/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_featurestore_service_create_entity_type_sync.py
+++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_featurestore_service_create_entity_type_sync.py
@@ -28,14 +28,12 @@
def sample_create_entity_type():
- """Snippet for create_entity_type"""
-
# Create a client
client = aiplatform_v1beta1.FeaturestoreServiceClient()
# Initialize request argument(s)
request = aiplatform_v1beta1.CreateEntityTypeRequest(
- parent="projects/{project}/locations/{location}/featurestores/{featurestore}",
+ parent="parent_value",
entity_type_id="entity_type_id_value",
)
@@ -45,6 +43,8 @@ def sample_create_entity_type():
print("Waiting for operation to complete...")
response = operation.result()
+
+ # Handle the response
print(response)
# [END aiplatform_generated_aiplatform_v1beta1_FeaturestoreService_CreateEntityType_sync]
diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_featurestore_service_create_feature_async.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_featurestore_service_create_feature_async.py
index e8ec6e67f2..3551c949a0 100644
--- a/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_featurestore_service_create_feature_async.py
+++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_featurestore_service_create_feature_async.py
@@ -28,8 +28,6 @@
async def sample_create_feature():
- """Snippet for create_feature"""
-
# Create a client
client = aiplatform_v1beta1.FeaturestoreServiceAsyncClient()
@@ -38,7 +36,7 @@ async def sample_create_feature():
feature.value_type = "BYTES"
request = aiplatform_v1beta1.CreateFeatureRequest(
- parent="projects/{project}/locations/{location}/featurestores/{featurestore}/entityTypes/{entity_type}",
+ parent="parent_value",
feature=feature,
feature_id="feature_id_value",
)
@@ -49,6 +47,8 @@ async def sample_create_feature():
print("Waiting for operation to complete...")
response = await operation.result()
+
+ # Handle the response
print(response)
# [END aiplatform_generated_aiplatform_v1beta1_FeaturestoreService_CreateFeature_async]
diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_featurestore_service_create_feature_sync.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_featurestore_service_create_feature_sync.py
index da31d7db7b..07018ad808 100644
--- a/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_featurestore_service_create_feature_sync.py
+++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_featurestore_service_create_feature_sync.py
@@ -28,8 +28,6 @@
def sample_create_feature():
- """Snippet for create_feature"""
-
# Create a client
client = aiplatform_v1beta1.FeaturestoreServiceClient()
@@ -38,7 +36,7 @@ def sample_create_feature():
feature.value_type = "BYTES"
request = aiplatform_v1beta1.CreateFeatureRequest(
- parent="projects/{project}/locations/{location}/featurestores/{featurestore}/entityTypes/{entity_type}",
+ parent="parent_value",
feature=feature,
feature_id="feature_id_value",
)
@@ -49,6 +47,8 @@ def sample_create_feature():
print("Waiting for operation to complete...")
response = operation.result()
+
+ # Handle the response
print(response)
# [END aiplatform_generated_aiplatform_v1beta1_FeaturestoreService_CreateFeature_sync]
diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_featurestore_service_create_featurestore_async.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_featurestore_service_create_featurestore_async.py
index 605a7ac6a9..8375d2d81d 100644
--- a/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_featurestore_service_create_featurestore_async.py
+++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_featurestore_service_create_featurestore_async.py
@@ -28,14 +28,12 @@
async def sample_create_featurestore():
- """Snippet for create_featurestore"""
-
# Create a client
client = aiplatform_v1beta1.FeaturestoreServiceAsyncClient()
# Initialize request argument(s)
request = aiplatform_v1beta1.CreateFeaturestoreRequest(
- parent="projects/{project}/locations/{location}/featurestores/{featurestore}",
+ parent="parent_value",
featurestore_id="featurestore_id_value",
)
@@ -45,6 +43,8 @@ async def sample_create_featurestore():
print("Waiting for operation to complete...")
response = await operation.result()
+
+ # Handle the response
print(response)
# [END aiplatform_generated_aiplatform_v1beta1_FeaturestoreService_CreateFeaturestore_async]
diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_featurestore_service_create_featurestore_sync.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_featurestore_service_create_featurestore_sync.py
index 77828d031b..d41efa59c7 100644
--- a/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_featurestore_service_create_featurestore_sync.py
+++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_featurestore_service_create_featurestore_sync.py
@@ -28,14 +28,12 @@
def sample_create_featurestore():
- """Snippet for create_featurestore"""
-
# Create a client
client = aiplatform_v1beta1.FeaturestoreServiceClient()
# Initialize request argument(s)
request = aiplatform_v1beta1.CreateFeaturestoreRequest(
- parent="projects/{project}/locations/{location}/featurestores/{featurestore}",
+ parent="parent_value",
featurestore_id="featurestore_id_value",
)
@@ -45,6 +43,8 @@ def sample_create_featurestore():
print("Waiting for operation to complete...")
response = operation.result()
+
+ # Handle the response
print(response)
# [END aiplatform_generated_aiplatform_v1beta1_FeaturestoreService_CreateFeaturestore_sync]
diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_featurestore_service_delete_entity_type_async.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_featurestore_service_delete_entity_type_async.py
index 06fec44675..a00c7f7c45 100644
--- a/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_featurestore_service_delete_entity_type_async.py
+++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_featurestore_service_delete_entity_type_async.py
@@ -28,14 +28,12 @@
async def sample_delete_entity_type():
- """Snippet for delete_entity_type"""
-
# Create a client
client = aiplatform_v1beta1.FeaturestoreServiceAsyncClient()
# Initialize request argument(s)
request = aiplatform_v1beta1.DeleteEntityTypeRequest(
- name="projects/{project}/locations/{location}/featurestores/{featurestore}/entityTypes/{entity_type}",
+ name="name_value",
)
# Make the request
@@ -44,6 +42,8 @@ async def sample_delete_entity_type():
print("Waiting for operation to complete...")
response = await operation.result()
+
+ # Handle the response
print(response)
# [END aiplatform_generated_aiplatform_v1beta1_FeaturestoreService_DeleteEntityType_async]
diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_featurestore_service_delete_entity_type_sync.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_featurestore_service_delete_entity_type_sync.py
index 75fadfa9c1..13a25df43c 100644
--- a/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_featurestore_service_delete_entity_type_sync.py
+++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_featurestore_service_delete_entity_type_sync.py
@@ -28,14 +28,12 @@
def sample_delete_entity_type():
- """Snippet for delete_entity_type"""
-
# Create a client
client = aiplatform_v1beta1.FeaturestoreServiceClient()
# Initialize request argument(s)
request = aiplatform_v1beta1.DeleteEntityTypeRequest(
- name="projects/{project}/locations/{location}/featurestores/{featurestore}/entityTypes/{entity_type}",
+ name="name_value",
)
# Make the request
@@ -44,6 +42,8 @@ def sample_delete_entity_type():
print("Waiting for operation to complete...")
response = operation.result()
+
+ # Handle the response
print(response)
# [END aiplatform_generated_aiplatform_v1beta1_FeaturestoreService_DeleteEntityType_sync]
diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_featurestore_service_delete_feature_async.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_featurestore_service_delete_feature_async.py
index c93c449dbe..4a53587ece 100644
--- a/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_featurestore_service_delete_feature_async.py
+++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_featurestore_service_delete_feature_async.py
@@ -28,14 +28,12 @@
async def sample_delete_feature():
- """Snippet for delete_feature"""
-
# Create a client
client = aiplatform_v1beta1.FeaturestoreServiceAsyncClient()
# Initialize request argument(s)
request = aiplatform_v1beta1.DeleteFeatureRequest(
- name="projects/{project}/locations/{location}/featurestores/{featurestore}/entityTypes/{entity_type}/features/{feature}",
+ name="name_value",
)
# Make the request
@@ -44,6 +42,8 @@ async def sample_delete_feature():
print("Waiting for operation to complete...")
response = await operation.result()
+
+ # Handle the response
print(response)
# [END aiplatform_generated_aiplatform_v1beta1_FeaturestoreService_DeleteFeature_async]
diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_featurestore_service_delete_feature_sync.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_featurestore_service_delete_feature_sync.py
index 16c522bad3..f70e14360c 100644
--- a/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_featurestore_service_delete_feature_sync.py
+++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_featurestore_service_delete_feature_sync.py
@@ -28,14 +28,12 @@
def sample_delete_feature():
- """Snippet for delete_feature"""
-
# Create a client
client = aiplatform_v1beta1.FeaturestoreServiceClient()
# Initialize request argument(s)
request = aiplatform_v1beta1.DeleteFeatureRequest(
- name="projects/{project}/locations/{location}/featurestores/{featurestore}/entityTypes/{entity_type}/features/{feature}",
+ name="name_value",
)
# Make the request
@@ -44,6 +42,8 @@ def sample_delete_feature():
print("Waiting for operation to complete...")
response = operation.result()
+
+ # Handle the response
print(response)
# [END aiplatform_generated_aiplatform_v1beta1_FeaturestoreService_DeleteFeature_sync]
diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_featurestore_service_delete_featurestore_async.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_featurestore_service_delete_featurestore_async.py
index ed176e4905..bff7359bce 100644
--- a/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_featurestore_service_delete_featurestore_async.py
+++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_featurestore_service_delete_featurestore_async.py
@@ -28,14 +28,12 @@
async def sample_delete_featurestore():
- """Snippet for delete_featurestore"""
-
# Create a client
client = aiplatform_v1beta1.FeaturestoreServiceAsyncClient()
# Initialize request argument(s)
request = aiplatform_v1beta1.DeleteFeaturestoreRequest(
- name="projects/{project}/locations/{location}/featurestores/{featurestore}",
+ name="name_value",
)
# Make the request
@@ -44,6 +42,8 @@ async def sample_delete_featurestore():
print("Waiting for operation to complete...")
response = await operation.result()
+
+ # Handle the response
print(response)
# [END aiplatform_generated_aiplatform_v1beta1_FeaturestoreService_DeleteFeaturestore_async]
diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_featurestore_service_delete_featurestore_sync.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_featurestore_service_delete_featurestore_sync.py
index 237ddb39e1..a8a5ec56d1 100644
--- a/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_featurestore_service_delete_featurestore_sync.py
+++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_featurestore_service_delete_featurestore_sync.py
@@ -28,14 +28,12 @@
def sample_delete_featurestore():
- """Snippet for delete_featurestore"""
-
# Create a client
client = aiplatform_v1beta1.FeaturestoreServiceClient()
# Initialize request argument(s)
request = aiplatform_v1beta1.DeleteFeaturestoreRequest(
- name="projects/{project}/locations/{location}/featurestores/{featurestore}",
+ name="name_value",
)
# Make the request
@@ -44,6 +42,8 @@ def sample_delete_featurestore():
print("Waiting for operation to complete...")
response = operation.result()
+
+ # Handle the response
print(response)
# [END aiplatform_generated_aiplatform_v1beta1_FeaturestoreService_DeleteFeaturestore_sync]
diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_featurestore_service_export_feature_values_async.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_featurestore_service_export_feature_values_async.py
index f63676879c..3fb5566916 100644
--- a/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_featurestore_service_export_feature_values_async.py
+++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_featurestore_service_export_feature_values_async.py
@@ -28,8 +28,6 @@
async def sample_export_feature_values():
- """Snippet for export_feature_values"""
-
# Create a client
client = aiplatform_v1beta1.FeaturestoreServiceAsyncClient()
@@ -41,7 +39,7 @@ async def sample_export_feature_values():
feature_selector.id_matcher.ids = ['ids_value_1', 'ids_value_2']
request = aiplatform_v1beta1.ExportFeatureValuesRequest(
- entity_type="projects/{project}/locations/{location}/featurestores/{featurestore}/entityTypes/{entity_type}",
+ entity_type="entity_type_value",
destination=destination,
feature_selector=feature_selector,
)
@@ -52,6 +50,8 @@ async def sample_export_feature_values():
print("Waiting for operation to complete...")
response = await operation.result()
+
+ # Handle the response
print(response)
# [END aiplatform_generated_aiplatform_v1beta1_FeaturestoreService_ExportFeatureValues_async]
diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_featurestore_service_export_feature_values_sync.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_featurestore_service_export_feature_values_sync.py
index 6194b81033..0fed7b17e8 100644
--- a/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_featurestore_service_export_feature_values_sync.py
+++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_featurestore_service_export_feature_values_sync.py
@@ -28,8 +28,6 @@
def sample_export_feature_values():
- """Snippet for export_feature_values"""
-
# Create a client
client = aiplatform_v1beta1.FeaturestoreServiceClient()
@@ -41,7 +39,7 @@ def sample_export_feature_values():
feature_selector.id_matcher.ids = ['ids_value_1', 'ids_value_2']
request = aiplatform_v1beta1.ExportFeatureValuesRequest(
- entity_type="projects/{project}/locations/{location}/featurestores/{featurestore}/entityTypes/{entity_type}",
+ entity_type="entity_type_value",
destination=destination,
feature_selector=feature_selector,
)
@@ -52,6 +50,8 @@ def sample_export_feature_values():
print("Waiting for operation to complete...")
response = operation.result()
+
+ # Handle the response
print(response)
# [END aiplatform_generated_aiplatform_v1beta1_FeaturestoreService_ExportFeatureValues_sync]
diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_featurestore_service_get_entity_type_async.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_featurestore_service_get_entity_type_async.py
index 7b39f5d834..319d5993c2 100644
--- a/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_featurestore_service_get_entity_type_async.py
+++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_featurestore_service_get_entity_type_async.py
@@ -28,20 +28,18 @@
async def sample_get_entity_type():
- """Snippet for get_entity_type"""
-
# Create a client
client = aiplatform_v1beta1.FeaturestoreServiceAsyncClient()
# Initialize request argument(s)
request = aiplatform_v1beta1.GetEntityTypeRequest(
- name="projects/{project}/locations/{location}/featurestores/{featurestore}/entityTypes/{entity_type}",
+ name="name_value",
)
# Make the request
response = await client.get_entity_type(request=request)
- # Handle response
+ # Handle the response
print(response)
# [END aiplatform_generated_aiplatform_v1beta1_FeaturestoreService_GetEntityType_async]
diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_featurestore_service_get_entity_type_sync.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_featurestore_service_get_entity_type_sync.py
index 405af58e39..b9c22b201a 100644
--- a/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_featurestore_service_get_entity_type_sync.py
+++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_featurestore_service_get_entity_type_sync.py
@@ -28,20 +28,18 @@
def sample_get_entity_type():
- """Snippet for get_entity_type"""
-
# Create a client
client = aiplatform_v1beta1.FeaturestoreServiceClient()
# Initialize request argument(s)
request = aiplatform_v1beta1.GetEntityTypeRequest(
- name="projects/{project}/locations/{location}/featurestores/{featurestore}/entityTypes/{entity_type}",
+ name="name_value",
)
# Make the request
response = client.get_entity_type(request=request)
- # Handle response
+ # Handle the response
print(response)
# [END aiplatform_generated_aiplatform_v1beta1_FeaturestoreService_GetEntityType_sync]
diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_featurestore_service_get_feature_async.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_featurestore_service_get_feature_async.py
index 05ca6d59c2..2fc8940f23 100644
--- a/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_featurestore_service_get_feature_async.py
+++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_featurestore_service_get_feature_async.py
@@ -28,20 +28,18 @@
async def sample_get_feature():
- """Snippet for get_feature"""
-
# Create a client
client = aiplatform_v1beta1.FeaturestoreServiceAsyncClient()
# Initialize request argument(s)
request = aiplatform_v1beta1.GetFeatureRequest(
- name="projects/{project}/locations/{location}/featurestores/{featurestore}/entityTypes/{entity_type}/features/{feature}",
+ name="name_value",
)
# Make the request
response = await client.get_feature(request=request)
- # Handle response
+ # Handle the response
print(response)
# [END aiplatform_generated_aiplatform_v1beta1_FeaturestoreService_GetFeature_async]
diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_featurestore_service_get_feature_sync.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_featurestore_service_get_feature_sync.py
index 906591e6d2..e34fd9dca0 100644
--- a/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_featurestore_service_get_feature_sync.py
+++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_featurestore_service_get_feature_sync.py
@@ -28,20 +28,18 @@
def sample_get_feature():
- """Snippet for get_feature"""
-
# Create a client
client = aiplatform_v1beta1.FeaturestoreServiceClient()
# Initialize request argument(s)
request = aiplatform_v1beta1.GetFeatureRequest(
- name="projects/{project}/locations/{location}/featurestores/{featurestore}/entityTypes/{entity_type}/features/{feature}",
+ name="name_value",
)
# Make the request
response = client.get_feature(request=request)
- # Handle response
+ # Handle the response
print(response)
# [END aiplatform_generated_aiplatform_v1beta1_FeaturestoreService_GetFeature_sync]
diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_featurestore_service_get_featurestore_async.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_featurestore_service_get_featurestore_async.py
index f8f382cf24..b025ddb1b4 100644
--- a/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_featurestore_service_get_featurestore_async.py
+++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_featurestore_service_get_featurestore_async.py
@@ -28,20 +28,18 @@
async def sample_get_featurestore():
- """Snippet for get_featurestore"""
-
# Create a client
client = aiplatform_v1beta1.FeaturestoreServiceAsyncClient()
# Initialize request argument(s)
request = aiplatform_v1beta1.GetFeaturestoreRequest(
- name="projects/{project}/locations/{location}/featurestores/{featurestore}",
+ name="name_value",
)
# Make the request
response = await client.get_featurestore(request=request)
- # Handle response
+ # Handle the response
print(response)
# [END aiplatform_generated_aiplatform_v1beta1_FeaturestoreService_GetFeaturestore_async]
diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_featurestore_service_get_featurestore_sync.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_featurestore_service_get_featurestore_sync.py
index fed9106a74..d445f4dee3 100644
--- a/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_featurestore_service_get_featurestore_sync.py
+++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_featurestore_service_get_featurestore_sync.py
@@ -28,20 +28,18 @@
def sample_get_featurestore():
- """Snippet for get_featurestore"""
-
# Create a client
client = aiplatform_v1beta1.FeaturestoreServiceClient()
# Initialize request argument(s)
request = aiplatform_v1beta1.GetFeaturestoreRequest(
- name="projects/{project}/locations/{location}/featurestores/{featurestore}",
+ name="name_value",
)
# Make the request
response = client.get_featurestore(request=request)
- # Handle response
+ # Handle the response
print(response)
# [END aiplatform_generated_aiplatform_v1beta1_FeaturestoreService_GetFeaturestore_sync]
diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_featurestore_service_import_feature_values_async.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_featurestore_service_import_feature_values_async.py
index 3fa72e136b..701f1b7fbb 100644
--- a/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_featurestore_service_import_feature_values_async.py
+++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_featurestore_service_import_feature_values_async.py
@@ -28,8 +28,6 @@
async def sample_import_feature_values():
- """Snippet for import_feature_values"""
-
# Create a client
client = aiplatform_v1beta1.FeaturestoreServiceAsyncClient()
@@ -43,7 +41,7 @@ async def sample_import_feature_values():
request = aiplatform_v1beta1.ImportFeatureValuesRequest(
avro_source=avro_source,
feature_time_field="feature_time_field_value",
- entity_type="projects/{project}/locations/{location}/featurestores/{featurestore}/entityTypes/{entity_type}",
+ entity_type="entity_type_value",
feature_specs=feature_specs,
)
@@ -53,6 +51,8 @@ async def sample_import_feature_values():
print("Waiting for operation to complete...")
response = await operation.result()
+
+ # Handle the response
print(response)
# [END aiplatform_generated_aiplatform_v1beta1_FeaturestoreService_ImportFeatureValues_async]
diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_featurestore_service_import_feature_values_sync.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_featurestore_service_import_feature_values_sync.py
index 8cd440ea5a..8db8247482 100644
--- a/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_featurestore_service_import_feature_values_sync.py
+++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_featurestore_service_import_feature_values_sync.py
@@ -28,8 +28,6 @@
def sample_import_feature_values():
- """Snippet for import_feature_values"""
-
# Create a client
client = aiplatform_v1beta1.FeaturestoreServiceClient()
@@ -43,7 +41,7 @@ def sample_import_feature_values():
request = aiplatform_v1beta1.ImportFeatureValuesRequest(
avro_source=avro_source,
feature_time_field="feature_time_field_value",
- entity_type="projects/{project}/locations/{location}/featurestores/{featurestore}/entityTypes/{entity_type}",
+ entity_type="entity_type_value",
feature_specs=feature_specs,
)
@@ -53,6 +51,8 @@ def sample_import_feature_values():
print("Waiting for operation to complete...")
response = operation.result()
+
+ # Handle the response
print(response)
# [END aiplatform_generated_aiplatform_v1beta1_FeaturestoreService_ImportFeatureValues_sync]
diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_featurestore_service_list_entity_types_async.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_featurestore_service_list_entity_types_async.py
index 650a394185..4d487faaaf 100644
--- a/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_featurestore_service_list_entity_types_async.py
+++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_featurestore_service_list_entity_types_async.py
@@ -28,18 +28,18 @@
async def sample_list_entity_types():
- """Snippet for list_entity_types"""
-
# Create a client
client = aiplatform_v1beta1.FeaturestoreServiceAsyncClient()
# Initialize request argument(s)
request = aiplatform_v1beta1.ListEntityTypesRequest(
- parent="projects/{project}/locations/{location}/featurestores/{featurestore}/entityTypes/{entity_type}",
+ parent="parent_value",
)
# Make the request
page_result = client.list_entity_types(request=request)
+
+ # Handle the response
async for response in page_result:
print(response)
diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_featurestore_service_list_entity_types_sync.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_featurestore_service_list_entity_types_sync.py
index 6f882c5f7d..7cb144224d 100644
--- a/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_featurestore_service_list_entity_types_sync.py
+++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_featurestore_service_list_entity_types_sync.py
@@ -28,18 +28,18 @@
def sample_list_entity_types():
- """Snippet for list_entity_types"""
-
# Create a client
client = aiplatform_v1beta1.FeaturestoreServiceClient()
# Initialize request argument(s)
request = aiplatform_v1beta1.ListEntityTypesRequest(
- parent="projects/{project}/locations/{location}/featurestores/{featurestore}/entityTypes/{entity_type}",
+ parent="parent_value",
)
# Make the request
page_result = client.list_entity_types(request=request)
+
+ # Handle the response
for response in page_result:
print(response)
diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_featurestore_service_list_features_async.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_featurestore_service_list_features_async.py
index 96f88c83e6..0086aa091f 100644
--- a/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_featurestore_service_list_features_async.py
+++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_featurestore_service_list_features_async.py
@@ -28,18 +28,18 @@
async def sample_list_features():
- """Snippet for list_features"""
-
# Create a client
client = aiplatform_v1beta1.FeaturestoreServiceAsyncClient()
# Initialize request argument(s)
request = aiplatform_v1beta1.ListFeaturesRequest(
- parent="projects/{project}/locations/{location}/featurestores/{featurestore}/entityTypes/{entity_type}/features/{feature}",
+ parent="parent_value",
)
# Make the request
page_result = client.list_features(request=request)
+
+ # Handle the response
async for response in page_result:
print(response)
diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_featurestore_service_list_features_sync.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_featurestore_service_list_features_sync.py
index 6fc2ef490d..c721a29bc4 100644
--- a/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_featurestore_service_list_features_sync.py
+++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_featurestore_service_list_features_sync.py
@@ -28,18 +28,18 @@
def sample_list_features():
- """Snippet for list_features"""
-
# Create a client
client = aiplatform_v1beta1.FeaturestoreServiceClient()
# Initialize request argument(s)
request = aiplatform_v1beta1.ListFeaturesRequest(
- parent="projects/{project}/locations/{location}/featurestores/{featurestore}/entityTypes/{entity_type}/features/{feature}",
+ parent="parent_value",
)
# Make the request
page_result = client.list_features(request=request)
+
+ # Handle the response
for response in page_result:
print(response)
diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_featurestore_service_list_featurestores_async.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_featurestore_service_list_featurestores_async.py
index f38e948e17..020ec74245 100644
--- a/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_featurestore_service_list_featurestores_async.py
+++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_featurestore_service_list_featurestores_async.py
@@ -28,18 +28,18 @@
async def sample_list_featurestores():
- """Snippet for list_featurestores"""
-
# Create a client
client = aiplatform_v1beta1.FeaturestoreServiceAsyncClient()
# Initialize request argument(s)
request = aiplatform_v1beta1.ListFeaturestoresRequest(
- parent="projects/{project}/locations/{location}/featurestores/{featurestore}",
+ parent="parent_value",
)
# Make the request
page_result = client.list_featurestores(request=request)
+
+ # Handle the response
async for response in page_result:
print(response)
diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_featurestore_service_list_featurestores_sync.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_featurestore_service_list_featurestores_sync.py
index 4b0bc062a6..66088ea771 100644
--- a/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_featurestore_service_list_featurestores_sync.py
+++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_featurestore_service_list_featurestores_sync.py
@@ -28,18 +28,18 @@
def sample_list_featurestores():
- """Snippet for list_featurestores"""
-
# Create a client
client = aiplatform_v1beta1.FeaturestoreServiceClient()
# Initialize request argument(s)
request = aiplatform_v1beta1.ListFeaturestoresRequest(
- parent="projects/{project}/locations/{location}/featurestores/{featurestore}",
+ parent="parent_value",
)
# Make the request
page_result = client.list_featurestores(request=request)
+
+ # Handle the response
for response in page_result:
print(response)
diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_featurestore_service_search_features_async.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_featurestore_service_search_features_async.py
index 91c8e18876..2f7bb2b229 100644
--- a/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_featurestore_service_search_features_async.py
+++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_featurestore_service_search_features_async.py
@@ -28,18 +28,18 @@
async def sample_search_features():
- """Snippet for search_features"""
-
# Create a client
client = aiplatform_v1beta1.FeaturestoreServiceAsyncClient()
# Initialize request argument(s)
request = aiplatform_v1beta1.SearchFeaturesRequest(
- location="projects/{project}/locations/{location}",
+ location="location_value",
)
# Make the request
page_result = client.search_features(request=request)
+
+ # Handle the response
async for response in page_result:
print(response)
diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_featurestore_service_search_features_sync.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_featurestore_service_search_features_sync.py
index 27043b73b4..654cbe4137 100644
--- a/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_featurestore_service_search_features_sync.py
+++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_featurestore_service_search_features_sync.py
@@ -28,18 +28,18 @@
def sample_search_features():
- """Snippet for search_features"""
-
# Create a client
client = aiplatform_v1beta1.FeaturestoreServiceClient()
# Initialize request argument(s)
request = aiplatform_v1beta1.SearchFeaturesRequest(
- location="projects/{project}/locations/{location}",
+ location="location_value",
)
# Make the request
page_result = client.search_features(request=request)
+
+ # Handle the response
for response in page_result:
print(response)
diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_featurestore_service_update_entity_type_async.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_featurestore_service_update_entity_type_async.py
index f4d758b456..c1a1297247 100644
--- a/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_featurestore_service_update_entity_type_async.py
+++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_featurestore_service_update_entity_type_async.py
@@ -28,8 +28,6 @@
async def sample_update_entity_type():
- """Snippet for update_entity_type"""
-
# Create a client
client = aiplatform_v1beta1.FeaturestoreServiceAsyncClient()
@@ -40,7 +38,7 @@ async def sample_update_entity_type():
# Make the request
response = await client.update_entity_type(request=request)
- # Handle response
+ # Handle the response
print(response)
# [END aiplatform_generated_aiplatform_v1beta1_FeaturestoreService_UpdateEntityType_async]
diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_featurestore_service_update_entity_type_sync.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_featurestore_service_update_entity_type_sync.py
index bebf403e9a..612279a558 100644
--- a/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_featurestore_service_update_entity_type_sync.py
+++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_featurestore_service_update_entity_type_sync.py
@@ -28,8 +28,6 @@
def sample_update_entity_type():
- """Snippet for update_entity_type"""
-
# Create a client
client = aiplatform_v1beta1.FeaturestoreServiceClient()
@@ -40,7 +38,7 @@ def sample_update_entity_type():
# Make the request
response = client.update_entity_type(request=request)
- # Handle response
+ # Handle the response
print(response)
# [END aiplatform_generated_aiplatform_v1beta1_FeaturestoreService_UpdateEntityType_sync]
diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_featurestore_service_update_feature_async.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_featurestore_service_update_feature_async.py
index 1aa364be3b..69dccf4786 100644
--- a/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_featurestore_service_update_feature_async.py
+++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_featurestore_service_update_feature_async.py
@@ -28,8 +28,6 @@
async def sample_update_feature():
- """Snippet for update_feature"""
-
# Create a client
client = aiplatform_v1beta1.FeaturestoreServiceAsyncClient()
@@ -44,7 +42,7 @@ async def sample_update_feature():
# Make the request
response = await client.update_feature(request=request)
- # Handle response
+ # Handle the response
print(response)
# [END aiplatform_generated_aiplatform_v1beta1_FeaturestoreService_UpdateFeature_async]
diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_featurestore_service_update_feature_sync.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_featurestore_service_update_feature_sync.py
index e7dd948ac5..171428cfaa 100644
--- a/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_featurestore_service_update_feature_sync.py
+++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_featurestore_service_update_feature_sync.py
@@ -28,8 +28,6 @@
def sample_update_feature():
- """Snippet for update_feature"""
-
# Create a client
client = aiplatform_v1beta1.FeaturestoreServiceClient()
@@ -44,7 +42,7 @@ def sample_update_feature():
# Make the request
response = client.update_feature(request=request)
- # Handle response
+ # Handle the response
print(response)
# [END aiplatform_generated_aiplatform_v1beta1_FeaturestoreService_UpdateFeature_sync]
diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_featurestore_service_update_featurestore_async.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_featurestore_service_update_featurestore_async.py
index 7f1179b8ff..4a8d2d238c 100644
--- a/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_featurestore_service_update_featurestore_async.py
+++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_featurestore_service_update_featurestore_async.py
@@ -28,8 +28,6 @@
async def sample_update_featurestore():
- """Snippet for update_featurestore"""
-
# Create a client
client = aiplatform_v1beta1.FeaturestoreServiceAsyncClient()
@@ -43,6 +41,8 @@ async def sample_update_featurestore():
print("Waiting for operation to complete...")
response = await operation.result()
+
+ # Handle the response
print(response)
# [END aiplatform_generated_aiplatform_v1beta1_FeaturestoreService_UpdateFeaturestore_async]
diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_featurestore_service_update_featurestore_sync.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_featurestore_service_update_featurestore_sync.py
index 1741e5f7a3..e219f5751a 100644
--- a/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_featurestore_service_update_featurestore_sync.py
+++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_featurestore_service_update_featurestore_sync.py
@@ -28,8 +28,6 @@
def sample_update_featurestore():
- """Snippet for update_featurestore"""
-
# Create a client
client = aiplatform_v1beta1.FeaturestoreServiceClient()
@@ -43,6 +41,8 @@ def sample_update_featurestore():
print("Waiting for operation to complete...")
response = operation.result()
+
+ # Handle the response
print(response)
# [END aiplatform_generated_aiplatform_v1beta1_FeaturestoreService_UpdateFeaturestore_sync]
diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_index_endpoint_service_create_index_endpoint_async.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_index_endpoint_service_create_index_endpoint_async.py
index a11a611f1c..7879d75029 100644
--- a/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_index_endpoint_service_create_index_endpoint_async.py
+++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_index_endpoint_service_create_index_endpoint_async.py
@@ -28,18 +28,15 @@
async def sample_create_index_endpoint():
- """Snippet for create_index_endpoint"""
-
# Create a client
client = aiplatform_v1beta1.IndexEndpointServiceAsyncClient()
# Initialize request argument(s)
index_endpoint = aiplatform_v1beta1.IndexEndpoint()
index_endpoint.display_name = "display_name_value"
- index_endpoint.network = "network_value"
request = aiplatform_v1beta1.CreateIndexEndpointRequest(
- parent="projects/{project}/locations/{location}",
+ parent="parent_value",
index_endpoint=index_endpoint,
)
@@ -49,6 +46,8 @@ async def sample_create_index_endpoint():
print("Waiting for operation to complete...")
response = await operation.result()
+
+ # Handle the response
print(response)
# [END aiplatform_generated_aiplatform_v1beta1_IndexEndpointService_CreateIndexEndpoint_async]
diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_index_endpoint_service_create_index_endpoint_sync.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_index_endpoint_service_create_index_endpoint_sync.py
index 7a5e8fdb17..da9b3c3f71 100644
--- a/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_index_endpoint_service_create_index_endpoint_sync.py
+++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_index_endpoint_service_create_index_endpoint_sync.py
@@ -28,18 +28,15 @@
def sample_create_index_endpoint():
- """Snippet for create_index_endpoint"""
-
# Create a client
client = aiplatform_v1beta1.IndexEndpointServiceClient()
# Initialize request argument(s)
index_endpoint = aiplatform_v1beta1.IndexEndpoint()
index_endpoint.display_name = "display_name_value"
- index_endpoint.network = "network_value"
request = aiplatform_v1beta1.CreateIndexEndpointRequest(
- parent="projects/{project}/locations/{location}",
+ parent="parent_value",
index_endpoint=index_endpoint,
)
@@ -49,6 +46,8 @@ def sample_create_index_endpoint():
print("Waiting for operation to complete...")
response = operation.result()
+
+ # Handle the response
print(response)
# [END aiplatform_generated_aiplatform_v1beta1_IndexEndpointService_CreateIndexEndpoint_sync]
diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_index_endpoint_service_delete_index_endpoint_async.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_index_endpoint_service_delete_index_endpoint_async.py
index 42c28c35ba..f6d8715a74 100644
--- a/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_index_endpoint_service_delete_index_endpoint_async.py
+++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_index_endpoint_service_delete_index_endpoint_async.py
@@ -28,14 +28,12 @@
async def sample_delete_index_endpoint():
- """Snippet for delete_index_endpoint"""
-
# Create a client
client = aiplatform_v1beta1.IndexEndpointServiceAsyncClient()
# Initialize request argument(s)
request = aiplatform_v1beta1.DeleteIndexEndpointRequest(
- name="projects/{project}/locations/{location}/indexEndpoints/{index_endpoint}",
+ name="name_value",
)
# Make the request
@@ -44,6 +42,8 @@ async def sample_delete_index_endpoint():
print("Waiting for operation to complete...")
response = await operation.result()
+
+ # Handle the response
print(response)
# [END aiplatform_generated_aiplatform_v1beta1_IndexEndpointService_DeleteIndexEndpoint_async]
diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_index_endpoint_service_delete_index_endpoint_sync.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_index_endpoint_service_delete_index_endpoint_sync.py
index dc6460c11d..af13bf8dea 100644
--- a/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_index_endpoint_service_delete_index_endpoint_sync.py
+++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_index_endpoint_service_delete_index_endpoint_sync.py
@@ -28,14 +28,12 @@
def sample_delete_index_endpoint():
- """Snippet for delete_index_endpoint"""
-
# Create a client
client = aiplatform_v1beta1.IndexEndpointServiceClient()
# Initialize request argument(s)
request = aiplatform_v1beta1.DeleteIndexEndpointRequest(
- name="projects/{project}/locations/{location}/indexEndpoints/{index_endpoint}",
+ name="name_value",
)
# Make the request
@@ -44,6 +42,8 @@ def sample_delete_index_endpoint():
print("Waiting for operation to complete...")
response = operation.result()
+
+ # Handle the response
print(response)
# [END aiplatform_generated_aiplatform_v1beta1_IndexEndpointService_DeleteIndexEndpoint_sync]
diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_index_endpoint_service_deploy_index_async.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_index_endpoint_service_deploy_index_async.py
index ef631481dd..e4b2d1067b 100644
--- a/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_index_endpoint_service_deploy_index_async.py
+++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_index_endpoint_service_deploy_index_async.py
@@ -28,18 +28,16 @@
async def sample_deploy_index():
- """Snippet for deploy_index"""
-
# Create a client
client = aiplatform_v1beta1.IndexEndpointServiceAsyncClient()
# Initialize request argument(s)
deployed_index = aiplatform_v1beta1.DeployedIndex()
deployed_index.id = "id_value"
- deployed_index.index = "projects/{project}/locations/{location}/indexes/{index}"
+ deployed_index.index = "index_value"
request = aiplatform_v1beta1.DeployIndexRequest(
- index_endpoint="projects/{project}/locations/{location}/indexEndpoints/{index_endpoint}",
+ index_endpoint="index_endpoint_value",
deployed_index=deployed_index,
)
@@ -49,6 +47,8 @@ async def sample_deploy_index():
print("Waiting for operation to complete...")
response = await operation.result()
+
+ # Handle the response
print(response)
# [END aiplatform_generated_aiplatform_v1beta1_IndexEndpointService_DeployIndex_async]
diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_index_endpoint_service_deploy_index_sync.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_index_endpoint_service_deploy_index_sync.py
index 9d80ed21eb..918e758546 100644
--- a/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_index_endpoint_service_deploy_index_sync.py
+++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_index_endpoint_service_deploy_index_sync.py
@@ -28,18 +28,16 @@
def sample_deploy_index():
- """Snippet for deploy_index"""
-
# Create a client
client = aiplatform_v1beta1.IndexEndpointServiceClient()
# Initialize request argument(s)
deployed_index = aiplatform_v1beta1.DeployedIndex()
deployed_index.id = "id_value"
- deployed_index.index = "projects/{project}/locations/{location}/indexes/{index}"
+ deployed_index.index = "index_value"
request = aiplatform_v1beta1.DeployIndexRequest(
- index_endpoint="projects/{project}/locations/{location}/indexEndpoints/{index_endpoint}",
+ index_endpoint="index_endpoint_value",
deployed_index=deployed_index,
)
@@ -49,6 +47,8 @@ def sample_deploy_index():
print("Waiting for operation to complete...")
response = operation.result()
+
+ # Handle the response
print(response)
# [END aiplatform_generated_aiplatform_v1beta1_IndexEndpointService_DeployIndex_sync]
diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_index_endpoint_service_get_index_endpoint_async.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_index_endpoint_service_get_index_endpoint_async.py
index 760460c15e..626f412f3d 100644
--- a/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_index_endpoint_service_get_index_endpoint_async.py
+++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_index_endpoint_service_get_index_endpoint_async.py
@@ -28,20 +28,18 @@
async def sample_get_index_endpoint():
- """Snippet for get_index_endpoint"""
-
# Create a client
client = aiplatform_v1beta1.IndexEndpointServiceAsyncClient()
# Initialize request argument(s)
request = aiplatform_v1beta1.GetIndexEndpointRequest(
- name="projects/{project}/locations/{location}/indexEndpoints/{index_endpoint}",
+ name="name_value",
)
# Make the request
response = await client.get_index_endpoint(request=request)
- # Handle response
+ # Handle the response
print(response)
# [END aiplatform_generated_aiplatform_v1beta1_IndexEndpointService_GetIndexEndpoint_async]
diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_index_endpoint_service_get_index_endpoint_sync.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_index_endpoint_service_get_index_endpoint_sync.py
index 3086025e76..ac824a106b 100644
--- a/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_index_endpoint_service_get_index_endpoint_sync.py
+++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_index_endpoint_service_get_index_endpoint_sync.py
@@ -28,20 +28,18 @@
def sample_get_index_endpoint():
- """Snippet for get_index_endpoint"""
-
# Create a client
client = aiplatform_v1beta1.IndexEndpointServiceClient()
# Initialize request argument(s)
request = aiplatform_v1beta1.GetIndexEndpointRequest(
- name="projects/{project}/locations/{location}/indexEndpoints/{index_endpoint}",
+ name="name_value",
)
# Make the request
response = client.get_index_endpoint(request=request)
- # Handle response
+ # Handle the response
print(response)
# [END aiplatform_generated_aiplatform_v1beta1_IndexEndpointService_GetIndexEndpoint_sync]
diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_index_endpoint_service_list_index_endpoints_async.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_index_endpoint_service_list_index_endpoints_async.py
index 0bca79ad32..05836f5a8d 100644
--- a/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_index_endpoint_service_list_index_endpoints_async.py
+++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_index_endpoint_service_list_index_endpoints_async.py
@@ -28,18 +28,18 @@
async def sample_list_index_endpoints():
- """Snippet for list_index_endpoints"""
-
# Create a client
client = aiplatform_v1beta1.IndexEndpointServiceAsyncClient()
# Initialize request argument(s)
request = aiplatform_v1beta1.ListIndexEndpointsRequest(
- parent="projects/{project}/locations/{location}",
+ parent="parent_value",
)
# Make the request
page_result = client.list_index_endpoints(request=request)
+
+ # Handle the response
async for response in page_result:
print(response)
diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_index_endpoint_service_list_index_endpoints_sync.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_index_endpoint_service_list_index_endpoints_sync.py
index 9024b7adbb..8f4784b0d4 100644
--- a/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_index_endpoint_service_list_index_endpoints_sync.py
+++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_index_endpoint_service_list_index_endpoints_sync.py
@@ -28,18 +28,18 @@
def sample_list_index_endpoints():
- """Snippet for list_index_endpoints"""
-
# Create a client
client = aiplatform_v1beta1.IndexEndpointServiceClient()
# Initialize request argument(s)
request = aiplatform_v1beta1.ListIndexEndpointsRequest(
- parent="projects/{project}/locations/{location}",
+ parent="parent_value",
)
# Make the request
page_result = client.list_index_endpoints(request=request)
+
+ # Handle the response
for response in page_result:
print(response)
diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_index_endpoint_service_mutate_deployed_index_async.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_index_endpoint_service_mutate_deployed_index_async.py
new file mode 100644
index 0000000000..a7d431f9e8
--- /dev/null
+++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_index_endpoint_service_mutate_deployed_index_async.py
@@ -0,0 +1,54 @@
+# -*- coding: utf-8 -*-
+# Copyright 2020 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+# Generated code. DO NOT EDIT!
+#
+# Snippet for MutateDeployedIndex
+# NOTE: This snippet has been automatically generated for illustrative purposes only.
+# It may require modifications to work in your environment.
+
+# To install the latest published package dependency, execute the following:
+# python3 -m pip install google-cloud-aiplatform
+
+
+# [START aiplatform_generated_aiplatform_v1beta1_IndexEndpointService_MutateDeployedIndex_async]
+from google.cloud import aiplatform_v1beta1
+
+
+async def sample_mutate_deployed_index():
+ # Create a client
+ client = aiplatform_v1beta1.IndexEndpointServiceAsyncClient()
+
+ # Initialize request argument(s)
+ deployed_index = aiplatform_v1beta1.DeployedIndex()
+ deployed_index.id = "id_value"
+ deployed_index.index = "index_value"
+
+ request = aiplatform_v1beta1.MutateDeployedIndexRequest(
+ index_endpoint="index_endpoint_value",
+ deployed_index=deployed_index,
+ )
+
+ # Make the request
+ operation = client.mutate_deployed_index(request=request)
+
+ print("Waiting for operation to complete...")
+
+ response = await operation.result()
+
+ # Handle the response
+ print(response)
+
+# [END aiplatform_generated_aiplatform_v1beta1_IndexEndpointService_MutateDeployedIndex_async]
diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_index_endpoint_service_mutate_deployed_index_sync.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_index_endpoint_service_mutate_deployed_index_sync.py
new file mode 100644
index 0000000000..e6009bae68
--- /dev/null
+++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_index_endpoint_service_mutate_deployed_index_sync.py
@@ -0,0 +1,54 @@
+# -*- coding: utf-8 -*-
+# Copyright 2020 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+# Generated code. DO NOT EDIT!
+#
+# Snippet for MutateDeployedIndex
+# NOTE: This snippet has been automatically generated for illustrative purposes only.
+# It may require modifications to work in your environment.
+
+# To install the latest published package dependency, execute the following:
+# python3 -m pip install google-cloud-aiplatform
+
+
+# [START aiplatform_generated_aiplatform_v1beta1_IndexEndpointService_MutateDeployedIndex_sync]
+from google.cloud import aiplatform_v1beta1
+
+
+def sample_mutate_deployed_index():
+ # Create a client
+ client = aiplatform_v1beta1.IndexEndpointServiceClient()
+
+ # Initialize request argument(s)
+ deployed_index = aiplatform_v1beta1.DeployedIndex()
+ deployed_index.id = "id_value"
+ deployed_index.index = "index_value"
+
+ request = aiplatform_v1beta1.MutateDeployedIndexRequest(
+ index_endpoint="index_endpoint_value",
+ deployed_index=deployed_index,
+ )
+
+ # Make the request
+ operation = client.mutate_deployed_index(request=request)
+
+ print("Waiting for operation to complete...")
+
+ response = operation.result()
+
+ # Handle the response
+ print(response)
+
+# [END aiplatform_generated_aiplatform_v1beta1_IndexEndpointService_MutateDeployedIndex_sync]
diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_index_endpoint_service_undeploy_index_async.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_index_endpoint_service_undeploy_index_async.py
index baa98a1bff..65a5a90845 100644
--- a/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_index_endpoint_service_undeploy_index_async.py
+++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_index_endpoint_service_undeploy_index_async.py
@@ -28,14 +28,12 @@
async def sample_undeploy_index():
- """Snippet for undeploy_index"""
-
# Create a client
client = aiplatform_v1beta1.IndexEndpointServiceAsyncClient()
# Initialize request argument(s)
request = aiplatform_v1beta1.UndeployIndexRequest(
- index_endpoint="projects/{project}/locations/{location}/indexEndpoints/{index_endpoint}",
+ index_endpoint="index_endpoint_value",
deployed_index_id="deployed_index_id_value",
)
@@ -45,6 +43,8 @@ async def sample_undeploy_index():
print("Waiting for operation to complete...")
response = await operation.result()
+
+ # Handle the response
print(response)
# [END aiplatform_generated_aiplatform_v1beta1_IndexEndpointService_UndeployIndex_async]
diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_index_endpoint_service_undeploy_index_sync.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_index_endpoint_service_undeploy_index_sync.py
index d09977a8d2..028654de1a 100644
--- a/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_index_endpoint_service_undeploy_index_sync.py
+++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_index_endpoint_service_undeploy_index_sync.py
@@ -28,14 +28,12 @@
def sample_undeploy_index():
- """Snippet for undeploy_index"""
-
# Create a client
client = aiplatform_v1beta1.IndexEndpointServiceClient()
# Initialize request argument(s)
request = aiplatform_v1beta1.UndeployIndexRequest(
- index_endpoint="projects/{project}/locations/{location}/indexEndpoints/{index_endpoint}",
+ index_endpoint="index_endpoint_value",
deployed_index_id="deployed_index_id_value",
)
@@ -45,6 +43,8 @@ def sample_undeploy_index():
print("Waiting for operation to complete...")
response = operation.result()
+
+ # Handle the response
print(response)
# [END aiplatform_generated_aiplatform_v1beta1_IndexEndpointService_UndeployIndex_sync]
diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_index_endpoint_service_update_index_endpoint_async.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_index_endpoint_service_update_index_endpoint_async.py
index 6b677e2e8c..8836d2c41b 100644
--- a/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_index_endpoint_service_update_index_endpoint_async.py
+++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_index_endpoint_service_update_index_endpoint_async.py
@@ -28,15 +28,12 @@
async def sample_update_index_endpoint():
- """Snippet for update_index_endpoint"""
-
# Create a client
client = aiplatform_v1beta1.IndexEndpointServiceAsyncClient()
# Initialize request argument(s)
index_endpoint = aiplatform_v1beta1.IndexEndpoint()
index_endpoint.display_name = "display_name_value"
- index_endpoint.network = "network_value"
request = aiplatform_v1beta1.UpdateIndexEndpointRequest(
index_endpoint=index_endpoint,
@@ -45,7 +42,7 @@ async def sample_update_index_endpoint():
# Make the request
response = await client.update_index_endpoint(request=request)
- # Handle response
+ # Handle the response
print(response)
# [END aiplatform_generated_aiplatform_v1beta1_IndexEndpointService_UpdateIndexEndpoint_async]
diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_index_endpoint_service_update_index_endpoint_sync.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_index_endpoint_service_update_index_endpoint_sync.py
index e856f40c3e..f019321a1c 100644
--- a/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_index_endpoint_service_update_index_endpoint_sync.py
+++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_index_endpoint_service_update_index_endpoint_sync.py
@@ -28,15 +28,12 @@
def sample_update_index_endpoint():
- """Snippet for update_index_endpoint"""
-
# Create a client
client = aiplatform_v1beta1.IndexEndpointServiceClient()
# Initialize request argument(s)
index_endpoint = aiplatform_v1beta1.IndexEndpoint()
index_endpoint.display_name = "display_name_value"
- index_endpoint.network = "network_value"
request = aiplatform_v1beta1.UpdateIndexEndpointRequest(
index_endpoint=index_endpoint,
@@ -45,7 +42,7 @@ def sample_update_index_endpoint():
# Make the request
response = client.update_index_endpoint(request=request)
- # Handle response
+ # Handle the response
print(response)
# [END aiplatform_generated_aiplatform_v1beta1_IndexEndpointService_UpdateIndexEndpoint_sync]
diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_index_service_create_index_async.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_index_service_create_index_async.py
index 35da80a6b0..e83453a6dd 100644
--- a/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_index_service_create_index_async.py
+++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_index_service_create_index_async.py
@@ -28,8 +28,6 @@
async def sample_create_index():
- """Snippet for create_index"""
-
# Create a client
client = aiplatform_v1beta1.IndexServiceAsyncClient()
@@ -38,7 +36,7 @@ async def sample_create_index():
index.display_name = "display_name_value"
request = aiplatform_v1beta1.CreateIndexRequest(
- parent="projects/{project}/locations/{location}",
+ parent="parent_value",
index=index,
)
@@ -48,6 +46,8 @@ async def sample_create_index():
print("Waiting for operation to complete...")
response = await operation.result()
+
+ # Handle the response
print(response)
# [END aiplatform_generated_aiplatform_v1beta1_IndexService_CreateIndex_async]
diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_index_service_create_index_sync.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_index_service_create_index_sync.py
index 868c7724e0..f926793a50 100644
--- a/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_index_service_create_index_sync.py
+++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_index_service_create_index_sync.py
@@ -28,8 +28,6 @@
def sample_create_index():
- """Snippet for create_index"""
-
# Create a client
client = aiplatform_v1beta1.IndexServiceClient()
@@ -38,7 +36,7 @@ def sample_create_index():
index.display_name = "display_name_value"
request = aiplatform_v1beta1.CreateIndexRequest(
- parent="projects/{project}/locations/{location}",
+ parent="parent_value",
index=index,
)
@@ -48,6 +46,8 @@ def sample_create_index():
print("Waiting for operation to complete...")
response = operation.result()
+
+ # Handle the response
print(response)
# [END aiplatform_generated_aiplatform_v1beta1_IndexService_CreateIndex_sync]
diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_index_service_delete_index_async.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_index_service_delete_index_async.py
index 5b4a4cd14e..639eb277aa 100644
--- a/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_index_service_delete_index_async.py
+++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_index_service_delete_index_async.py
@@ -28,14 +28,12 @@
async def sample_delete_index():
- """Snippet for delete_index"""
-
# Create a client
client = aiplatform_v1beta1.IndexServiceAsyncClient()
# Initialize request argument(s)
request = aiplatform_v1beta1.DeleteIndexRequest(
- name="projects/{project}/locations/{location}/indexes/{index}",
+ name="name_value",
)
# Make the request
@@ -44,6 +42,8 @@ async def sample_delete_index():
print("Waiting for operation to complete...")
response = await operation.result()
+
+ # Handle the response
print(response)
# [END aiplatform_generated_aiplatform_v1beta1_IndexService_DeleteIndex_async]
diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_index_service_delete_index_sync.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_index_service_delete_index_sync.py
index 358f362d60..b93c644d66 100644
--- a/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_index_service_delete_index_sync.py
+++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_index_service_delete_index_sync.py
@@ -28,14 +28,12 @@
def sample_delete_index():
- """Snippet for delete_index"""
-
# Create a client
client = aiplatform_v1beta1.IndexServiceClient()
# Initialize request argument(s)
request = aiplatform_v1beta1.DeleteIndexRequest(
- name="projects/{project}/locations/{location}/indexes/{index}",
+ name="name_value",
)
# Make the request
@@ -44,6 +42,8 @@ def sample_delete_index():
print("Waiting for operation to complete...")
response = operation.result()
+
+ # Handle the response
print(response)
# [END aiplatform_generated_aiplatform_v1beta1_IndexService_DeleteIndex_sync]
diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_index_service_get_index_async.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_index_service_get_index_async.py
index 32428c410d..22c6536b7d 100644
--- a/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_index_service_get_index_async.py
+++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_index_service_get_index_async.py
@@ -28,20 +28,18 @@
async def sample_get_index():
- """Snippet for get_index"""
-
# Create a client
client = aiplatform_v1beta1.IndexServiceAsyncClient()
# Initialize request argument(s)
request = aiplatform_v1beta1.GetIndexRequest(
- name="projects/{project}/locations/{location}/indexes/{index}",
+ name="name_value",
)
# Make the request
response = await client.get_index(request=request)
- # Handle response
+ # Handle the response
print(response)
# [END aiplatform_generated_aiplatform_v1beta1_IndexService_GetIndex_async]
diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_index_service_get_index_sync.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_index_service_get_index_sync.py
index 190e3c4a38..dbc394267c 100644
--- a/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_index_service_get_index_sync.py
+++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_index_service_get_index_sync.py
@@ -28,20 +28,18 @@
def sample_get_index():
- """Snippet for get_index"""
-
# Create a client
client = aiplatform_v1beta1.IndexServiceClient()
# Initialize request argument(s)
request = aiplatform_v1beta1.GetIndexRequest(
- name="projects/{project}/locations/{location}/indexes/{index}",
+ name="name_value",
)
# Make the request
response = client.get_index(request=request)
- # Handle response
+ # Handle the response
print(response)
# [END aiplatform_generated_aiplatform_v1beta1_IndexService_GetIndex_sync]
diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_index_service_list_indexes_async.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_index_service_list_indexes_async.py
index d659ab94db..e7fa031e5f 100644
--- a/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_index_service_list_indexes_async.py
+++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_index_service_list_indexes_async.py
@@ -28,18 +28,18 @@
async def sample_list_indexes():
- """Snippet for list_indexes"""
-
# Create a client
client = aiplatform_v1beta1.IndexServiceAsyncClient()
# Initialize request argument(s)
request = aiplatform_v1beta1.ListIndexesRequest(
- parent="projects/{project}/locations/{location}",
+ parent="parent_value",
)
# Make the request
page_result = client.list_indexes(request=request)
+
+ # Handle the response
async for response in page_result:
print(response)
diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_index_service_list_indexes_sync.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_index_service_list_indexes_sync.py
index e2a886ca57..496c01157b 100644
--- a/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_index_service_list_indexes_sync.py
+++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_index_service_list_indexes_sync.py
@@ -28,18 +28,18 @@
def sample_list_indexes():
- """Snippet for list_indexes"""
-
# Create a client
client = aiplatform_v1beta1.IndexServiceClient()
# Initialize request argument(s)
request = aiplatform_v1beta1.ListIndexesRequest(
- parent="projects/{project}/locations/{location}",
+ parent="parent_value",
)
# Make the request
page_result = client.list_indexes(request=request)
+
+ # Handle the response
for response in page_result:
print(response)
diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_index_service_update_index_async.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_index_service_update_index_async.py
index 0b907bfcc8..5cbec63edf 100644
--- a/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_index_service_update_index_async.py
+++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_index_service_update_index_async.py
@@ -28,8 +28,6 @@
async def sample_update_index():
- """Snippet for update_index"""
-
# Create a client
client = aiplatform_v1beta1.IndexServiceAsyncClient()
@@ -47,6 +45,8 @@ async def sample_update_index():
print("Waiting for operation to complete...")
response = await operation.result()
+
+ # Handle the response
print(response)
# [END aiplatform_generated_aiplatform_v1beta1_IndexService_UpdateIndex_async]
diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_index_service_update_index_sync.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_index_service_update_index_sync.py
index 3488e3f188..49b25c19c8 100644
--- a/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_index_service_update_index_sync.py
+++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_index_service_update_index_sync.py
@@ -28,8 +28,6 @@
def sample_update_index():
- """Snippet for update_index"""
-
# Create a client
client = aiplatform_v1beta1.IndexServiceClient()
@@ -47,6 +45,8 @@ def sample_update_index():
print("Waiting for operation to complete...")
response = operation.result()
+
+ # Handle the response
print(response)
# [END aiplatform_generated_aiplatform_v1beta1_IndexService_UpdateIndex_sync]
diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_job_service_cancel_batch_prediction_job_async.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_job_service_cancel_batch_prediction_job_async.py
index d961247794..88c1a65a09 100644
--- a/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_job_service_cancel_batch_prediction_job_async.py
+++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_job_service_cancel_batch_prediction_job_async.py
@@ -28,18 +28,16 @@
async def sample_cancel_batch_prediction_job():
- """Snippet for cancel_batch_prediction_job"""
-
# Create a client
client = aiplatform_v1beta1.JobServiceAsyncClient()
# Initialize request argument(s)
request = aiplatform_v1beta1.CancelBatchPredictionJobRequest(
- name="projects/{project}/locations/{location}/batchPredictionJobs/{batch_prediction_job}",
+ name="name_value",
)
# Make the request
- response = await client.cancel_batch_prediction_job(request=request)
+ await client.cancel_batch_prediction_job(request=request)
# [END aiplatform_generated_aiplatform_v1beta1_JobService_CancelBatchPredictionJob_async]
diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_job_service_cancel_batch_prediction_job_sync.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_job_service_cancel_batch_prediction_job_sync.py
index 4ca71f610b..a80c7d6b1c 100644
--- a/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_job_service_cancel_batch_prediction_job_sync.py
+++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_job_service_cancel_batch_prediction_job_sync.py
@@ -28,18 +28,16 @@
def sample_cancel_batch_prediction_job():
- """Snippet for cancel_batch_prediction_job"""
-
# Create a client
client = aiplatform_v1beta1.JobServiceClient()
# Initialize request argument(s)
request = aiplatform_v1beta1.CancelBatchPredictionJobRequest(
- name="projects/{project}/locations/{location}/batchPredictionJobs/{batch_prediction_job}",
+ name="name_value",
)
# Make the request
- response = client.cancel_batch_prediction_job(request=request)
+ client.cancel_batch_prediction_job(request=request)
# [END aiplatform_generated_aiplatform_v1beta1_JobService_CancelBatchPredictionJob_sync]
diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_job_service_cancel_custom_job_async.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_job_service_cancel_custom_job_async.py
index c8e29652f9..fe455645db 100644
--- a/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_job_service_cancel_custom_job_async.py
+++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_job_service_cancel_custom_job_async.py
@@ -28,18 +28,16 @@
async def sample_cancel_custom_job():
- """Snippet for cancel_custom_job"""
-
# Create a client
client = aiplatform_v1beta1.JobServiceAsyncClient()
# Initialize request argument(s)
request = aiplatform_v1beta1.CancelCustomJobRequest(
- name="projects/{project}/locations/{location}/customJobs/{custom_job}",
+ name="name_value",
)
# Make the request
- response = await client.cancel_custom_job(request=request)
+ await client.cancel_custom_job(request=request)
# [END aiplatform_generated_aiplatform_v1beta1_JobService_CancelCustomJob_async]
diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_job_service_cancel_custom_job_sync.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_job_service_cancel_custom_job_sync.py
index 703a25a304..00d98b0459 100644
--- a/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_job_service_cancel_custom_job_sync.py
+++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_job_service_cancel_custom_job_sync.py
@@ -28,18 +28,16 @@
def sample_cancel_custom_job():
- """Snippet for cancel_custom_job"""
-
# Create a client
client = aiplatform_v1beta1.JobServiceClient()
# Initialize request argument(s)
request = aiplatform_v1beta1.CancelCustomJobRequest(
- name="projects/{project}/locations/{location}/customJobs/{custom_job}",
+ name="name_value",
)
# Make the request
- response = client.cancel_custom_job(request=request)
+ client.cancel_custom_job(request=request)
# [END aiplatform_generated_aiplatform_v1beta1_JobService_CancelCustomJob_sync]
diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_job_service_cancel_data_labeling_job_async.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_job_service_cancel_data_labeling_job_async.py
index 949c4650e8..811aa785da 100644
--- a/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_job_service_cancel_data_labeling_job_async.py
+++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_job_service_cancel_data_labeling_job_async.py
@@ -28,18 +28,16 @@
async def sample_cancel_data_labeling_job():
- """Snippet for cancel_data_labeling_job"""
-
# Create a client
client = aiplatform_v1beta1.JobServiceAsyncClient()
# Initialize request argument(s)
request = aiplatform_v1beta1.CancelDataLabelingJobRequest(
- name="projects/{project}/locations/{location}/dataLabelingJobs/{data_labeling_job}",
+ name="name_value",
)
# Make the request
- response = await client.cancel_data_labeling_job(request=request)
+ await client.cancel_data_labeling_job(request=request)
# [END aiplatform_generated_aiplatform_v1beta1_JobService_CancelDataLabelingJob_async]
diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_job_service_cancel_data_labeling_job_sync.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_job_service_cancel_data_labeling_job_sync.py
index 7a42c3bc8c..0fb43a243c 100644
--- a/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_job_service_cancel_data_labeling_job_sync.py
+++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_job_service_cancel_data_labeling_job_sync.py
@@ -28,18 +28,16 @@
def sample_cancel_data_labeling_job():
- """Snippet for cancel_data_labeling_job"""
-
# Create a client
client = aiplatform_v1beta1.JobServiceClient()
# Initialize request argument(s)
request = aiplatform_v1beta1.CancelDataLabelingJobRequest(
- name="projects/{project}/locations/{location}/dataLabelingJobs/{data_labeling_job}",
+ name="name_value",
)
# Make the request
- response = client.cancel_data_labeling_job(request=request)
+ client.cancel_data_labeling_job(request=request)
# [END aiplatform_generated_aiplatform_v1beta1_JobService_CancelDataLabelingJob_sync]
diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_job_service_cancel_hyperparameter_tuning_job_async.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_job_service_cancel_hyperparameter_tuning_job_async.py
index a38660f8ac..ec4dba8581 100644
--- a/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_job_service_cancel_hyperparameter_tuning_job_async.py
+++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_job_service_cancel_hyperparameter_tuning_job_async.py
@@ -28,18 +28,16 @@
async def sample_cancel_hyperparameter_tuning_job():
- """Snippet for cancel_hyperparameter_tuning_job"""
-
# Create a client
client = aiplatform_v1beta1.JobServiceAsyncClient()
# Initialize request argument(s)
request = aiplatform_v1beta1.CancelHyperparameterTuningJobRequest(
- name="projects/{project}/locations/{location}/hyperparameterTuningJobs/{hyperparameter_tuning_job}",
+ name="name_value",
)
# Make the request
- response = await client.cancel_hyperparameter_tuning_job(request=request)
+ await client.cancel_hyperparameter_tuning_job(request=request)
# [END aiplatform_generated_aiplatform_v1beta1_JobService_CancelHyperparameterTuningJob_async]
diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_job_service_cancel_hyperparameter_tuning_job_sync.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_job_service_cancel_hyperparameter_tuning_job_sync.py
index 803b7eafcf..6513407b1e 100644
--- a/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_job_service_cancel_hyperparameter_tuning_job_sync.py
+++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_job_service_cancel_hyperparameter_tuning_job_sync.py
@@ -28,18 +28,16 @@
def sample_cancel_hyperparameter_tuning_job():
- """Snippet for cancel_hyperparameter_tuning_job"""
-
# Create a client
client = aiplatform_v1beta1.JobServiceClient()
# Initialize request argument(s)
request = aiplatform_v1beta1.CancelHyperparameterTuningJobRequest(
- name="projects/{project}/locations/{location}/hyperparameterTuningJobs/{hyperparameter_tuning_job}",
+ name="name_value",
)
# Make the request
- response = client.cancel_hyperparameter_tuning_job(request=request)
+ client.cancel_hyperparameter_tuning_job(request=request)
# [END aiplatform_generated_aiplatform_v1beta1_JobService_CancelHyperparameterTuningJob_sync]
diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_job_service_create_batch_prediction_job_async.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_job_service_create_batch_prediction_job_async.py
index 3040a23c4d..56d6151eed 100644
--- a/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_job_service_create_batch_prediction_job_async.py
+++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_job_service_create_batch_prediction_job_async.py
@@ -28,29 +28,26 @@
async def sample_create_batch_prediction_job():
- """Snippet for create_batch_prediction_job"""
-
# Create a client
client = aiplatform_v1beta1.JobServiceAsyncClient()
# Initialize request argument(s)
batch_prediction_job = aiplatform_v1beta1.BatchPredictionJob()
batch_prediction_job.display_name = "display_name_value"
- batch_prediction_job.model = "projects/{project}/locations/{location}/models/{model}"
batch_prediction_job.input_config.gcs_source.uris = ['uris_value_1', 'uris_value_2']
batch_prediction_job.input_config.instances_format = "instances_format_value"
batch_prediction_job.output_config.gcs_destination.output_uri_prefix = "output_uri_prefix_value"
batch_prediction_job.output_config.predictions_format = "predictions_format_value"
request = aiplatform_v1beta1.CreateBatchPredictionJobRequest(
- parent="projects/{project}/locations/{location}",
+ parent="parent_value",
batch_prediction_job=batch_prediction_job,
)
# Make the request
response = await client.create_batch_prediction_job(request=request)
- # Handle response
+ # Handle the response
print(response)
# [END aiplatform_generated_aiplatform_v1beta1_JobService_CreateBatchPredictionJob_async]
diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_job_service_create_batch_prediction_job_sync.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_job_service_create_batch_prediction_job_sync.py
index d6822bdcfa..5e131a03e8 100644
--- a/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_job_service_create_batch_prediction_job_sync.py
+++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_job_service_create_batch_prediction_job_sync.py
@@ -28,29 +28,26 @@
def sample_create_batch_prediction_job():
- """Snippet for create_batch_prediction_job"""
-
# Create a client
client = aiplatform_v1beta1.JobServiceClient()
# Initialize request argument(s)
batch_prediction_job = aiplatform_v1beta1.BatchPredictionJob()
batch_prediction_job.display_name = "display_name_value"
- batch_prediction_job.model = "projects/{project}/locations/{location}/models/{model}"
batch_prediction_job.input_config.gcs_source.uris = ['uris_value_1', 'uris_value_2']
batch_prediction_job.input_config.instances_format = "instances_format_value"
batch_prediction_job.output_config.gcs_destination.output_uri_prefix = "output_uri_prefix_value"
batch_prediction_job.output_config.predictions_format = "predictions_format_value"
request = aiplatform_v1beta1.CreateBatchPredictionJobRequest(
- parent="projects/{project}/locations/{location}",
+ parent="parent_value",
batch_prediction_job=batch_prediction_job,
)
# Make the request
response = client.create_batch_prediction_job(request=request)
- # Handle response
+ # Handle the response
print(response)
# [END aiplatform_generated_aiplatform_v1beta1_JobService_CreateBatchPredictionJob_sync]
diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_job_service_create_custom_job_async.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_job_service_create_custom_job_async.py
index 9ebd4fe08d..f96886e80a 100644
--- a/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_job_service_create_custom_job_async.py
+++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_job_service_create_custom_job_async.py
@@ -28,8 +28,6 @@
async def sample_create_custom_job():
- """Snippet for create_custom_job"""
-
# Create a client
client = aiplatform_v1beta1.JobServiceAsyncClient()
@@ -39,14 +37,14 @@ async def sample_create_custom_job():
custom_job.job_spec.worker_pool_specs.container_spec.image_uri = "image_uri_value"
request = aiplatform_v1beta1.CreateCustomJobRequest(
- parent="projects/{project}/locations/{location}",
+ parent="parent_value",
custom_job=custom_job,
)
# Make the request
response = await client.create_custom_job(request=request)
- # Handle response
+ # Handle the response
print(response)
# [END aiplatform_generated_aiplatform_v1beta1_JobService_CreateCustomJob_async]
diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_job_service_create_custom_job_sync.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_job_service_create_custom_job_sync.py
index c002309636..69f2fee8fd 100644
--- a/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_job_service_create_custom_job_sync.py
+++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_job_service_create_custom_job_sync.py
@@ -28,8 +28,6 @@
def sample_create_custom_job():
- """Snippet for create_custom_job"""
-
# Create a client
client = aiplatform_v1beta1.JobServiceClient()
@@ -39,14 +37,14 @@ def sample_create_custom_job():
custom_job.job_spec.worker_pool_specs.container_spec.image_uri = "image_uri_value"
request = aiplatform_v1beta1.CreateCustomJobRequest(
- parent="projects/{project}/locations/{location}",
+ parent="parent_value",
custom_job=custom_job,
)
# Make the request
response = client.create_custom_job(request=request)
- # Handle response
+ # Handle the response
print(response)
# [END aiplatform_generated_aiplatform_v1beta1_JobService_CreateCustomJob_sync]
diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_job_service_create_data_labeling_job_async.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_job_service_create_data_labeling_job_async.py
index 04d893c315..e2da99e739 100644
--- a/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_job_service_create_data_labeling_job_async.py
+++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_job_service_create_data_labeling_job_async.py
@@ -28,29 +28,27 @@
async def sample_create_data_labeling_job():
- """Snippet for create_data_labeling_job"""
-
# Create a client
client = aiplatform_v1beta1.JobServiceAsyncClient()
# Initialize request argument(s)
data_labeling_job = aiplatform_v1beta1.DataLabelingJob()
data_labeling_job.display_name = "display_name_value"
- data_labeling_job.datasets = "projects/{project}/locations/{location}/datasets/{dataset}"
+ data_labeling_job.datasets = ['datasets_value_1', 'datasets_value_2']
data_labeling_job.labeler_count = 1375
data_labeling_job.instruction_uri = "instruction_uri_value"
data_labeling_job.inputs_schema_uri = "inputs_schema_uri_value"
data_labeling_job.inputs.null_value = "NULL_VALUE"
request = aiplatform_v1beta1.CreateDataLabelingJobRequest(
- parent="projects/{project}/locations/{location}",
+ parent="parent_value",
data_labeling_job=data_labeling_job,
)
# Make the request
response = await client.create_data_labeling_job(request=request)
- # Handle response
+ # Handle the response
print(response)
# [END aiplatform_generated_aiplatform_v1beta1_JobService_CreateDataLabelingJob_async]
diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_job_service_create_data_labeling_job_sync.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_job_service_create_data_labeling_job_sync.py
index 041a2cc58a..c09042dd79 100644
--- a/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_job_service_create_data_labeling_job_sync.py
+++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_job_service_create_data_labeling_job_sync.py
@@ -28,29 +28,27 @@
def sample_create_data_labeling_job():
- """Snippet for create_data_labeling_job"""
-
# Create a client
client = aiplatform_v1beta1.JobServiceClient()
# Initialize request argument(s)
data_labeling_job = aiplatform_v1beta1.DataLabelingJob()
data_labeling_job.display_name = "display_name_value"
- data_labeling_job.datasets = "projects/{project}/locations/{location}/datasets/{dataset}"
+ data_labeling_job.datasets = ['datasets_value_1', 'datasets_value_2']
data_labeling_job.labeler_count = 1375
data_labeling_job.instruction_uri = "instruction_uri_value"
data_labeling_job.inputs_schema_uri = "inputs_schema_uri_value"
data_labeling_job.inputs.null_value = "NULL_VALUE"
request = aiplatform_v1beta1.CreateDataLabelingJobRequest(
- parent="projects/{project}/locations/{location}",
+ parent="parent_value",
data_labeling_job=data_labeling_job,
)
# Make the request
response = client.create_data_labeling_job(request=request)
- # Handle response
+ # Handle the response
print(response)
# [END aiplatform_generated_aiplatform_v1beta1_JobService_CreateDataLabelingJob_sync]
diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_job_service_create_hyperparameter_tuning_job_async.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_job_service_create_hyperparameter_tuning_job_async.py
index 05c1d9da43..9425ee2de4 100644
--- a/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_job_service_create_hyperparameter_tuning_job_async.py
+++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_job_service_create_hyperparameter_tuning_job_async.py
@@ -28,8 +28,6 @@
async def sample_create_hyperparameter_tuning_job():
- """Snippet for create_hyperparameter_tuning_job"""
-
# Create a client
client = aiplatform_v1beta1.JobServiceAsyncClient()
@@ -46,14 +44,14 @@ async def sample_create_hyperparameter_tuning_job():
hyperparameter_tuning_job.trial_job_spec.worker_pool_specs.container_spec.image_uri = "image_uri_value"
request = aiplatform_v1beta1.CreateHyperparameterTuningJobRequest(
- parent="projects/{project}/locations/{location}",
+ parent="parent_value",
hyperparameter_tuning_job=hyperparameter_tuning_job,
)
# Make the request
response = await client.create_hyperparameter_tuning_job(request=request)
- # Handle response
+ # Handle the response
print(response)
# [END aiplatform_generated_aiplatform_v1beta1_JobService_CreateHyperparameterTuningJob_async]
diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_job_service_create_hyperparameter_tuning_job_sync.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_job_service_create_hyperparameter_tuning_job_sync.py
index 4b785bc88f..c5f3794604 100644
--- a/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_job_service_create_hyperparameter_tuning_job_sync.py
+++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_job_service_create_hyperparameter_tuning_job_sync.py
@@ -28,8 +28,6 @@
def sample_create_hyperparameter_tuning_job():
- """Snippet for create_hyperparameter_tuning_job"""
-
# Create a client
client = aiplatform_v1beta1.JobServiceClient()
@@ -46,14 +44,14 @@ def sample_create_hyperparameter_tuning_job():
hyperparameter_tuning_job.trial_job_spec.worker_pool_specs.container_spec.image_uri = "image_uri_value"
request = aiplatform_v1beta1.CreateHyperparameterTuningJobRequest(
- parent="projects/{project}/locations/{location}",
+ parent="parent_value",
hyperparameter_tuning_job=hyperparameter_tuning_job,
)
# Make the request
response = client.create_hyperparameter_tuning_job(request=request)
- # Handle response
+ # Handle the response
print(response)
# [END aiplatform_generated_aiplatform_v1beta1_JobService_CreateHyperparameterTuningJob_sync]
diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_job_service_create_model_deployment_monitoring_job_async.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_job_service_create_model_deployment_monitoring_job_async.py
index bbdd411e1b..b32cda7730 100644
--- a/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_job_service_create_model_deployment_monitoring_job_async.py
+++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_job_service_create_model_deployment_monitoring_job_async.py
@@ -28,25 +28,23 @@
async def sample_create_model_deployment_monitoring_job():
- """Snippet for create_model_deployment_monitoring_job"""
-
# Create a client
client = aiplatform_v1beta1.JobServiceAsyncClient()
# Initialize request argument(s)
model_deployment_monitoring_job = aiplatform_v1beta1.ModelDeploymentMonitoringJob()
model_deployment_monitoring_job.display_name = "display_name_value"
- model_deployment_monitoring_job.endpoint = "projects/{project}/locations/{location}/endpoints/{endpoint}"
+ model_deployment_monitoring_job.endpoint = "endpoint_value"
request = aiplatform_v1beta1.CreateModelDeploymentMonitoringJobRequest(
- parent="projects/{project}/locations/{location}",
+ parent="parent_value",
model_deployment_monitoring_job=model_deployment_monitoring_job,
)
# Make the request
response = await client.create_model_deployment_monitoring_job(request=request)
- # Handle response
+ # Handle the response
print(response)
# [END aiplatform_generated_aiplatform_v1beta1_JobService_CreateModelDeploymentMonitoringJob_async]
diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_job_service_create_model_deployment_monitoring_job_sync.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_job_service_create_model_deployment_monitoring_job_sync.py
index 6c3a14b7e2..2989d2b893 100644
--- a/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_job_service_create_model_deployment_monitoring_job_sync.py
+++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_job_service_create_model_deployment_monitoring_job_sync.py
@@ -28,25 +28,23 @@
def sample_create_model_deployment_monitoring_job():
- """Snippet for create_model_deployment_monitoring_job"""
-
# Create a client
client = aiplatform_v1beta1.JobServiceClient()
# Initialize request argument(s)
model_deployment_monitoring_job = aiplatform_v1beta1.ModelDeploymentMonitoringJob()
model_deployment_monitoring_job.display_name = "display_name_value"
- model_deployment_monitoring_job.endpoint = "projects/{project}/locations/{location}/endpoints/{endpoint}"
+ model_deployment_monitoring_job.endpoint = "endpoint_value"
request = aiplatform_v1beta1.CreateModelDeploymentMonitoringJobRequest(
- parent="projects/{project}/locations/{location}",
+ parent="parent_value",
model_deployment_monitoring_job=model_deployment_monitoring_job,
)
# Make the request
response = client.create_model_deployment_monitoring_job(request=request)
- # Handle response
+ # Handle the response
print(response)
# [END aiplatform_generated_aiplatform_v1beta1_JobService_CreateModelDeploymentMonitoringJob_sync]
diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_job_service_delete_batch_prediction_job_async.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_job_service_delete_batch_prediction_job_async.py
index b96b995c3c..ee33e7c117 100644
--- a/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_job_service_delete_batch_prediction_job_async.py
+++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_job_service_delete_batch_prediction_job_async.py
@@ -28,14 +28,12 @@
async def sample_delete_batch_prediction_job():
- """Snippet for delete_batch_prediction_job"""
-
# Create a client
client = aiplatform_v1beta1.JobServiceAsyncClient()
# Initialize request argument(s)
request = aiplatform_v1beta1.DeleteBatchPredictionJobRequest(
- name="projects/{project}/locations/{location}/batchPredictionJobs/{batch_prediction_job}",
+ name="name_value",
)
# Make the request
@@ -44,6 +42,8 @@ async def sample_delete_batch_prediction_job():
print("Waiting for operation to complete...")
response = await operation.result()
+
+ # Handle the response
print(response)
# [END aiplatform_generated_aiplatform_v1beta1_JobService_DeleteBatchPredictionJob_async]
diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_job_service_delete_batch_prediction_job_sync.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_job_service_delete_batch_prediction_job_sync.py
index 63a8cc88b7..889ba74224 100644
--- a/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_job_service_delete_batch_prediction_job_sync.py
+++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_job_service_delete_batch_prediction_job_sync.py
@@ -28,14 +28,12 @@
def sample_delete_batch_prediction_job():
- """Snippet for delete_batch_prediction_job"""
-
# Create a client
client = aiplatform_v1beta1.JobServiceClient()
# Initialize request argument(s)
request = aiplatform_v1beta1.DeleteBatchPredictionJobRequest(
- name="projects/{project}/locations/{location}/batchPredictionJobs/{batch_prediction_job}",
+ name="name_value",
)
# Make the request
@@ -44,6 +42,8 @@ def sample_delete_batch_prediction_job():
print("Waiting for operation to complete...")
response = operation.result()
+
+ # Handle the response
print(response)
# [END aiplatform_generated_aiplatform_v1beta1_JobService_DeleteBatchPredictionJob_sync]
diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_job_service_delete_custom_job_async.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_job_service_delete_custom_job_async.py
index 0cdced4d51..c423d213a0 100644
--- a/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_job_service_delete_custom_job_async.py
+++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_job_service_delete_custom_job_async.py
@@ -28,14 +28,12 @@
async def sample_delete_custom_job():
- """Snippet for delete_custom_job"""
-
# Create a client
client = aiplatform_v1beta1.JobServiceAsyncClient()
# Initialize request argument(s)
request = aiplatform_v1beta1.DeleteCustomJobRequest(
- name="projects/{project}/locations/{location}/customJobs/{custom_job}",
+ name="name_value",
)
# Make the request
@@ -44,6 +42,8 @@ async def sample_delete_custom_job():
print("Waiting for operation to complete...")
response = await operation.result()
+
+ # Handle the response
print(response)
# [END aiplatform_generated_aiplatform_v1beta1_JobService_DeleteCustomJob_async]
diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_job_service_delete_custom_job_sync.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_job_service_delete_custom_job_sync.py
index 44ce116e38..49ccd3a3ac 100644
--- a/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_job_service_delete_custom_job_sync.py
+++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_job_service_delete_custom_job_sync.py
@@ -28,14 +28,12 @@
def sample_delete_custom_job():
- """Snippet for delete_custom_job"""
-
# Create a client
client = aiplatform_v1beta1.JobServiceClient()
# Initialize request argument(s)
request = aiplatform_v1beta1.DeleteCustomJobRequest(
- name="projects/{project}/locations/{location}/customJobs/{custom_job}",
+ name="name_value",
)
# Make the request
@@ -44,6 +42,8 @@ def sample_delete_custom_job():
print("Waiting for operation to complete...")
response = operation.result()
+
+ # Handle the response
print(response)
# [END aiplatform_generated_aiplatform_v1beta1_JobService_DeleteCustomJob_sync]
diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_job_service_delete_data_labeling_job_async.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_job_service_delete_data_labeling_job_async.py
index 8fc86a240a..a1a46199e7 100644
--- a/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_job_service_delete_data_labeling_job_async.py
+++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_job_service_delete_data_labeling_job_async.py
@@ -28,14 +28,12 @@
async def sample_delete_data_labeling_job():
- """Snippet for delete_data_labeling_job"""
-
# Create a client
client = aiplatform_v1beta1.JobServiceAsyncClient()
# Initialize request argument(s)
request = aiplatform_v1beta1.DeleteDataLabelingJobRequest(
- name="projects/{project}/locations/{location}/dataLabelingJobs/{data_labeling_job}",
+ name="name_value",
)
# Make the request
@@ -44,6 +42,8 @@ async def sample_delete_data_labeling_job():
print("Waiting for operation to complete...")
response = await operation.result()
+
+ # Handle the response
print(response)
# [END aiplatform_generated_aiplatform_v1beta1_JobService_DeleteDataLabelingJob_async]
diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_job_service_delete_data_labeling_job_sync.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_job_service_delete_data_labeling_job_sync.py
index feaf764cf6..deae42b8a7 100644
--- a/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_job_service_delete_data_labeling_job_sync.py
+++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_job_service_delete_data_labeling_job_sync.py
@@ -28,14 +28,12 @@
def sample_delete_data_labeling_job():
- """Snippet for delete_data_labeling_job"""
-
# Create a client
client = aiplatform_v1beta1.JobServiceClient()
# Initialize request argument(s)
request = aiplatform_v1beta1.DeleteDataLabelingJobRequest(
- name="projects/{project}/locations/{location}/dataLabelingJobs/{data_labeling_job}",
+ name="name_value",
)
# Make the request
@@ -44,6 +42,8 @@ def sample_delete_data_labeling_job():
print("Waiting for operation to complete...")
response = operation.result()
+
+ # Handle the response
print(response)
# [END aiplatform_generated_aiplatform_v1beta1_JobService_DeleteDataLabelingJob_sync]
diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_job_service_delete_hyperparameter_tuning_job_async.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_job_service_delete_hyperparameter_tuning_job_async.py
index c0aaf68792..f9be687ef7 100644
--- a/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_job_service_delete_hyperparameter_tuning_job_async.py
+++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_job_service_delete_hyperparameter_tuning_job_async.py
@@ -28,14 +28,12 @@
async def sample_delete_hyperparameter_tuning_job():
- """Snippet for delete_hyperparameter_tuning_job"""
-
# Create a client
client = aiplatform_v1beta1.JobServiceAsyncClient()
# Initialize request argument(s)
request = aiplatform_v1beta1.DeleteHyperparameterTuningJobRequest(
- name="projects/{project}/locations/{location}/hyperparameterTuningJobs/{hyperparameter_tuning_job}",
+ name="name_value",
)
# Make the request
@@ -44,6 +42,8 @@ async def sample_delete_hyperparameter_tuning_job():
print("Waiting for operation to complete...")
response = await operation.result()
+
+ # Handle the response
print(response)
# [END aiplatform_generated_aiplatform_v1beta1_JobService_DeleteHyperparameterTuningJob_async]
diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_job_service_delete_hyperparameter_tuning_job_sync.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_job_service_delete_hyperparameter_tuning_job_sync.py
index 2eb4315f0d..65ae82fdfc 100644
--- a/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_job_service_delete_hyperparameter_tuning_job_sync.py
+++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_job_service_delete_hyperparameter_tuning_job_sync.py
@@ -28,14 +28,12 @@
def sample_delete_hyperparameter_tuning_job():
- """Snippet for delete_hyperparameter_tuning_job"""
-
# Create a client
client = aiplatform_v1beta1.JobServiceClient()
# Initialize request argument(s)
request = aiplatform_v1beta1.DeleteHyperparameterTuningJobRequest(
- name="projects/{project}/locations/{location}/hyperparameterTuningJobs/{hyperparameter_tuning_job}",
+ name="name_value",
)
# Make the request
@@ -44,6 +42,8 @@ def sample_delete_hyperparameter_tuning_job():
print("Waiting for operation to complete...")
response = operation.result()
+
+ # Handle the response
print(response)
# [END aiplatform_generated_aiplatform_v1beta1_JobService_DeleteHyperparameterTuningJob_sync]
diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_job_service_delete_model_deployment_monitoring_job_async.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_job_service_delete_model_deployment_monitoring_job_async.py
index 096ac05708..7ec9cba1e5 100644
--- a/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_job_service_delete_model_deployment_monitoring_job_async.py
+++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_job_service_delete_model_deployment_monitoring_job_async.py
@@ -28,14 +28,12 @@
async def sample_delete_model_deployment_monitoring_job():
- """Snippet for delete_model_deployment_monitoring_job"""
-
# Create a client
client = aiplatform_v1beta1.JobServiceAsyncClient()
# Initialize request argument(s)
request = aiplatform_v1beta1.DeleteModelDeploymentMonitoringJobRequest(
- name="projects/{project}/locations/{location}/modelDeploymentMonitoringJobs/{model_deployment_monitoring_job}",
+ name="name_value",
)
# Make the request
@@ -44,6 +42,8 @@ async def sample_delete_model_deployment_monitoring_job():
print("Waiting for operation to complete...")
response = await operation.result()
+
+ # Handle the response
print(response)
# [END aiplatform_generated_aiplatform_v1beta1_JobService_DeleteModelDeploymentMonitoringJob_async]
diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_job_service_delete_model_deployment_monitoring_job_sync.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_job_service_delete_model_deployment_monitoring_job_sync.py
index b3cb115e4c..954715a8a6 100644
--- a/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_job_service_delete_model_deployment_monitoring_job_sync.py
+++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_job_service_delete_model_deployment_monitoring_job_sync.py
@@ -28,14 +28,12 @@
def sample_delete_model_deployment_monitoring_job():
- """Snippet for delete_model_deployment_monitoring_job"""
-
# Create a client
client = aiplatform_v1beta1.JobServiceClient()
# Initialize request argument(s)
request = aiplatform_v1beta1.DeleteModelDeploymentMonitoringJobRequest(
- name="projects/{project}/locations/{location}/modelDeploymentMonitoringJobs/{model_deployment_monitoring_job}",
+ name="name_value",
)
# Make the request
@@ -44,6 +42,8 @@ def sample_delete_model_deployment_monitoring_job():
print("Waiting for operation to complete...")
response = operation.result()
+
+ # Handle the response
print(response)
# [END aiplatform_generated_aiplatform_v1beta1_JobService_DeleteModelDeploymentMonitoringJob_sync]
diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_job_service_get_batch_prediction_job_async.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_job_service_get_batch_prediction_job_async.py
index f13ae7352a..54e9236368 100644
--- a/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_job_service_get_batch_prediction_job_async.py
+++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_job_service_get_batch_prediction_job_async.py
@@ -28,20 +28,18 @@
async def sample_get_batch_prediction_job():
- """Snippet for get_batch_prediction_job"""
-
# Create a client
client = aiplatform_v1beta1.JobServiceAsyncClient()
# Initialize request argument(s)
request = aiplatform_v1beta1.GetBatchPredictionJobRequest(
- name="projects/{project}/locations/{location}/batchPredictionJobs/{batch_prediction_job}",
+ name="name_value",
)
# Make the request
response = await client.get_batch_prediction_job(request=request)
- # Handle response
+ # Handle the response
print(response)
# [END aiplatform_generated_aiplatform_v1beta1_JobService_GetBatchPredictionJob_async]
diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_job_service_get_batch_prediction_job_sync.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_job_service_get_batch_prediction_job_sync.py
index 1988457e58..636accd7f6 100644
--- a/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_job_service_get_batch_prediction_job_sync.py
+++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_job_service_get_batch_prediction_job_sync.py
@@ -28,20 +28,18 @@
def sample_get_batch_prediction_job():
- """Snippet for get_batch_prediction_job"""
-
# Create a client
client = aiplatform_v1beta1.JobServiceClient()
# Initialize request argument(s)
request = aiplatform_v1beta1.GetBatchPredictionJobRequest(
- name="projects/{project}/locations/{location}/batchPredictionJobs/{batch_prediction_job}",
+ name="name_value",
)
# Make the request
response = client.get_batch_prediction_job(request=request)
- # Handle response
+ # Handle the response
print(response)
# [END aiplatform_generated_aiplatform_v1beta1_JobService_GetBatchPredictionJob_sync]
diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_job_service_get_custom_job_async.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_job_service_get_custom_job_async.py
index a5b814c150..4c2733a99e 100644
--- a/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_job_service_get_custom_job_async.py
+++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_job_service_get_custom_job_async.py
@@ -28,20 +28,18 @@
async def sample_get_custom_job():
- """Snippet for get_custom_job"""
-
# Create a client
client = aiplatform_v1beta1.JobServiceAsyncClient()
# Initialize request argument(s)
request = aiplatform_v1beta1.GetCustomJobRequest(
- name="projects/{project}/locations/{location}/customJobs/{custom_job}",
+ name="name_value",
)
# Make the request
response = await client.get_custom_job(request=request)
- # Handle response
+ # Handle the response
print(response)
# [END aiplatform_generated_aiplatform_v1beta1_JobService_GetCustomJob_async]
diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_job_service_get_custom_job_sync.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_job_service_get_custom_job_sync.py
index 44dfb5ee20..debf01cbd8 100644
--- a/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_job_service_get_custom_job_sync.py
+++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_job_service_get_custom_job_sync.py
@@ -28,20 +28,18 @@
def sample_get_custom_job():
- """Snippet for get_custom_job"""
-
# Create a client
client = aiplatform_v1beta1.JobServiceClient()
# Initialize request argument(s)
request = aiplatform_v1beta1.GetCustomJobRequest(
- name="projects/{project}/locations/{location}/customJobs/{custom_job}",
+ name="name_value",
)
# Make the request
response = client.get_custom_job(request=request)
- # Handle response
+ # Handle the response
print(response)
# [END aiplatform_generated_aiplatform_v1beta1_JobService_GetCustomJob_sync]
diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_job_service_get_data_labeling_job_async.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_job_service_get_data_labeling_job_async.py
index c6f097046b..276eab20d4 100644
--- a/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_job_service_get_data_labeling_job_async.py
+++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_job_service_get_data_labeling_job_async.py
@@ -28,20 +28,18 @@
async def sample_get_data_labeling_job():
- """Snippet for get_data_labeling_job"""
-
# Create a client
client = aiplatform_v1beta1.JobServiceAsyncClient()
# Initialize request argument(s)
request = aiplatform_v1beta1.GetDataLabelingJobRequest(
- name="projects/{project}/locations/{location}/dataLabelingJobs/{data_labeling_job}",
+ name="name_value",
)
# Make the request
response = await client.get_data_labeling_job(request=request)
- # Handle response
+ # Handle the response
print(response)
# [END aiplatform_generated_aiplatform_v1beta1_JobService_GetDataLabelingJob_async]
diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_job_service_get_data_labeling_job_sync.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_job_service_get_data_labeling_job_sync.py
index 2d9155c6eb..a81359e925 100644
--- a/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_job_service_get_data_labeling_job_sync.py
+++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_job_service_get_data_labeling_job_sync.py
@@ -28,20 +28,18 @@
def sample_get_data_labeling_job():
- """Snippet for get_data_labeling_job"""
-
# Create a client
client = aiplatform_v1beta1.JobServiceClient()
# Initialize request argument(s)
request = aiplatform_v1beta1.GetDataLabelingJobRequest(
- name="projects/{project}/locations/{location}/dataLabelingJobs/{data_labeling_job}",
+ name="name_value",
)
# Make the request
response = client.get_data_labeling_job(request=request)
- # Handle response
+ # Handle the response
print(response)
# [END aiplatform_generated_aiplatform_v1beta1_JobService_GetDataLabelingJob_sync]
diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_job_service_get_hyperparameter_tuning_job_async.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_job_service_get_hyperparameter_tuning_job_async.py
index 2c7491c297..3e77372039 100644
--- a/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_job_service_get_hyperparameter_tuning_job_async.py
+++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_job_service_get_hyperparameter_tuning_job_async.py
@@ -28,20 +28,18 @@
async def sample_get_hyperparameter_tuning_job():
- """Snippet for get_hyperparameter_tuning_job"""
-
# Create a client
client = aiplatform_v1beta1.JobServiceAsyncClient()
# Initialize request argument(s)
request = aiplatform_v1beta1.GetHyperparameterTuningJobRequest(
- name="projects/{project}/locations/{location}/hyperparameterTuningJobs/{hyperparameter_tuning_job}",
+ name="name_value",
)
# Make the request
response = await client.get_hyperparameter_tuning_job(request=request)
- # Handle response
+ # Handle the response
print(response)
# [END aiplatform_generated_aiplatform_v1beta1_JobService_GetHyperparameterTuningJob_async]
diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_job_service_get_hyperparameter_tuning_job_sync.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_job_service_get_hyperparameter_tuning_job_sync.py
index 72f332322d..a8c5ff55ad 100644
--- a/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_job_service_get_hyperparameter_tuning_job_sync.py
+++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_job_service_get_hyperparameter_tuning_job_sync.py
@@ -28,20 +28,18 @@
def sample_get_hyperparameter_tuning_job():
- """Snippet for get_hyperparameter_tuning_job"""
-
# Create a client
client = aiplatform_v1beta1.JobServiceClient()
# Initialize request argument(s)
request = aiplatform_v1beta1.GetHyperparameterTuningJobRequest(
- name="projects/{project}/locations/{location}/hyperparameterTuningJobs/{hyperparameter_tuning_job}",
+ name="name_value",
)
# Make the request
response = client.get_hyperparameter_tuning_job(request=request)
- # Handle response
+ # Handle the response
print(response)
# [END aiplatform_generated_aiplatform_v1beta1_JobService_GetHyperparameterTuningJob_sync]
diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_job_service_get_model_deployment_monitoring_job_async.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_job_service_get_model_deployment_monitoring_job_async.py
index 6020509adc..a7b8d6b9fd 100644
--- a/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_job_service_get_model_deployment_monitoring_job_async.py
+++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_job_service_get_model_deployment_monitoring_job_async.py
@@ -28,20 +28,18 @@
async def sample_get_model_deployment_monitoring_job():
- """Snippet for get_model_deployment_monitoring_job"""
-
# Create a client
client = aiplatform_v1beta1.JobServiceAsyncClient()
# Initialize request argument(s)
request = aiplatform_v1beta1.GetModelDeploymentMonitoringJobRequest(
- name="projects/{project}/locations/{location}/modelDeploymentMonitoringJobs/{model_deployment_monitoring_job}",
+ name="name_value",
)
# Make the request
response = await client.get_model_deployment_monitoring_job(request=request)
- # Handle response
+ # Handle the response
print(response)
# [END aiplatform_generated_aiplatform_v1beta1_JobService_GetModelDeploymentMonitoringJob_async]
diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_job_service_get_model_deployment_monitoring_job_sync.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_job_service_get_model_deployment_monitoring_job_sync.py
index 777299bedd..8e7f5451b6 100644
--- a/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_job_service_get_model_deployment_monitoring_job_sync.py
+++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_job_service_get_model_deployment_monitoring_job_sync.py
@@ -28,20 +28,18 @@
def sample_get_model_deployment_monitoring_job():
- """Snippet for get_model_deployment_monitoring_job"""
-
# Create a client
client = aiplatform_v1beta1.JobServiceClient()
# Initialize request argument(s)
request = aiplatform_v1beta1.GetModelDeploymentMonitoringJobRequest(
- name="projects/{project}/locations/{location}/modelDeploymentMonitoringJobs/{model_deployment_monitoring_job}",
+ name="name_value",
)
# Make the request
response = client.get_model_deployment_monitoring_job(request=request)
- # Handle response
+ # Handle the response
print(response)
# [END aiplatform_generated_aiplatform_v1beta1_JobService_GetModelDeploymentMonitoringJob_sync]
diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_job_service_list_batch_prediction_jobs_async.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_job_service_list_batch_prediction_jobs_async.py
index cc4036df1d..7e915444d1 100644
--- a/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_job_service_list_batch_prediction_jobs_async.py
+++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_job_service_list_batch_prediction_jobs_async.py
@@ -28,18 +28,18 @@
async def sample_list_batch_prediction_jobs():
- """Snippet for list_batch_prediction_jobs"""
-
# Create a client
client = aiplatform_v1beta1.JobServiceAsyncClient()
# Initialize request argument(s)
request = aiplatform_v1beta1.ListBatchPredictionJobsRequest(
- parent="projects/{project}/locations/{location}",
+ parent="parent_value",
)
# Make the request
page_result = client.list_batch_prediction_jobs(request=request)
+
+ # Handle the response
async for response in page_result:
print(response)
diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_job_service_list_batch_prediction_jobs_sync.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_job_service_list_batch_prediction_jobs_sync.py
index 809e7d00a2..33b7c9b80b 100644
--- a/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_job_service_list_batch_prediction_jobs_sync.py
+++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_job_service_list_batch_prediction_jobs_sync.py
@@ -28,18 +28,18 @@
def sample_list_batch_prediction_jobs():
- """Snippet for list_batch_prediction_jobs"""
-
# Create a client
client = aiplatform_v1beta1.JobServiceClient()
# Initialize request argument(s)
request = aiplatform_v1beta1.ListBatchPredictionJobsRequest(
- parent="projects/{project}/locations/{location}",
+ parent="parent_value",
)
# Make the request
page_result = client.list_batch_prediction_jobs(request=request)
+
+ # Handle the response
for response in page_result:
print(response)
diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_job_service_list_custom_jobs_async.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_job_service_list_custom_jobs_async.py
index 7211eecb5a..8066776bac 100644
--- a/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_job_service_list_custom_jobs_async.py
+++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_job_service_list_custom_jobs_async.py
@@ -28,18 +28,18 @@
async def sample_list_custom_jobs():
- """Snippet for list_custom_jobs"""
-
# Create a client
client = aiplatform_v1beta1.JobServiceAsyncClient()
# Initialize request argument(s)
request = aiplatform_v1beta1.ListCustomJobsRequest(
- parent="projects/{project}/locations/{location}",
+ parent="parent_value",
)
# Make the request
page_result = client.list_custom_jobs(request=request)
+
+ # Handle the response
async for response in page_result:
print(response)
diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_job_service_list_custom_jobs_sync.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_job_service_list_custom_jobs_sync.py
index 318c24a148..26b81653e7 100644
--- a/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_job_service_list_custom_jobs_sync.py
+++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_job_service_list_custom_jobs_sync.py
@@ -28,18 +28,18 @@
def sample_list_custom_jobs():
- """Snippet for list_custom_jobs"""
-
# Create a client
client = aiplatform_v1beta1.JobServiceClient()
# Initialize request argument(s)
request = aiplatform_v1beta1.ListCustomJobsRequest(
- parent="projects/{project}/locations/{location}",
+ parent="parent_value",
)
# Make the request
page_result = client.list_custom_jobs(request=request)
+
+ # Handle the response
for response in page_result:
print(response)
diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_job_service_list_data_labeling_jobs_async.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_job_service_list_data_labeling_jobs_async.py
index 81448d0685..32cad8d354 100644
--- a/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_job_service_list_data_labeling_jobs_async.py
+++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_job_service_list_data_labeling_jobs_async.py
@@ -28,18 +28,18 @@
async def sample_list_data_labeling_jobs():
- """Snippet for list_data_labeling_jobs"""
-
# Create a client
client = aiplatform_v1beta1.JobServiceAsyncClient()
# Initialize request argument(s)
request = aiplatform_v1beta1.ListDataLabelingJobsRequest(
- parent="projects/{project}/locations/{location}",
+ parent="parent_value",
)
# Make the request
page_result = client.list_data_labeling_jobs(request=request)
+
+ # Handle the response
async for response in page_result:
print(response)
diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_job_service_list_data_labeling_jobs_sync.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_job_service_list_data_labeling_jobs_sync.py
index 460754398f..d50459bc24 100644
--- a/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_job_service_list_data_labeling_jobs_sync.py
+++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_job_service_list_data_labeling_jobs_sync.py
@@ -28,18 +28,18 @@
def sample_list_data_labeling_jobs():
- """Snippet for list_data_labeling_jobs"""
-
# Create a client
client = aiplatform_v1beta1.JobServiceClient()
# Initialize request argument(s)
request = aiplatform_v1beta1.ListDataLabelingJobsRequest(
- parent="projects/{project}/locations/{location}",
+ parent="parent_value",
)
# Make the request
page_result = client.list_data_labeling_jobs(request=request)
+
+ # Handle the response
for response in page_result:
print(response)
diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_job_service_list_hyperparameter_tuning_jobs_async.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_job_service_list_hyperparameter_tuning_jobs_async.py
index 5b09acb8ae..bbee13435b 100644
--- a/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_job_service_list_hyperparameter_tuning_jobs_async.py
+++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_job_service_list_hyperparameter_tuning_jobs_async.py
@@ -28,18 +28,18 @@
async def sample_list_hyperparameter_tuning_jobs():
- """Snippet for list_hyperparameter_tuning_jobs"""
-
# Create a client
client = aiplatform_v1beta1.JobServiceAsyncClient()
# Initialize request argument(s)
request = aiplatform_v1beta1.ListHyperparameterTuningJobsRequest(
- parent="projects/{project}/locations/{location}",
+ parent="parent_value",
)
# Make the request
page_result = client.list_hyperparameter_tuning_jobs(request=request)
+
+ # Handle the response
async for response in page_result:
print(response)
diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_job_service_list_hyperparameter_tuning_jobs_sync.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_job_service_list_hyperparameter_tuning_jobs_sync.py
index 761cb5a966..bfe8aa8624 100644
--- a/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_job_service_list_hyperparameter_tuning_jobs_sync.py
+++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_job_service_list_hyperparameter_tuning_jobs_sync.py
@@ -28,18 +28,18 @@
def sample_list_hyperparameter_tuning_jobs():
- """Snippet for list_hyperparameter_tuning_jobs"""
-
# Create a client
client = aiplatform_v1beta1.JobServiceClient()
# Initialize request argument(s)
request = aiplatform_v1beta1.ListHyperparameterTuningJobsRequest(
- parent="projects/{project}/locations/{location}",
+ parent="parent_value",
)
# Make the request
page_result = client.list_hyperparameter_tuning_jobs(request=request)
+
+ # Handle the response
for response in page_result:
print(response)
diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_job_service_list_model_deployment_monitoring_jobs_async.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_job_service_list_model_deployment_monitoring_jobs_async.py
index 99ee3adb95..b063ed071c 100644
--- a/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_job_service_list_model_deployment_monitoring_jobs_async.py
+++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_job_service_list_model_deployment_monitoring_jobs_async.py
@@ -28,18 +28,18 @@
async def sample_list_model_deployment_monitoring_jobs():
- """Snippet for list_model_deployment_monitoring_jobs"""
-
# Create a client
client = aiplatform_v1beta1.JobServiceAsyncClient()
# Initialize request argument(s)
request = aiplatform_v1beta1.ListModelDeploymentMonitoringJobsRequest(
- parent="projects/{project}/locations/{location}",
+ parent="parent_value",
)
# Make the request
page_result = client.list_model_deployment_monitoring_jobs(request=request)
+
+ # Handle the response
async for response in page_result:
print(response)
diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_job_service_list_model_deployment_monitoring_jobs_sync.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_job_service_list_model_deployment_monitoring_jobs_sync.py
index fb7300cb63..9533d98066 100644
--- a/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_job_service_list_model_deployment_monitoring_jobs_sync.py
+++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_job_service_list_model_deployment_monitoring_jobs_sync.py
@@ -28,18 +28,18 @@
def sample_list_model_deployment_monitoring_jobs():
- """Snippet for list_model_deployment_monitoring_jobs"""
-
# Create a client
client = aiplatform_v1beta1.JobServiceClient()
# Initialize request argument(s)
request = aiplatform_v1beta1.ListModelDeploymentMonitoringJobsRequest(
- parent="projects/{project}/locations/{location}",
+ parent="parent_value",
)
# Make the request
page_result = client.list_model_deployment_monitoring_jobs(request=request)
+
+ # Handle the response
for response in page_result:
print(response)
diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_job_service_pause_model_deployment_monitoring_job_async.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_job_service_pause_model_deployment_monitoring_job_async.py
index d43910a375..200e177350 100644
--- a/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_job_service_pause_model_deployment_monitoring_job_async.py
+++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_job_service_pause_model_deployment_monitoring_job_async.py
@@ -28,18 +28,16 @@
async def sample_pause_model_deployment_monitoring_job():
- """Snippet for pause_model_deployment_monitoring_job"""
-
# Create a client
client = aiplatform_v1beta1.JobServiceAsyncClient()
# Initialize request argument(s)
request = aiplatform_v1beta1.PauseModelDeploymentMonitoringJobRequest(
- name="projects/{project}/locations/{location}/modelDeploymentMonitoringJobs/{model_deployment_monitoring_job}",
+ name="name_value",
)
# Make the request
- response = await client.pause_model_deployment_monitoring_job(request=request)
+ await client.pause_model_deployment_monitoring_job(request=request)
# [END aiplatform_generated_aiplatform_v1beta1_JobService_PauseModelDeploymentMonitoringJob_async]
diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_job_service_pause_model_deployment_monitoring_job_sync.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_job_service_pause_model_deployment_monitoring_job_sync.py
index 9b823f6e30..d73462b5df 100644
--- a/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_job_service_pause_model_deployment_monitoring_job_sync.py
+++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_job_service_pause_model_deployment_monitoring_job_sync.py
@@ -28,18 +28,16 @@
def sample_pause_model_deployment_monitoring_job():
- """Snippet for pause_model_deployment_monitoring_job"""
-
# Create a client
client = aiplatform_v1beta1.JobServiceClient()
# Initialize request argument(s)
request = aiplatform_v1beta1.PauseModelDeploymentMonitoringJobRequest(
- name="projects/{project}/locations/{location}/modelDeploymentMonitoringJobs/{model_deployment_monitoring_job}",
+ name="name_value",
)
# Make the request
- response = client.pause_model_deployment_monitoring_job(request=request)
+ client.pause_model_deployment_monitoring_job(request=request)
# [END aiplatform_generated_aiplatform_v1beta1_JobService_PauseModelDeploymentMonitoringJob_sync]
diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_job_service_resume_model_deployment_monitoring_job_async.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_job_service_resume_model_deployment_monitoring_job_async.py
index f3a5d4aac6..e88878037c 100644
--- a/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_job_service_resume_model_deployment_monitoring_job_async.py
+++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_job_service_resume_model_deployment_monitoring_job_async.py
@@ -28,18 +28,16 @@
async def sample_resume_model_deployment_monitoring_job():
- """Snippet for resume_model_deployment_monitoring_job"""
-
# Create a client
client = aiplatform_v1beta1.JobServiceAsyncClient()
# Initialize request argument(s)
request = aiplatform_v1beta1.ResumeModelDeploymentMonitoringJobRequest(
- name="projects/{project}/locations/{location}/modelDeploymentMonitoringJobs/{model_deployment_monitoring_job}",
+ name="name_value",
)
# Make the request
- response = await client.resume_model_deployment_monitoring_job(request=request)
+ await client.resume_model_deployment_monitoring_job(request=request)
# [END aiplatform_generated_aiplatform_v1beta1_JobService_ResumeModelDeploymentMonitoringJob_async]
diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_job_service_resume_model_deployment_monitoring_job_sync.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_job_service_resume_model_deployment_monitoring_job_sync.py
index eb8d06b88f..7061d47a9f 100644
--- a/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_job_service_resume_model_deployment_monitoring_job_sync.py
+++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_job_service_resume_model_deployment_monitoring_job_sync.py
@@ -28,18 +28,16 @@
def sample_resume_model_deployment_monitoring_job():
- """Snippet for resume_model_deployment_monitoring_job"""
-
# Create a client
client = aiplatform_v1beta1.JobServiceClient()
# Initialize request argument(s)
request = aiplatform_v1beta1.ResumeModelDeploymentMonitoringJobRequest(
- name="projects/{project}/locations/{location}/modelDeploymentMonitoringJobs/{model_deployment_monitoring_job}",
+ name="name_value",
)
# Make the request
- response = client.resume_model_deployment_monitoring_job(request=request)
+ client.resume_model_deployment_monitoring_job(request=request)
# [END aiplatform_generated_aiplatform_v1beta1_JobService_ResumeModelDeploymentMonitoringJob_sync]
diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_job_service_search_model_deployment_monitoring_stats_anomalies_async.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_job_service_search_model_deployment_monitoring_stats_anomalies_async.py
index f50e3e3925..4d80cd94b7 100644
--- a/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_job_service_search_model_deployment_monitoring_stats_anomalies_async.py
+++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_job_service_search_model_deployment_monitoring_stats_anomalies_async.py
@@ -28,19 +28,19 @@
async def sample_search_model_deployment_monitoring_stats_anomalies():
- """Snippet for search_model_deployment_monitoring_stats_anomalies"""
-
# Create a client
client = aiplatform_v1beta1.JobServiceAsyncClient()
# Initialize request argument(s)
request = aiplatform_v1beta1.SearchModelDeploymentMonitoringStatsAnomaliesRequest(
- model_deployment_monitoring_job="projects/{project}/locations/{location}/modelDeploymentMonitoringJobs/{model_deployment_monitoring_job}",
+ model_deployment_monitoring_job="model_deployment_monitoring_job_value",
deployed_model_id="deployed_model_id_value",
)
# Make the request
page_result = client.search_model_deployment_monitoring_stats_anomalies(request=request)
+
+ # Handle the response
async for response in page_result:
print(response)
diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_job_service_search_model_deployment_monitoring_stats_anomalies_sync.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_job_service_search_model_deployment_monitoring_stats_anomalies_sync.py
index d4ec57242f..cf7d678475 100644
--- a/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_job_service_search_model_deployment_monitoring_stats_anomalies_sync.py
+++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_job_service_search_model_deployment_monitoring_stats_anomalies_sync.py
@@ -28,19 +28,19 @@
def sample_search_model_deployment_monitoring_stats_anomalies():
- """Snippet for search_model_deployment_monitoring_stats_anomalies"""
-
# Create a client
client = aiplatform_v1beta1.JobServiceClient()
# Initialize request argument(s)
request = aiplatform_v1beta1.SearchModelDeploymentMonitoringStatsAnomaliesRequest(
- model_deployment_monitoring_job="projects/{project}/locations/{location}/modelDeploymentMonitoringJobs/{model_deployment_monitoring_job}",
+ model_deployment_monitoring_job="model_deployment_monitoring_job_value",
deployed_model_id="deployed_model_id_value",
)
# Make the request
page_result = client.search_model_deployment_monitoring_stats_anomalies(request=request)
+
+ # Handle the response
for response in page_result:
print(response)
diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_job_service_update_model_deployment_monitoring_job_async.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_job_service_update_model_deployment_monitoring_job_async.py
index b50e0159d5..d5b40ae443 100644
--- a/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_job_service_update_model_deployment_monitoring_job_async.py
+++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_job_service_update_model_deployment_monitoring_job_async.py
@@ -28,15 +28,13 @@
async def sample_update_model_deployment_monitoring_job():
- """Snippet for update_model_deployment_monitoring_job"""
-
# Create a client
client = aiplatform_v1beta1.JobServiceAsyncClient()
# Initialize request argument(s)
model_deployment_monitoring_job = aiplatform_v1beta1.ModelDeploymentMonitoringJob()
model_deployment_monitoring_job.display_name = "display_name_value"
- model_deployment_monitoring_job.endpoint = "projects/{project}/locations/{location}/endpoints/{endpoint}"
+ model_deployment_monitoring_job.endpoint = "endpoint_value"
request = aiplatform_v1beta1.UpdateModelDeploymentMonitoringJobRequest(
model_deployment_monitoring_job=model_deployment_monitoring_job,
@@ -48,6 +46,8 @@ async def sample_update_model_deployment_monitoring_job():
print("Waiting for operation to complete...")
response = await operation.result()
+
+ # Handle the response
print(response)
# [END aiplatform_generated_aiplatform_v1beta1_JobService_UpdateModelDeploymentMonitoringJob_async]
diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_job_service_update_model_deployment_monitoring_job_sync.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_job_service_update_model_deployment_monitoring_job_sync.py
index f600d3007e..0e51eb816b 100644
--- a/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_job_service_update_model_deployment_monitoring_job_sync.py
+++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_job_service_update_model_deployment_monitoring_job_sync.py
@@ -28,15 +28,13 @@
def sample_update_model_deployment_monitoring_job():
- """Snippet for update_model_deployment_monitoring_job"""
-
# Create a client
client = aiplatform_v1beta1.JobServiceClient()
# Initialize request argument(s)
model_deployment_monitoring_job = aiplatform_v1beta1.ModelDeploymentMonitoringJob()
model_deployment_monitoring_job.display_name = "display_name_value"
- model_deployment_monitoring_job.endpoint = "projects/{project}/locations/{location}/endpoints/{endpoint}"
+ model_deployment_monitoring_job.endpoint = "endpoint_value"
request = aiplatform_v1beta1.UpdateModelDeploymentMonitoringJobRequest(
model_deployment_monitoring_job=model_deployment_monitoring_job,
@@ -48,6 +46,8 @@ def sample_update_model_deployment_monitoring_job():
print("Waiting for operation to complete...")
response = operation.result()
+
+ # Handle the response
print(response)
# [END aiplatform_generated_aiplatform_v1beta1_JobService_UpdateModelDeploymentMonitoringJob_sync]
diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_metadata_service_add_context_artifacts_and_executions_async.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_metadata_service_add_context_artifacts_and_executions_async.py
index 8f7183bc3e..4f3fee2c3c 100644
--- a/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_metadata_service_add_context_artifacts_and_executions_async.py
+++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_metadata_service_add_context_artifacts_and_executions_async.py
@@ -28,20 +28,18 @@
async def sample_add_context_artifacts_and_executions():
- """Snippet for add_context_artifacts_and_executions"""
-
# Create a client
client = aiplatform_v1beta1.MetadataServiceAsyncClient()
# Initialize request argument(s)
request = aiplatform_v1beta1.AddContextArtifactsAndExecutionsRequest(
- context="projects/{project}/locations/{location}/metadataStores/{metadata_store}/contexts/{context}",
+ context="context_value",
)
# Make the request
response = await client.add_context_artifacts_and_executions(request=request)
- # Handle response
+ # Handle the response
print(response)
# [END aiplatform_generated_aiplatform_v1beta1_MetadataService_AddContextArtifactsAndExecutions_async]
diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_metadata_service_add_context_artifacts_and_executions_sync.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_metadata_service_add_context_artifacts_and_executions_sync.py
index a2b7699a78..7485ed98cb 100644
--- a/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_metadata_service_add_context_artifacts_and_executions_sync.py
+++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_metadata_service_add_context_artifacts_and_executions_sync.py
@@ -28,20 +28,18 @@
def sample_add_context_artifacts_and_executions():
- """Snippet for add_context_artifacts_and_executions"""
-
# Create a client
client = aiplatform_v1beta1.MetadataServiceClient()
# Initialize request argument(s)
request = aiplatform_v1beta1.AddContextArtifactsAndExecutionsRequest(
- context="projects/{project}/locations/{location}/metadataStores/{metadata_store}/contexts/{context}",
+ context="context_value",
)
# Make the request
response = client.add_context_artifacts_and_executions(request=request)
- # Handle response
+ # Handle the response
print(response)
# [END aiplatform_generated_aiplatform_v1beta1_MetadataService_AddContextArtifactsAndExecutions_sync]
diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_metadata_service_add_context_children_async.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_metadata_service_add_context_children_async.py
index 7a2967260a..2697d870be 100644
--- a/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_metadata_service_add_context_children_async.py
+++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_metadata_service_add_context_children_async.py
@@ -28,20 +28,18 @@
async def sample_add_context_children():
- """Snippet for add_context_children"""
-
# Create a client
client = aiplatform_v1beta1.MetadataServiceAsyncClient()
# Initialize request argument(s)
request = aiplatform_v1beta1.AddContextChildrenRequest(
- context="projects/{project}/locations/{location}/metadataStores/{metadata_store}/contexts/{context}",
+ context="context_value",
)
# Make the request
response = await client.add_context_children(request=request)
- # Handle response
+ # Handle the response
print(response)
# [END aiplatform_generated_aiplatform_v1beta1_MetadataService_AddContextChildren_async]
diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_metadata_service_add_context_children_sync.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_metadata_service_add_context_children_sync.py
index 1688e547a6..635d4dea4b 100644
--- a/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_metadata_service_add_context_children_sync.py
+++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_metadata_service_add_context_children_sync.py
@@ -28,20 +28,18 @@
def sample_add_context_children():
- """Snippet for add_context_children"""
-
# Create a client
client = aiplatform_v1beta1.MetadataServiceClient()
# Initialize request argument(s)
request = aiplatform_v1beta1.AddContextChildrenRequest(
- context="projects/{project}/locations/{location}/metadataStores/{metadata_store}/contexts/{context}",
+ context="context_value",
)
# Make the request
response = client.add_context_children(request=request)
- # Handle response
+ # Handle the response
print(response)
# [END aiplatform_generated_aiplatform_v1beta1_MetadataService_AddContextChildren_sync]
diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_metadata_service_add_execution_events_async.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_metadata_service_add_execution_events_async.py
index 34c4e832d7..4b68d55c0f 100644
--- a/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_metadata_service_add_execution_events_async.py
+++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_metadata_service_add_execution_events_async.py
@@ -28,20 +28,18 @@
async def sample_add_execution_events():
- """Snippet for add_execution_events"""
-
# Create a client
client = aiplatform_v1beta1.MetadataServiceAsyncClient()
# Initialize request argument(s)
request = aiplatform_v1beta1.AddExecutionEventsRequest(
- execution="projects/{project}/locations/{location}/metadataStores/{metadata_store}/executions/{execution}",
+ execution="execution_value",
)
# Make the request
response = await client.add_execution_events(request=request)
- # Handle response
+ # Handle the response
print(response)
# [END aiplatform_generated_aiplatform_v1beta1_MetadataService_AddExecutionEvents_async]
diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_metadata_service_add_execution_events_sync.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_metadata_service_add_execution_events_sync.py
index 67196dd21a..3a2b11d7f1 100644
--- a/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_metadata_service_add_execution_events_sync.py
+++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_metadata_service_add_execution_events_sync.py
@@ -28,20 +28,18 @@
def sample_add_execution_events():
- """Snippet for add_execution_events"""
-
# Create a client
client = aiplatform_v1beta1.MetadataServiceClient()
# Initialize request argument(s)
request = aiplatform_v1beta1.AddExecutionEventsRequest(
- execution="projects/{project}/locations/{location}/metadataStores/{metadata_store}/executions/{execution}",
+ execution="execution_value",
)
# Make the request
response = client.add_execution_events(request=request)
- # Handle response
+ # Handle the response
print(response)
# [END aiplatform_generated_aiplatform_v1beta1_MetadataService_AddExecutionEvents_sync]
diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_metadata_service_create_artifact_async.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_metadata_service_create_artifact_async.py
index 6e573f979c..8d4a133daa 100644
--- a/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_metadata_service_create_artifact_async.py
+++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_metadata_service_create_artifact_async.py
@@ -28,20 +28,18 @@
async def sample_create_artifact():
- """Snippet for create_artifact"""
-
# Create a client
client = aiplatform_v1beta1.MetadataServiceAsyncClient()
# Initialize request argument(s)
request = aiplatform_v1beta1.CreateArtifactRequest(
- parent="projects/{project}/locations/{location}/metadataStores/{metadata_store}",
+ parent="parent_value",
)
# Make the request
response = await client.create_artifact(request=request)
- # Handle response
+ # Handle the response
print(response)
# [END aiplatform_generated_aiplatform_v1beta1_MetadataService_CreateArtifact_async]
diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_metadata_service_create_artifact_sync.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_metadata_service_create_artifact_sync.py
index 257b695e36..55527bf840 100644
--- a/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_metadata_service_create_artifact_sync.py
+++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_metadata_service_create_artifact_sync.py
@@ -28,20 +28,18 @@
def sample_create_artifact():
- """Snippet for create_artifact"""
-
# Create a client
client = aiplatform_v1beta1.MetadataServiceClient()
# Initialize request argument(s)
request = aiplatform_v1beta1.CreateArtifactRequest(
- parent="projects/{project}/locations/{location}/metadataStores/{metadata_store}",
+ parent="parent_value",
)
# Make the request
response = client.create_artifact(request=request)
- # Handle response
+ # Handle the response
print(response)
# [END aiplatform_generated_aiplatform_v1beta1_MetadataService_CreateArtifact_sync]
diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_metadata_service_create_context_async.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_metadata_service_create_context_async.py
index 4e254cd501..44b800e67b 100644
--- a/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_metadata_service_create_context_async.py
+++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_metadata_service_create_context_async.py
@@ -28,20 +28,18 @@
async def sample_create_context():
- """Snippet for create_context"""
-
# Create a client
client = aiplatform_v1beta1.MetadataServiceAsyncClient()
# Initialize request argument(s)
request = aiplatform_v1beta1.CreateContextRequest(
- parent="projects/{project}/locations/{location}/metadataStores/{metadata_store}",
+ parent="parent_value",
)
# Make the request
response = await client.create_context(request=request)
- # Handle response
+ # Handle the response
print(response)
# [END aiplatform_generated_aiplatform_v1beta1_MetadataService_CreateContext_async]
diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_metadata_service_create_context_sync.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_metadata_service_create_context_sync.py
index b96aa56687..8ec94f0832 100644
--- a/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_metadata_service_create_context_sync.py
+++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_metadata_service_create_context_sync.py
@@ -28,20 +28,18 @@
def sample_create_context():
- """Snippet for create_context"""
-
# Create a client
client = aiplatform_v1beta1.MetadataServiceClient()
# Initialize request argument(s)
request = aiplatform_v1beta1.CreateContextRequest(
- parent="projects/{project}/locations/{location}/metadataStores/{metadata_store}",
+ parent="parent_value",
)
# Make the request
response = client.create_context(request=request)
- # Handle response
+ # Handle the response
print(response)
# [END aiplatform_generated_aiplatform_v1beta1_MetadataService_CreateContext_sync]
diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_metadata_service_create_execution_async.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_metadata_service_create_execution_async.py
index 0d4af5f01e..32dd1ccabd 100644
--- a/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_metadata_service_create_execution_async.py
+++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_metadata_service_create_execution_async.py
@@ -28,20 +28,18 @@
async def sample_create_execution():
- """Snippet for create_execution"""
-
# Create a client
client = aiplatform_v1beta1.MetadataServiceAsyncClient()
# Initialize request argument(s)
request = aiplatform_v1beta1.CreateExecutionRequest(
- parent="projects/{project}/locations/{location}/metadataStores/{metadata_store}",
+ parent="parent_value",
)
# Make the request
response = await client.create_execution(request=request)
- # Handle response
+ # Handle the response
print(response)
# [END aiplatform_generated_aiplatform_v1beta1_MetadataService_CreateExecution_async]
diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_metadata_service_create_execution_sync.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_metadata_service_create_execution_sync.py
index 046f842c2c..05d3270f35 100644
--- a/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_metadata_service_create_execution_sync.py
+++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_metadata_service_create_execution_sync.py
@@ -28,20 +28,18 @@
def sample_create_execution():
- """Snippet for create_execution"""
-
# Create a client
client = aiplatform_v1beta1.MetadataServiceClient()
# Initialize request argument(s)
request = aiplatform_v1beta1.CreateExecutionRequest(
- parent="projects/{project}/locations/{location}/metadataStores/{metadata_store}",
+ parent="parent_value",
)
# Make the request
response = client.create_execution(request=request)
- # Handle response
+ # Handle the response
print(response)
# [END aiplatform_generated_aiplatform_v1beta1_MetadataService_CreateExecution_sync]
diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_metadata_service_create_metadata_schema_async.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_metadata_service_create_metadata_schema_async.py
index 1bf2ca748b..36a18859cb 100644
--- a/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_metadata_service_create_metadata_schema_async.py
+++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_metadata_service_create_metadata_schema_async.py
@@ -28,8 +28,6 @@
async def sample_create_metadata_schema():
- """Snippet for create_metadata_schema"""
-
# Create a client
client = aiplatform_v1beta1.MetadataServiceAsyncClient()
@@ -38,14 +36,14 @@ async def sample_create_metadata_schema():
metadata_schema.schema = "schema_value"
request = aiplatform_v1beta1.CreateMetadataSchemaRequest(
- parent="projects/{project}/locations/{location}/metadataStores/{metadata_store}",
+ parent="parent_value",
metadata_schema=metadata_schema,
)
# Make the request
response = await client.create_metadata_schema(request=request)
- # Handle response
+ # Handle the response
print(response)
# [END aiplatform_generated_aiplatform_v1beta1_MetadataService_CreateMetadataSchema_async]
diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_metadata_service_create_metadata_schema_sync.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_metadata_service_create_metadata_schema_sync.py
index b6eace9a35..2d4afec703 100644
--- a/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_metadata_service_create_metadata_schema_sync.py
+++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_metadata_service_create_metadata_schema_sync.py
@@ -28,8 +28,6 @@
def sample_create_metadata_schema():
- """Snippet for create_metadata_schema"""
-
# Create a client
client = aiplatform_v1beta1.MetadataServiceClient()
@@ -38,14 +36,14 @@ def sample_create_metadata_schema():
metadata_schema.schema = "schema_value"
request = aiplatform_v1beta1.CreateMetadataSchemaRequest(
- parent="projects/{project}/locations/{location}/metadataStores/{metadata_store}",
+ parent="parent_value",
metadata_schema=metadata_schema,
)
# Make the request
response = client.create_metadata_schema(request=request)
- # Handle response
+ # Handle the response
print(response)
# [END aiplatform_generated_aiplatform_v1beta1_MetadataService_CreateMetadataSchema_sync]
diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_metadata_service_create_metadata_store_async.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_metadata_service_create_metadata_store_async.py
index e204e98193..887b34a2d4 100644
--- a/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_metadata_service_create_metadata_store_async.py
+++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_metadata_service_create_metadata_store_async.py
@@ -28,14 +28,12 @@
async def sample_create_metadata_store():
- """Snippet for create_metadata_store"""
-
# Create a client
client = aiplatform_v1beta1.MetadataServiceAsyncClient()
# Initialize request argument(s)
request = aiplatform_v1beta1.CreateMetadataStoreRequest(
- parent="projects/{project}/locations/{location}",
+ parent="parent_value",
)
# Make the request
@@ -44,6 +42,8 @@ async def sample_create_metadata_store():
print("Waiting for operation to complete...")
response = await operation.result()
+
+ # Handle the response
print(response)
# [END aiplatform_generated_aiplatform_v1beta1_MetadataService_CreateMetadataStore_async]
diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_metadata_service_create_metadata_store_sync.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_metadata_service_create_metadata_store_sync.py
index a00f2d05df..baf3ca0899 100644
--- a/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_metadata_service_create_metadata_store_sync.py
+++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_metadata_service_create_metadata_store_sync.py
@@ -28,14 +28,12 @@
def sample_create_metadata_store():
- """Snippet for create_metadata_store"""
-
# Create a client
client = aiplatform_v1beta1.MetadataServiceClient()
# Initialize request argument(s)
request = aiplatform_v1beta1.CreateMetadataStoreRequest(
- parent="projects/{project}/locations/{location}",
+ parent="parent_value",
)
# Make the request
@@ -44,6 +42,8 @@ def sample_create_metadata_store():
print("Waiting for operation to complete...")
response = operation.result()
+
+ # Handle the response
print(response)
# [END aiplatform_generated_aiplatform_v1beta1_MetadataService_CreateMetadataStore_sync]
diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_metadata_service_delete_artifact_async.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_metadata_service_delete_artifact_async.py
index 6c95d08087..d680920684 100644
--- a/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_metadata_service_delete_artifact_async.py
+++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_metadata_service_delete_artifact_async.py
@@ -28,14 +28,12 @@
async def sample_delete_artifact():
- """Snippet for delete_artifact"""
-
# Create a client
client = aiplatform_v1beta1.MetadataServiceAsyncClient()
# Initialize request argument(s)
request = aiplatform_v1beta1.DeleteArtifactRequest(
- name="projects/{project}/locations/{location}/metadataStores/{metadata_store}/artifacts/{artifact}",
+ name="name_value",
)
# Make the request
@@ -44,6 +42,8 @@ async def sample_delete_artifact():
print("Waiting for operation to complete...")
response = await operation.result()
+
+ # Handle the response
print(response)
# [END aiplatform_generated_aiplatform_v1beta1_MetadataService_DeleteArtifact_async]
diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_metadata_service_delete_artifact_sync.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_metadata_service_delete_artifact_sync.py
index 99946aed3a..a6c0e861e7 100644
--- a/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_metadata_service_delete_artifact_sync.py
+++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_metadata_service_delete_artifact_sync.py
@@ -28,14 +28,12 @@
def sample_delete_artifact():
- """Snippet for delete_artifact"""
-
# Create a client
client = aiplatform_v1beta1.MetadataServiceClient()
# Initialize request argument(s)
request = aiplatform_v1beta1.DeleteArtifactRequest(
- name="projects/{project}/locations/{location}/metadataStores/{metadata_store}/artifacts/{artifact}",
+ name="name_value",
)
# Make the request
@@ -44,6 +42,8 @@ def sample_delete_artifact():
print("Waiting for operation to complete...")
response = operation.result()
+
+ # Handle the response
print(response)
# [END aiplatform_generated_aiplatform_v1beta1_MetadataService_DeleteArtifact_sync]
diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_metadata_service_delete_context_async.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_metadata_service_delete_context_async.py
index c02bfe4685..e883988bdb 100644
--- a/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_metadata_service_delete_context_async.py
+++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_metadata_service_delete_context_async.py
@@ -28,14 +28,12 @@
async def sample_delete_context():
- """Snippet for delete_context"""
-
# Create a client
client = aiplatform_v1beta1.MetadataServiceAsyncClient()
# Initialize request argument(s)
request = aiplatform_v1beta1.DeleteContextRequest(
- name="projects/{project}/locations/{location}/metadataStores/{metadata_store}/contexts/{context}",
+ name="name_value",
)
# Make the request
@@ -44,6 +42,8 @@ async def sample_delete_context():
print("Waiting for operation to complete...")
response = await operation.result()
+
+ # Handle the response
print(response)
# [END aiplatform_generated_aiplatform_v1beta1_MetadataService_DeleteContext_async]
diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_metadata_service_delete_context_sync.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_metadata_service_delete_context_sync.py
index 30815c91a7..5c28cd649a 100644
--- a/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_metadata_service_delete_context_sync.py
+++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_metadata_service_delete_context_sync.py
@@ -28,14 +28,12 @@
def sample_delete_context():
- """Snippet for delete_context"""
-
# Create a client
client = aiplatform_v1beta1.MetadataServiceClient()
# Initialize request argument(s)
request = aiplatform_v1beta1.DeleteContextRequest(
- name="projects/{project}/locations/{location}/metadataStores/{metadata_store}/contexts/{context}",
+ name="name_value",
)
# Make the request
@@ -44,6 +42,8 @@ def sample_delete_context():
print("Waiting for operation to complete...")
response = operation.result()
+
+ # Handle the response
print(response)
# [END aiplatform_generated_aiplatform_v1beta1_MetadataService_DeleteContext_sync]
diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_metadata_service_delete_execution_async.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_metadata_service_delete_execution_async.py
index 82f71880cd..e015618490 100644
--- a/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_metadata_service_delete_execution_async.py
+++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_metadata_service_delete_execution_async.py
@@ -28,14 +28,12 @@
async def sample_delete_execution():
- """Snippet for delete_execution"""
-
# Create a client
client = aiplatform_v1beta1.MetadataServiceAsyncClient()
# Initialize request argument(s)
request = aiplatform_v1beta1.DeleteExecutionRequest(
- name="projects/{project}/locations/{location}/metadataStores/{metadata_store}/executions/{execution}",
+ name="name_value",
)
# Make the request
@@ -44,6 +42,8 @@ async def sample_delete_execution():
print("Waiting for operation to complete...")
response = await operation.result()
+
+ # Handle the response
print(response)
# [END aiplatform_generated_aiplatform_v1beta1_MetadataService_DeleteExecution_async]
diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_metadata_service_delete_execution_sync.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_metadata_service_delete_execution_sync.py
index f5f72019cb..f064292d9a 100644
--- a/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_metadata_service_delete_execution_sync.py
+++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_metadata_service_delete_execution_sync.py
@@ -28,14 +28,12 @@
def sample_delete_execution():
- """Snippet for delete_execution"""
-
# Create a client
client = aiplatform_v1beta1.MetadataServiceClient()
# Initialize request argument(s)
request = aiplatform_v1beta1.DeleteExecutionRequest(
- name="projects/{project}/locations/{location}/metadataStores/{metadata_store}/executions/{execution}",
+ name="name_value",
)
# Make the request
@@ -44,6 +42,8 @@ def sample_delete_execution():
print("Waiting for operation to complete...")
response = operation.result()
+
+ # Handle the response
print(response)
# [END aiplatform_generated_aiplatform_v1beta1_MetadataService_DeleteExecution_sync]
diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_metadata_service_delete_metadata_store_async.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_metadata_service_delete_metadata_store_async.py
index cfafda0d97..f43235b370 100644
--- a/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_metadata_service_delete_metadata_store_async.py
+++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_metadata_service_delete_metadata_store_async.py
@@ -28,14 +28,12 @@
async def sample_delete_metadata_store():
- """Snippet for delete_metadata_store"""
-
# Create a client
client = aiplatform_v1beta1.MetadataServiceAsyncClient()
# Initialize request argument(s)
request = aiplatform_v1beta1.DeleteMetadataStoreRequest(
- name="projects/{project}/locations/{location}/metadataStores/{metadata_store}",
+ name="name_value",
)
# Make the request
@@ -44,6 +42,8 @@ async def sample_delete_metadata_store():
print("Waiting for operation to complete...")
response = await operation.result()
+
+ # Handle the response
print(response)
# [END aiplatform_generated_aiplatform_v1beta1_MetadataService_DeleteMetadataStore_async]
diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_metadata_service_delete_metadata_store_sync.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_metadata_service_delete_metadata_store_sync.py
index 7cf8a70b18..43551170ed 100644
--- a/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_metadata_service_delete_metadata_store_sync.py
+++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_metadata_service_delete_metadata_store_sync.py
@@ -28,14 +28,12 @@
def sample_delete_metadata_store():
- """Snippet for delete_metadata_store"""
-
# Create a client
client = aiplatform_v1beta1.MetadataServiceClient()
# Initialize request argument(s)
request = aiplatform_v1beta1.DeleteMetadataStoreRequest(
- name="projects/{project}/locations/{location}/metadataStores/{metadata_store}",
+ name="name_value",
)
# Make the request
@@ -44,6 +42,8 @@ def sample_delete_metadata_store():
print("Waiting for operation to complete...")
response = operation.result()
+
+ # Handle the response
print(response)
# [END aiplatform_generated_aiplatform_v1beta1_MetadataService_DeleteMetadataStore_sync]
diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_metadata_service_get_artifact_async.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_metadata_service_get_artifact_async.py
index 99013ee78e..d7ac573e0b 100644
--- a/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_metadata_service_get_artifact_async.py
+++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_metadata_service_get_artifact_async.py
@@ -28,20 +28,18 @@
async def sample_get_artifact():
- """Snippet for get_artifact"""
-
# Create a client
client = aiplatform_v1beta1.MetadataServiceAsyncClient()
# Initialize request argument(s)
request = aiplatform_v1beta1.GetArtifactRequest(
- name="projects/{project}/locations/{location}/metadataStores/{metadata_store}/artifacts/{artifact}",
+ name="name_value",
)
# Make the request
response = await client.get_artifact(request=request)
- # Handle response
+ # Handle the response
print(response)
# [END aiplatform_generated_aiplatform_v1beta1_MetadataService_GetArtifact_async]
diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_metadata_service_get_artifact_sync.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_metadata_service_get_artifact_sync.py
index e232af887f..c128c880ca 100644
--- a/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_metadata_service_get_artifact_sync.py
+++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_metadata_service_get_artifact_sync.py
@@ -28,20 +28,18 @@
def sample_get_artifact():
- """Snippet for get_artifact"""
-
# Create a client
client = aiplatform_v1beta1.MetadataServiceClient()
# Initialize request argument(s)
request = aiplatform_v1beta1.GetArtifactRequest(
- name="projects/{project}/locations/{location}/metadataStores/{metadata_store}/artifacts/{artifact}",
+ name="name_value",
)
# Make the request
response = client.get_artifact(request=request)
- # Handle response
+ # Handle the response
print(response)
# [END aiplatform_generated_aiplatform_v1beta1_MetadataService_GetArtifact_sync]
diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_metadata_service_get_context_async.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_metadata_service_get_context_async.py
index 91c1b134d9..4253527cf6 100644
--- a/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_metadata_service_get_context_async.py
+++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_metadata_service_get_context_async.py
@@ -28,20 +28,18 @@
async def sample_get_context():
- """Snippet for get_context"""
-
# Create a client
client = aiplatform_v1beta1.MetadataServiceAsyncClient()
# Initialize request argument(s)
request = aiplatform_v1beta1.GetContextRequest(
- name="projects/{project}/locations/{location}/metadataStores/{metadata_store}/contexts/{context}",
+ name="name_value",
)
# Make the request
response = await client.get_context(request=request)
- # Handle response
+ # Handle the response
print(response)
# [END aiplatform_generated_aiplatform_v1beta1_MetadataService_GetContext_async]
diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_metadata_service_get_context_sync.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_metadata_service_get_context_sync.py
index 906c5d09f5..a5e4694049 100644
--- a/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_metadata_service_get_context_sync.py
+++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_metadata_service_get_context_sync.py
@@ -28,20 +28,18 @@
def sample_get_context():
- """Snippet for get_context"""
-
# Create a client
client = aiplatform_v1beta1.MetadataServiceClient()
# Initialize request argument(s)
request = aiplatform_v1beta1.GetContextRequest(
- name="projects/{project}/locations/{location}/metadataStores/{metadata_store}/contexts/{context}",
+ name="name_value",
)
# Make the request
response = client.get_context(request=request)
- # Handle response
+ # Handle the response
print(response)
# [END aiplatform_generated_aiplatform_v1beta1_MetadataService_GetContext_sync]
diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_metadata_service_get_execution_async.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_metadata_service_get_execution_async.py
index efc6359ddd..66012f9e20 100644
--- a/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_metadata_service_get_execution_async.py
+++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_metadata_service_get_execution_async.py
@@ -28,20 +28,18 @@
async def sample_get_execution():
- """Snippet for get_execution"""
-
# Create a client
client = aiplatform_v1beta1.MetadataServiceAsyncClient()
# Initialize request argument(s)
request = aiplatform_v1beta1.GetExecutionRequest(
- name="projects/{project}/locations/{location}/metadataStores/{metadata_store}/executions/{execution}",
+ name="name_value",
)
# Make the request
response = await client.get_execution(request=request)
- # Handle response
+ # Handle the response
print(response)
# [END aiplatform_generated_aiplatform_v1beta1_MetadataService_GetExecution_async]
diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_metadata_service_get_execution_sync.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_metadata_service_get_execution_sync.py
index 9e3b286c09..b99ff65e70 100644
--- a/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_metadata_service_get_execution_sync.py
+++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_metadata_service_get_execution_sync.py
@@ -28,20 +28,18 @@
def sample_get_execution():
- """Snippet for get_execution"""
-
# Create a client
client = aiplatform_v1beta1.MetadataServiceClient()
# Initialize request argument(s)
request = aiplatform_v1beta1.GetExecutionRequest(
- name="projects/{project}/locations/{location}/metadataStores/{metadata_store}/executions/{execution}",
+ name="name_value",
)
# Make the request
response = client.get_execution(request=request)
- # Handle response
+ # Handle the response
print(response)
# [END aiplatform_generated_aiplatform_v1beta1_MetadataService_GetExecution_sync]
diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_metadata_service_get_metadata_schema_async.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_metadata_service_get_metadata_schema_async.py
index c2a0424236..0bb95638a7 100644
--- a/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_metadata_service_get_metadata_schema_async.py
+++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_metadata_service_get_metadata_schema_async.py
@@ -28,20 +28,18 @@
async def sample_get_metadata_schema():
- """Snippet for get_metadata_schema"""
-
# Create a client
client = aiplatform_v1beta1.MetadataServiceAsyncClient()
# Initialize request argument(s)
request = aiplatform_v1beta1.GetMetadataSchemaRequest(
- name="projects/{project}/locations/{location}/metadataStores/{metadata_store}/metadataSchemas/{metadata_schema}",
+ name="name_value",
)
# Make the request
response = await client.get_metadata_schema(request=request)
- # Handle response
+ # Handle the response
print(response)
# [END aiplatform_generated_aiplatform_v1beta1_MetadataService_GetMetadataSchema_async]
diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_metadata_service_get_metadata_schema_sync.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_metadata_service_get_metadata_schema_sync.py
index 8133bf29fa..ab4a6eb4ec 100644
--- a/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_metadata_service_get_metadata_schema_sync.py
+++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_metadata_service_get_metadata_schema_sync.py
@@ -28,20 +28,18 @@
def sample_get_metadata_schema():
- """Snippet for get_metadata_schema"""
-
# Create a client
client = aiplatform_v1beta1.MetadataServiceClient()
# Initialize request argument(s)
request = aiplatform_v1beta1.GetMetadataSchemaRequest(
- name="projects/{project}/locations/{location}/metadataStores/{metadata_store}/metadataSchemas/{metadata_schema}",
+ name="name_value",
)
# Make the request
response = client.get_metadata_schema(request=request)
- # Handle response
+ # Handle the response
print(response)
# [END aiplatform_generated_aiplatform_v1beta1_MetadataService_GetMetadataSchema_sync]
diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_metadata_service_get_metadata_store_async.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_metadata_service_get_metadata_store_async.py
index 371dbfb44d..b3158af01f 100644
--- a/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_metadata_service_get_metadata_store_async.py
+++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_metadata_service_get_metadata_store_async.py
@@ -28,20 +28,18 @@
async def sample_get_metadata_store():
- """Snippet for get_metadata_store"""
-
# Create a client
client = aiplatform_v1beta1.MetadataServiceAsyncClient()
# Initialize request argument(s)
request = aiplatform_v1beta1.GetMetadataStoreRequest(
- name="projects/{project}/locations/{location}/metadataStores/{metadata_store}",
+ name="name_value",
)
# Make the request
response = await client.get_metadata_store(request=request)
- # Handle response
+ # Handle the response
print(response)
# [END aiplatform_generated_aiplatform_v1beta1_MetadataService_GetMetadataStore_async]
diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_metadata_service_get_metadata_store_sync.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_metadata_service_get_metadata_store_sync.py
index e750b96e75..ed2d24bac9 100644
--- a/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_metadata_service_get_metadata_store_sync.py
+++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_metadata_service_get_metadata_store_sync.py
@@ -28,20 +28,18 @@
def sample_get_metadata_store():
- """Snippet for get_metadata_store"""
-
# Create a client
client = aiplatform_v1beta1.MetadataServiceClient()
# Initialize request argument(s)
request = aiplatform_v1beta1.GetMetadataStoreRequest(
- name="projects/{project}/locations/{location}/metadataStores/{metadata_store}",
+ name="name_value",
)
# Make the request
response = client.get_metadata_store(request=request)
- # Handle response
+ # Handle the response
print(response)
# [END aiplatform_generated_aiplatform_v1beta1_MetadataService_GetMetadataStore_sync]
diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_metadata_service_list_artifacts_async.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_metadata_service_list_artifacts_async.py
index 804ea45b5e..33aa02602f 100644
--- a/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_metadata_service_list_artifacts_async.py
+++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_metadata_service_list_artifacts_async.py
@@ -28,18 +28,18 @@
async def sample_list_artifacts():
- """Snippet for list_artifacts"""
-
# Create a client
client = aiplatform_v1beta1.MetadataServiceAsyncClient()
# Initialize request argument(s)
request = aiplatform_v1beta1.ListArtifactsRequest(
- parent="projects/{project}/locations/{location}/metadataStores/{metadata_store}/artifacts/{artifact}",
+ parent="parent_value",
)
# Make the request
page_result = client.list_artifacts(request=request)
+
+ # Handle the response
async for response in page_result:
print(response)
diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_metadata_service_list_artifacts_sync.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_metadata_service_list_artifacts_sync.py
index 2a6d987ce9..b7bd5732f6 100644
--- a/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_metadata_service_list_artifacts_sync.py
+++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_metadata_service_list_artifacts_sync.py
@@ -28,18 +28,18 @@
def sample_list_artifacts():
- """Snippet for list_artifacts"""
-
# Create a client
client = aiplatform_v1beta1.MetadataServiceClient()
# Initialize request argument(s)
request = aiplatform_v1beta1.ListArtifactsRequest(
- parent="projects/{project}/locations/{location}/metadataStores/{metadata_store}/artifacts/{artifact}",
+ parent="parent_value",
)
# Make the request
page_result = client.list_artifacts(request=request)
+
+ # Handle the response
for response in page_result:
print(response)
diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_metadata_service_list_contexts_async.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_metadata_service_list_contexts_async.py
index f59ee9467d..f4abd107cb 100644
--- a/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_metadata_service_list_contexts_async.py
+++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_metadata_service_list_contexts_async.py
@@ -28,18 +28,18 @@
async def sample_list_contexts():
- """Snippet for list_contexts"""
-
# Create a client
client = aiplatform_v1beta1.MetadataServiceAsyncClient()
# Initialize request argument(s)
request = aiplatform_v1beta1.ListContextsRequest(
- parent="projects/{project}/locations/{location}/metadataStores/{metadata_store}/contexts/{context}",
+ parent="parent_value",
)
# Make the request
page_result = client.list_contexts(request=request)
+
+ # Handle the response
async for response in page_result:
print(response)
diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_metadata_service_list_contexts_sync.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_metadata_service_list_contexts_sync.py
index 357725fae9..ff137fd1ea 100644
--- a/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_metadata_service_list_contexts_sync.py
+++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_metadata_service_list_contexts_sync.py
@@ -28,18 +28,18 @@
def sample_list_contexts():
- """Snippet for list_contexts"""
-
# Create a client
client = aiplatform_v1beta1.MetadataServiceClient()
# Initialize request argument(s)
request = aiplatform_v1beta1.ListContextsRequest(
- parent="projects/{project}/locations/{location}/metadataStores/{metadata_store}/contexts/{context}",
+ parent="parent_value",
)
# Make the request
page_result = client.list_contexts(request=request)
+
+ # Handle the response
for response in page_result:
print(response)
diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_metadata_service_list_executions_async.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_metadata_service_list_executions_async.py
index 74c78d0824..5c4bf9b143 100644
--- a/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_metadata_service_list_executions_async.py
+++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_metadata_service_list_executions_async.py
@@ -28,18 +28,18 @@
async def sample_list_executions():
- """Snippet for list_executions"""
-
# Create a client
client = aiplatform_v1beta1.MetadataServiceAsyncClient()
# Initialize request argument(s)
request = aiplatform_v1beta1.ListExecutionsRequest(
- parent="projects/{project}/locations/{location}/metadataStores/{metadata_store}/executions/{execution}",
+ parent="parent_value",
)
# Make the request
page_result = client.list_executions(request=request)
+
+ # Handle the response
async for response in page_result:
print(response)
diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_metadata_service_list_executions_sync.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_metadata_service_list_executions_sync.py
index c772027db2..10d0bb5440 100644
--- a/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_metadata_service_list_executions_sync.py
+++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_metadata_service_list_executions_sync.py
@@ -28,18 +28,18 @@
def sample_list_executions():
- """Snippet for list_executions"""
-
# Create a client
client = aiplatform_v1beta1.MetadataServiceClient()
# Initialize request argument(s)
request = aiplatform_v1beta1.ListExecutionsRequest(
- parent="projects/{project}/locations/{location}/metadataStores/{metadata_store}/executions/{execution}",
+ parent="parent_value",
)
# Make the request
page_result = client.list_executions(request=request)
+
+ # Handle the response
for response in page_result:
print(response)
diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_metadata_service_list_metadata_schemas_async.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_metadata_service_list_metadata_schemas_async.py
index e4083560e2..a23444802c 100644
--- a/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_metadata_service_list_metadata_schemas_async.py
+++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_metadata_service_list_metadata_schemas_async.py
@@ -28,18 +28,18 @@
async def sample_list_metadata_schemas():
- """Snippet for list_metadata_schemas"""
-
# Create a client
client = aiplatform_v1beta1.MetadataServiceAsyncClient()
# Initialize request argument(s)
request = aiplatform_v1beta1.ListMetadataSchemasRequest(
- parent="projects/{project}/locations/{location}/metadataStores/{metadata_store}/metadataSchemas/{metadata_schema}",
+ parent="parent_value",
)
# Make the request
page_result = client.list_metadata_schemas(request=request)
+
+ # Handle the response
async for response in page_result:
print(response)
diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_metadata_service_list_metadata_schemas_sync.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_metadata_service_list_metadata_schemas_sync.py
index e96ebe117d..cbb41bea60 100644
--- a/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_metadata_service_list_metadata_schemas_sync.py
+++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_metadata_service_list_metadata_schemas_sync.py
@@ -28,18 +28,18 @@
def sample_list_metadata_schemas():
- """Snippet for list_metadata_schemas"""
-
# Create a client
client = aiplatform_v1beta1.MetadataServiceClient()
# Initialize request argument(s)
request = aiplatform_v1beta1.ListMetadataSchemasRequest(
- parent="projects/{project}/locations/{location}/metadataStores/{metadata_store}/metadataSchemas/{metadata_schema}",
+ parent="parent_value",
)
# Make the request
page_result = client.list_metadata_schemas(request=request)
+
+ # Handle the response
for response in page_result:
print(response)
diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_metadata_service_list_metadata_stores_async.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_metadata_service_list_metadata_stores_async.py
index 7bd2b099da..77d498e68e 100644
--- a/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_metadata_service_list_metadata_stores_async.py
+++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_metadata_service_list_metadata_stores_async.py
@@ -28,18 +28,18 @@
async def sample_list_metadata_stores():
- """Snippet for list_metadata_stores"""
-
# Create a client
client = aiplatform_v1beta1.MetadataServiceAsyncClient()
# Initialize request argument(s)
request = aiplatform_v1beta1.ListMetadataStoresRequest(
- parent="projects/{project}/locations/{location}",
+ parent="parent_value",
)
# Make the request
page_result = client.list_metadata_stores(request=request)
+
+ # Handle the response
async for response in page_result:
print(response)
diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_metadata_service_list_metadata_stores_sync.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_metadata_service_list_metadata_stores_sync.py
index ed7e08d361..58134f72d9 100644
--- a/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_metadata_service_list_metadata_stores_sync.py
+++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_metadata_service_list_metadata_stores_sync.py
@@ -28,18 +28,18 @@
def sample_list_metadata_stores():
- """Snippet for list_metadata_stores"""
-
# Create a client
client = aiplatform_v1beta1.MetadataServiceClient()
# Initialize request argument(s)
request = aiplatform_v1beta1.ListMetadataStoresRequest(
- parent="projects/{project}/locations/{location}",
+ parent="parent_value",
)
# Make the request
page_result = client.list_metadata_stores(request=request)
+
+ # Handle the response
for response in page_result:
print(response)
diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_metadata_service_purge_artifacts_async.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_metadata_service_purge_artifacts_async.py
index 8b638a5399..43800959ee 100644
--- a/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_metadata_service_purge_artifacts_async.py
+++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_metadata_service_purge_artifacts_async.py
@@ -28,14 +28,12 @@
async def sample_purge_artifacts():
- """Snippet for purge_artifacts"""
-
# Create a client
client = aiplatform_v1beta1.MetadataServiceAsyncClient()
# Initialize request argument(s)
request = aiplatform_v1beta1.PurgeArtifactsRequest(
- parent="projects/{project}/locations/{location}/metadataStores/{metadata_store}/artifacts/{artifact}",
+ parent="parent_value",
filter="filter_value",
)
@@ -45,6 +43,8 @@ async def sample_purge_artifacts():
print("Waiting for operation to complete...")
response = await operation.result()
+
+ # Handle the response
print(response)
# [END aiplatform_generated_aiplatform_v1beta1_MetadataService_PurgeArtifacts_async]
diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_metadata_service_purge_artifacts_sync.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_metadata_service_purge_artifacts_sync.py
index 5a1853798e..02f3b56add 100644
--- a/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_metadata_service_purge_artifacts_sync.py
+++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_metadata_service_purge_artifacts_sync.py
@@ -28,14 +28,12 @@
def sample_purge_artifacts():
- """Snippet for purge_artifacts"""
-
# Create a client
client = aiplatform_v1beta1.MetadataServiceClient()
# Initialize request argument(s)
request = aiplatform_v1beta1.PurgeArtifactsRequest(
- parent="projects/{project}/locations/{location}/metadataStores/{metadata_store}/artifacts/{artifact}",
+ parent="parent_value",
filter="filter_value",
)
@@ -45,6 +43,8 @@ def sample_purge_artifacts():
print("Waiting for operation to complete...")
response = operation.result()
+
+ # Handle the response
print(response)
# [END aiplatform_generated_aiplatform_v1beta1_MetadataService_PurgeArtifacts_sync]
diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_metadata_service_purge_contexts_async.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_metadata_service_purge_contexts_async.py
index c748e7952e..af235473c0 100644
--- a/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_metadata_service_purge_contexts_async.py
+++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_metadata_service_purge_contexts_async.py
@@ -28,14 +28,12 @@
async def sample_purge_contexts():
- """Snippet for purge_contexts"""
-
# Create a client
client = aiplatform_v1beta1.MetadataServiceAsyncClient()
# Initialize request argument(s)
request = aiplatform_v1beta1.PurgeContextsRequest(
- parent="projects/{project}/locations/{location}/metadataStores/{metadata_store}/contexts/{context}",
+ parent="parent_value",
filter="filter_value",
)
@@ -45,6 +43,8 @@ async def sample_purge_contexts():
print("Waiting for operation to complete...")
response = await operation.result()
+
+ # Handle the response
print(response)
# [END aiplatform_generated_aiplatform_v1beta1_MetadataService_PurgeContexts_async]
diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_metadata_service_purge_contexts_sync.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_metadata_service_purge_contexts_sync.py
index da3061688b..59539f26c1 100644
--- a/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_metadata_service_purge_contexts_sync.py
+++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_metadata_service_purge_contexts_sync.py
@@ -28,14 +28,12 @@
def sample_purge_contexts():
- """Snippet for purge_contexts"""
-
# Create a client
client = aiplatform_v1beta1.MetadataServiceClient()
# Initialize request argument(s)
request = aiplatform_v1beta1.PurgeContextsRequest(
- parent="projects/{project}/locations/{location}/metadataStores/{metadata_store}/contexts/{context}",
+ parent="parent_value",
filter="filter_value",
)
@@ -45,6 +43,8 @@ def sample_purge_contexts():
print("Waiting for operation to complete...")
response = operation.result()
+
+ # Handle the response
print(response)
# [END aiplatform_generated_aiplatform_v1beta1_MetadataService_PurgeContexts_sync]
diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_metadata_service_purge_executions_async.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_metadata_service_purge_executions_async.py
index e2f3dd4bed..34e4d6035c 100644
--- a/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_metadata_service_purge_executions_async.py
+++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_metadata_service_purge_executions_async.py
@@ -28,14 +28,12 @@
async def sample_purge_executions():
- """Snippet for purge_executions"""
-
# Create a client
client = aiplatform_v1beta1.MetadataServiceAsyncClient()
# Initialize request argument(s)
request = aiplatform_v1beta1.PurgeExecutionsRequest(
- parent="projects/{project}/locations/{location}/metadataStores/{metadata_store}/executions/{execution}",
+ parent="parent_value",
filter="filter_value",
)
@@ -45,6 +43,8 @@ async def sample_purge_executions():
print("Waiting for operation to complete...")
response = await operation.result()
+
+ # Handle the response
print(response)
# [END aiplatform_generated_aiplatform_v1beta1_MetadataService_PurgeExecutions_async]
diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_metadata_service_purge_executions_sync.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_metadata_service_purge_executions_sync.py
index 92b5f13818..38471613ed 100644
--- a/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_metadata_service_purge_executions_sync.py
+++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_metadata_service_purge_executions_sync.py
@@ -28,14 +28,12 @@
def sample_purge_executions():
- """Snippet for purge_executions"""
-
# Create a client
client = aiplatform_v1beta1.MetadataServiceClient()
# Initialize request argument(s)
request = aiplatform_v1beta1.PurgeExecutionsRequest(
- parent="projects/{project}/locations/{location}/metadataStores/{metadata_store}/executions/{execution}",
+ parent="parent_value",
filter="filter_value",
)
@@ -45,6 +43,8 @@ def sample_purge_executions():
print("Waiting for operation to complete...")
response = operation.result()
+
+ # Handle the response
print(response)
# [END aiplatform_generated_aiplatform_v1beta1_MetadataService_PurgeExecutions_sync]
diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_metadata_service_query_artifact_lineage_subgraph_async.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_metadata_service_query_artifact_lineage_subgraph_async.py
index 19f7e13f11..f18acb2d4f 100644
--- a/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_metadata_service_query_artifact_lineage_subgraph_async.py
+++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_metadata_service_query_artifact_lineage_subgraph_async.py
@@ -28,20 +28,18 @@
async def sample_query_artifact_lineage_subgraph():
- """Snippet for query_artifact_lineage_subgraph"""
-
# Create a client
client = aiplatform_v1beta1.MetadataServiceAsyncClient()
# Initialize request argument(s)
request = aiplatform_v1beta1.QueryArtifactLineageSubgraphRequest(
- artifact="projects/{project}/locations/{location}/metadataStores/{metadata_store}/artifacts/{artifact}",
+ artifact="artifact_value",
)
# Make the request
response = await client.query_artifact_lineage_subgraph(request=request)
- # Handle response
+ # Handle the response
print(response)
# [END aiplatform_generated_aiplatform_v1beta1_MetadataService_QueryArtifactLineageSubgraph_async]
diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_metadata_service_query_artifact_lineage_subgraph_sync.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_metadata_service_query_artifact_lineage_subgraph_sync.py
index 429f215869..e703c3aca2 100644
--- a/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_metadata_service_query_artifact_lineage_subgraph_sync.py
+++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_metadata_service_query_artifact_lineage_subgraph_sync.py
@@ -28,20 +28,18 @@
def sample_query_artifact_lineage_subgraph():
- """Snippet for query_artifact_lineage_subgraph"""
-
# Create a client
client = aiplatform_v1beta1.MetadataServiceClient()
# Initialize request argument(s)
request = aiplatform_v1beta1.QueryArtifactLineageSubgraphRequest(
- artifact="projects/{project}/locations/{location}/metadataStores/{metadata_store}/artifacts/{artifact}",
+ artifact="artifact_value",
)
# Make the request
response = client.query_artifact_lineage_subgraph(request=request)
- # Handle response
+ # Handle the response
print(response)
# [END aiplatform_generated_aiplatform_v1beta1_MetadataService_QueryArtifactLineageSubgraph_sync]
diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_metadata_service_query_context_lineage_subgraph_async.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_metadata_service_query_context_lineage_subgraph_async.py
index 2ae3c6d9d4..ec19c145f1 100644
--- a/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_metadata_service_query_context_lineage_subgraph_async.py
+++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_metadata_service_query_context_lineage_subgraph_async.py
@@ -28,20 +28,18 @@
async def sample_query_context_lineage_subgraph():
- """Snippet for query_context_lineage_subgraph"""
-
# Create a client
client = aiplatform_v1beta1.MetadataServiceAsyncClient()
# Initialize request argument(s)
request = aiplatform_v1beta1.QueryContextLineageSubgraphRequest(
- context="projects/{project}/locations/{location}/metadataStores/{metadata_store}/contexts/{context}",
+ context="context_value",
)
# Make the request
response = await client.query_context_lineage_subgraph(request=request)
- # Handle response
+ # Handle the response
print(response)
# [END aiplatform_generated_aiplatform_v1beta1_MetadataService_QueryContextLineageSubgraph_async]
diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_metadata_service_query_context_lineage_subgraph_sync.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_metadata_service_query_context_lineage_subgraph_sync.py
index 6f35ad8471..cf83d420a0 100644
--- a/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_metadata_service_query_context_lineage_subgraph_sync.py
+++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_metadata_service_query_context_lineage_subgraph_sync.py
@@ -28,20 +28,18 @@
def sample_query_context_lineage_subgraph():
- """Snippet for query_context_lineage_subgraph"""
-
# Create a client
client = aiplatform_v1beta1.MetadataServiceClient()
# Initialize request argument(s)
request = aiplatform_v1beta1.QueryContextLineageSubgraphRequest(
- context="projects/{project}/locations/{location}/metadataStores/{metadata_store}/contexts/{context}",
+ context="context_value",
)
# Make the request
response = client.query_context_lineage_subgraph(request=request)
- # Handle response
+ # Handle the response
print(response)
# [END aiplatform_generated_aiplatform_v1beta1_MetadataService_QueryContextLineageSubgraph_sync]
diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_metadata_service_query_execution_inputs_and_outputs_async.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_metadata_service_query_execution_inputs_and_outputs_async.py
index 83f363a9fe..1be132fbb2 100644
--- a/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_metadata_service_query_execution_inputs_and_outputs_async.py
+++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_metadata_service_query_execution_inputs_and_outputs_async.py
@@ -28,20 +28,18 @@
async def sample_query_execution_inputs_and_outputs():
- """Snippet for query_execution_inputs_and_outputs"""
-
# Create a client
client = aiplatform_v1beta1.MetadataServiceAsyncClient()
# Initialize request argument(s)
request = aiplatform_v1beta1.QueryExecutionInputsAndOutputsRequest(
- execution="projects/{project}/locations/{location}/metadataStores/{metadata_store}/executions/{execution}",
+ execution="execution_value",
)
# Make the request
response = await client.query_execution_inputs_and_outputs(request=request)
- # Handle response
+ # Handle the response
print(response)
# [END aiplatform_generated_aiplatform_v1beta1_MetadataService_QueryExecutionInputsAndOutputs_async]
diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_metadata_service_query_execution_inputs_and_outputs_sync.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_metadata_service_query_execution_inputs_and_outputs_sync.py
index cad88b7795..35fb23c58b 100644
--- a/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_metadata_service_query_execution_inputs_and_outputs_sync.py
+++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_metadata_service_query_execution_inputs_and_outputs_sync.py
@@ -28,20 +28,18 @@
def sample_query_execution_inputs_and_outputs():
- """Snippet for query_execution_inputs_and_outputs"""
-
# Create a client
client = aiplatform_v1beta1.MetadataServiceClient()
# Initialize request argument(s)
request = aiplatform_v1beta1.QueryExecutionInputsAndOutputsRequest(
- execution="projects/{project}/locations/{location}/metadataStores/{metadata_store}/executions/{execution}",
+ execution="execution_value",
)
# Make the request
response = client.query_execution_inputs_and_outputs(request=request)
- # Handle response
+ # Handle the response
print(response)
# [END aiplatform_generated_aiplatform_v1beta1_MetadataService_QueryExecutionInputsAndOutputs_sync]
diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_metadata_service_update_artifact_async.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_metadata_service_update_artifact_async.py
index 63ac7ee417..e9ad64048d 100644
--- a/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_metadata_service_update_artifact_async.py
+++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_metadata_service_update_artifact_async.py
@@ -28,8 +28,6 @@
async def sample_update_artifact():
- """Snippet for update_artifact"""
-
# Create a client
client = aiplatform_v1beta1.MetadataServiceAsyncClient()
@@ -40,7 +38,7 @@ async def sample_update_artifact():
# Make the request
response = await client.update_artifact(request=request)
- # Handle response
+ # Handle the response
print(response)
# [END aiplatform_generated_aiplatform_v1beta1_MetadataService_UpdateArtifact_async]
diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_metadata_service_update_artifact_sync.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_metadata_service_update_artifact_sync.py
index 496667dee9..6a26a780db 100644
--- a/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_metadata_service_update_artifact_sync.py
+++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_metadata_service_update_artifact_sync.py
@@ -28,8 +28,6 @@
def sample_update_artifact():
- """Snippet for update_artifact"""
-
# Create a client
client = aiplatform_v1beta1.MetadataServiceClient()
@@ -40,7 +38,7 @@ def sample_update_artifact():
# Make the request
response = client.update_artifact(request=request)
- # Handle response
+ # Handle the response
print(response)
# [END aiplatform_generated_aiplatform_v1beta1_MetadataService_UpdateArtifact_sync]
diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_metadata_service_update_context_async.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_metadata_service_update_context_async.py
index fa34b5732b..71371049f5 100644
--- a/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_metadata_service_update_context_async.py
+++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_metadata_service_update_context_async.py
@@ -28,8 +28,6 @@
async def sample_update_context():
- """Snippet for update_context"""
-
# Create a client
client = aiplatform_v1beta1.MetadataServiceAsyncClient()
@@ -40,7 +38,7 @@ async def sample_update_context():
# Make the request
response = await client.update_context(request=request)
- # Handle response
+ # Handle the response
print(response)
# [END aiplatform_generated_aiplatform_v1beta1_MetadataService_UpdateContext_async]
diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_metadata_service_update_context_sync.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_metadata_service_update_context_sync.py
index 9b8f0ba09c..0d26ef7e5f 100644
--- a/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_metadata_service_update_context_sync.py
+++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_metadata_service_update_context_sync.py
@@ -28,8 +28,6 @@
def sample_update_context():
- """Snippet for update_context"""
-
# Create a client
client = aiplatform_v1beta1.MetadataServiceClient()
@@ -40,7 +38,7 @@ def sample_update_context():
# Make the request
response = client.update_context(request=request)
- # Handle response
+ # Handle the response
print(response)
# [END aiplatform_generated_aiplatform_v1beta1_MetadataService_UpdateContext_sync]
diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_metadata_service_update_execution_async.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_metadata_service_update_execution_async.py
index 00a8c37668..64a945b87b 100644
--- a/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_metadata_service_update_execution_async.py
+++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_metadata_service_update_execution_async.py
@@ -28,8 +28,6 @@
async def sample_update_execution():
- """Snippet for update_execution"""
-
# Create a client
client = aiplatform_v1beta1.MetadataServiceAsyncClient()
@@ -40,7 +38,7 @@ async def sample_update_execution():
# Make the request
response = await client.update_execution(request=request)
- # Handle response
+ # Handle the response
print(response)
# [END aiplatform_generated_aiplatform_v1beta1_MetadataService_UpdateExecution_async]
diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_metadata_service_update_execution_sync.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_metadata_service_update_execution_sync.py
index 56d7f29fa8..45df9ca2e6 100644
--- a/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_metadata_service_update_execution_sync.py
+++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_metadata_service_update_execution_sync.py
@@ -28,8 +28,6 @@
def sample_update_execution():
- """Snippet for update_execution"""
-
# Create a client
client = aiplatform_v1beta1.MetadataServiceClient()
@@ -40,7 +38,7 @@ def sample_update_execution():
# Make the request
response = client.update_execution(request=request)
- # Handle response
+ # Handle the response
print(response)
# [END aiplatform_generated_aiplatform_v1beta1_MetadataService_UpdateExecution_sync]
diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_migration_service_batch_migrate_resources_async.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_migration_service_batch_migrate_resources_async.py
index 6a7915984c..4f8646ee30 100644
--- a/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_migration_service_batch_migrate_resources_async.py
+++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_migration_service_batch_migrate_resources_async.py
@@ -28,19 +28,17 @@
async def sample_batch_migrate_resources():
- """Snippet for batch_migrate_resources"""
-
# Create a client
client = aiplatform_v1beta1.MigrationServiceAsyncClient()
# Initialize request argument(s)
migrate_resource_requests = aiplatform_v1beta1.MigrateResourceRequest()
migrate_resource_requests.migrate_ml_engine_model_version_config.endpoint = "endpoint_value"
- migrate_resource_requests.migrate_ml_engine_model_version_config.model_version = "projects/{project}/models/{model}/versions/{version}"
+ migrate_resource_requests.migrate_ml_engine_model_version_config.model_version = "model_version_value"
migrate_resource_requests.migrate_ml_engine_model_version_config.model_display_name = "model_display_name_value"
request = aiplatform_v1beta1.BatchMigrateResourcesRequest(
- parent="projects/{project}/locations/{location}",
+ parent="parent_value",
migrate_resource_requests=migrate_resource_requests,
)
@@ -50,6 +48,8 @@ async def sample_batch_migrate_resources():
print("Waiting for operation to complete...")
response = await operation.result()
+
+ # Handle the response
print(response)
# [END aiplatform_generated_aiplatform_v1beta1_MigrationService_BatchMigrateResources_async]
diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_migration_service_batch_migrate_resources_sync.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_migration_service_batch_migrate_resources_sync.py
index afa638a9e9..556e1a9a24 100644
--- a/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_migration_service_batch_migrate_resources_sync.py
+++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_migration_service_batch_migrate_resources_sync.py
@@ -28,19 +28,17 @@
def sample_batch_migrate_resources():
- """Snippet for batch_migrate_resources"""
-
# Create a client
client = aiplatform_v1beta1.MigrationServiceClient()
# Initialize request argument(s)
migrate_resource_requests = aiplatform_v1beta1.MigrateResourceRequest()
migrate_resource_requests.migrate_ml_engine_model_version_config.endpoint = "endpoint_value"
- migrate_resource_requests.migrate_ml_engine_model_version_config.model_version = "projects/{project}/models/{model}/versions/{version}"
+ migrate_resource_requests.migrate_ml_engine_model_version_config.model_version = "model_version_value"
migrate_resource_requests.migrate_ml_engine_model_version_config.model_display_name = "model_display_name_value"
request = aiplatform_v1beta1.BatchMigrateResourcesRequest(
- parent="projects/{project}/locations/{location}",
+ parent="parent_value",
migrate_resource_requests=migrate_resource_requests,
)
@@ -50,6 +48,8 @@ def sample_batch_migrate_resources():
print("Waiting for operation to complete...")
response = operation.result()
+
+ # Handle the response
print(response)
# [END aiplatform_generated_aiplatform_v1beta1_MigrationService_BatchMigrateResources_sync]
diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_migration_service_search_migratable_resources_async.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_migration_service_search_migratable_resources_async.py
index 58467b6c5f..1c5e64a794 100644
--- a/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_migration_service_search_migratable_resources_async.py
+++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_migration_service_search_migratable_resources_async.py
@@ -28,18 +28,18 @@
async def sample_search_migratable_resources():
- """Snippet for search_migratable_resources"""
-
# Create a client
client = aiplatform_v1beta1.MigrationServiceAsyncClient()
# Initialize request argument(s)
request = aiplatform_v1beta1.SearchMigratableResourcesRequest(
- parent="projects/{project}/locations/{location}",
+ parent="parent_value",
)
# Make the request
page_result = client.search_migratable_resources(request=request)
+
+ # Handle the response
async for response in page_result:
print(response)
diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_migration_service_search_migratable_resources_sync.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_migration_service_search_migratable_resources_sync.py
index 3058281e75..a849bd9c48 100644
--- a/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_migration_service_search_migratable_resources_sync.py
+++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_migration_service_search_migratable_resources_sync.py
@@ -28,18 +28,18 @@
def sample_search_migratable_resources():
- """Snippet for search_migratable_resources"""
-
# Create a client
client = aiplatform_v1beta1.MigrationServiceClient()
# Initialize request argument(s)
request = aiplatform_v1beta1.SearchMigratableResourcesRequest(
- parent="projects/{project}/locations/{location}",
+ parent="parent_value",
)
# Make the request
page_result = client.search_migratable_resources(request=request)
+
+ # Handle the response
for response in page_result:
print(response)
diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_model_service_delete_model_async.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_model_service_delete_model_async.py
index e1357242f1..9ba1768819 100644
--- a/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_model_service_delete_model_async.py
+++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_model_service_delete_model_async.py
@@ -28,14 +28,12 @@
async def sample_delete_model():
- """Snippet for delete_model"""
-
# Create a client
client = aiplatform_v1beta1.ModelServiceAsyncClient()
# Initialize request argument(s)
request = aiplatform_v1beta1.DeleteModelRequest(
- name="projects/{project}/locations/{location}/models/{model}",
+ name="name_value",
)
# Make the request
@@ -44,6 +42,8 @@ async def sample_delete_model():
print("Waiting for operation to complete...")
response = await operation.result()
+
+ # Handle the response
print(response)
# [END aiplatform_generated_aiplatform_v1beta1_ModelService_DeleteModel_async]
diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_model_service_delete_model_sync.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_model_service_delete_model_sync.py
index be2dde9566..e3813f14d0 100644
--- a/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_model_service_delete_model_sync.py
+++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_model_service_delete_model_sync.py
@@ -28,14 +28,12 @@
def sample_delete_model():
- """Snippet for delete_model"""
-
# Create a client
client = aiplatform_v1beta1.ModelServiceClient()
# Initialize request argument(s)
request = aiplatform_v1beta1.DeleteModelRequest(
- name="projects/{project}/locations/{location}/models/{model}",
+ name="name_value",
)
# Make the request
@@ -44,6 +42,8 @@ def sample_delete_model():
print("Waiting for operation to complete...")
response = operation.result()
+
+ # Handle the response
print(response)
# [END aiplatform_generated_aiplatform_v1beta1_ModelService_DeleteModel_sync]
diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_model_service_export_model_async.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_model_service_export_model_async.py
index 19f9b1d90d..5b3a1f5831 100644
--- a/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_model_service_export_model_async.py
+++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_model_service_export_model_async.py
@@ -28,14 +28,12 @@
async def sample_export_model():
- """Snippet for export_model"""
-
# Create a client
client = aiplatform_v1beta1.ModelServiceAsyncClient()
# Initialize request argument(s)
request = aiplatform_v1beta1.ExportModelRequest(
- name="projects/{project}/locations/{location}/models/{model}",
+ name="name_value",
)
# Make the request
@@ -44,6 +42,8 @@ async def sample_export_model():
print("Waiting for operation to complete...")
response = await operation.result()
+
+ # Handle the response
print(response)
# [END aiplatform_generated_aiplatform_v1beta1_ModelService_ExportModel_async]
diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_model_service_export_model_sync.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_model_service_export_model_sync.py
index a8c2c049c2..db7eab2ae4 100644
--- a/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_model_service_export_model_sync.py
+++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_model_service_export_model_sync.py
@@ -28,14 +28,12 @@
def sample_export_model():
- """Snippet for export_model"""
-
# Create a client
client = aiplatform_v1beta1.ModelServiceClient()
# Initialize request argument(s)
request = aiplatform_v1beta1.ExportModelRequest(
- name="projects/{project}/locations/{location}/models/{model}",
+ name="name_value",
)
# Make the request
@@ -44,6 +42,8 @@ def sample_export_model():
print("Waiting for operation to complete...")
response = operation.result()
+
+ # Handle the response
print(response)
# [END aiplatform_generated_aiplatform_v1beta1_ModelService_ExportModel_sync]
diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_model_service_get_model_async.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_model_service_get_model_async.py
index 3b2bf14075..6deba97ad6 100644
--- a/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_model_service_get_model_async.py
+++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_model_service_get_model_async.py
@@ -28,20 +28,18 @@
async def sample_get_model():
- """Snippet for get_model"""
-
# Create a client
client = aiplatform_v1beta1.ModelServiceAsyncClient()
# Initialize request argument(s)
request = aiplatform_v1beta1.GetModelRequest(
- name="projects/{project}/locations/{location}/models/{model}",
+ name="name_value",
)
# Make the request
response = await client.get_model(request=request)
- # Handle response
+ # Handle the response
print(response)
# [END aiplatform_generated_aiplatform_v1beta1_ModelService_GetModel_async]
diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_model_service_get_model_evaluation_async.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_model_service_get_model_evaluation_async.py
index cf4eddc5a6..4ac204dd82 100644
--- a/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_model_service_get_model_evaluation_async.py
+++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_model_service_get_model_evaluation_async.py
@@ -28,20 +28,18 @@
async def sample_get_model_evaluation():
- """Snippet for get_model_evaluation"""
-
# Create a client
client = aiplatform_v1beta1.ModelServiceAsyncClient()
# Initialize request argument(s)
request = aiplatform_v1beta1.GetModelEvaluationRequest(
- name="projects/{project}/locations/{location}/models/{model}/evaluations/{evaluation}",
+ name="name_value",
)
# Make the request
response = await client.get_model_evaluation(request=request)
- # Handle response
+ # Handle the response
print(response)
# [END aiplatform_generated_aiplatform_v1beta1_ModelService_GetModelEvaluation_async]
diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_model_service_get_model_evaluation_slice_async.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_model_service_get_model_evaluation_slice_async.py
index 8d5fdee714..5ed989e347 100644
--- a/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_model_service_get_model_evaluation_slice_async.py
+++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_model_service_get_model_evaluation_slice_async.py
@@ -28,20 +28,18 @@
async def sample_get_model_evaluation_slice():
- """Snippet for get_model_evaluation_slice"""
-
# Create a client
client = aiplatform_v1beta1.ModelServiceAsyncClient()
# Initialize request argument(s)
request = aiplatform_v1beta1.GetModelEvaluationSliceRequest(
- name="projects/{project}/locations/{location}/models/{model}/evaluations/{evaluation}/slices/{slice}",
+ name="name_value",
)
# Make the request
response = await client.get_model_evaluation_slice(request=request)
- # Handle response
+ # Handle the response
print(response)
# [END aiplatform_generated_aiplatform_v1beta1_ModelService_GetModelEvaluationSlice_async]
diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_model_service_get_model_evaluation_slice_sync.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_model_service_get_model_evaluation_slice_sync.py
index 7f16fbd682..8a416aaca9 100644
--- a/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_model_service_get_model_evaluation_slice_sync.py
+++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_model_service_get_model_evaluation_slice_sync.py
@@ -28,20 +28,18 @@
def sample_get_model_evaluation_slice():
- """Snippet for get_model_evaluation_slice"""
-
# Create a client
client = aiplatform_v1beta1.ModelServiceClient()
# Initialize request argument(s)
request = aiplatform_v1beta1.GetModelEvaluationSliceRequest(
- name="projects/{project}/locations/{location}/models/{model}/evaluations/{evaluation}/slices/{slice}",
+ name="name_value",
)
# Make the request
response = client.get_model_evaluation_slice(request=request)
- # Handle response
+ # Handle the response
print(response)
# [END aiplatform_generated_aiplatform_v1beta1_ModelService_GetModelEvaluationSlice_sync]
diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_model_service_get_model_evaluation_sync.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_model_service_get_model_evaluation_sync.py
index 7f5fea9376..2025e2c541 100644
--- a/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_model_service_get_model_evaluation_sync.py
+++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_model_service_get_model_evaluation_sync.py
@@ -28,20 +28,18 @@
def sample_get_model_evaluation():
- """Snippet for get_model_evaluation"""
-
# Create a client
client = aiplatform_v1beta1.ModelServiceClient()
# Initialize request argument(s)
request = aiplatform_v1beta1.GetModelEvaluationRequest(
- name="projects/{project}/locations/{location}/models/{model}/evaluations/{evaluation}",
+ name="name_value",
)
# Make the request
response = client.get_model_evaluation(request=request)
- # Handle response
+ # Handle the response
print(response)
# [END aiplatform_generated_aiplatform_v1beta1_ModelService_GetModelEvaluation_sync]
diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_model_service_get_model_sync.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_model_service_get_model_sync.py
index 43e04d2322..b6d579bc0d 100644
--- a/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_model_service_get_model_sync.py
+++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_model_service_get_model_sync.py
@@ -28,20 +28,18 @@
def sample_get_model():
- """Snippet for get_model"""
-
# Create a client
client = aiplatform_v1beta1.ModelServiceClient()
# Initialize request argument(s)
request = aiplatform_v1beta1.GetModelRequest(
- name="projects/{project}/locations/{location}/models/{model}",
+ name="name_value",
)
# Make the request
response = client.get_model(request=request)
- # Handle response
+ # Handle the response
print(response)
# [END aiplatform_generated_aiplatform_v1beta1_ModelService_GetModel_sync]
diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_model_service_list_model_evaluation_slices_async.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_model_service_list_model_evaluation_slices_async.py
index a18053c7af..fc5bc559b8 100644
--- a/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_model_service_list_model_evaluation_slices_async.py
+++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_model_service_list_model_evaluation_slices_async.py
@@ -28,18 +28,18 @@
async def sample_list_model_evaluation_slices():
- """Snippet for list_model_evaluation_slices"""
-
# Create a client
client = aiplatform_v1beta1.ModelServiceAsyncClient()
# Initialize request argument(s)
request = aiplatform_v1beta1.ListModelEvaluationSlicesRequest(
- parent="projects/{project}/locations/{location}/models/{model}/evaluations/{evaluation}",
+ parent="parent_value",
)
# Make the request
page_result = client.list_model_evaluation_slices(request=request)
+
+ # Handle the response
async for response in page_result:
print(response)
diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_model_service_list_model_evaluation_slices_sync.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_model_service_list_model_evaluation_slices_sync.py
index 1872eaa589..7349ca505f 100644
--- a/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_model_service_list_model_evaluation_slices_sync.py
+++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_model_service_list_model_evaluation_slices_sync.py
@@ -28,18 +28,18 @@
def sample_list_model_evaluation_slices():
- """Snippet for list_model_evaluation_slices"""
-
# Create a client
client = aiplatform_v1beta1.ModelServiceClient()
# Initialize request argument(s)
request = aiplatform_v1beta1.ListModelEvaluationSlicesRequest(
- parent="projects/{project}/locations/{location}/models/{model}/evaluations/{evaluation}",
+ parent="parent_value",
)
# Make the request
page_result = client.list_model_evaluation_slices(request=request)
+
+ # Handle the response
for response in page_result:
print(response)
diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_model_service_list_model_evaluations_async.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_model_service_list_model_evaluations_async.py
index 3e9aaa1089..6337ddf930 100644
--- a/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_model_service_list_model_evaluations_async.py
+++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_model_service_list_model_evaluations_async.py
@@ -28,18 +28,18 @@
async def sample_list_model_evaluations():
- """Snippet for list_model_evaluations"""
-
# Create a client
client = aiplatform_v1beta1.ModelServiceAsyncClient()
# Initialize request argument(s)
request = aiplatform_v1beta1.ListModelEvaluationsRequest(
- parent="projects/{project}/locations/{location}/models/{model}",
+ parent="parent_value",
)
# Make the request
page_result = client.list_model_evaluations(request=request)
+
+ # Handle the response
async for response in page_result:
print(response)
diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_model_service_list_model_evaluations_sync.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_model_service_list_model_evaluations_sync.py
index b8c17b2444..83b8313d1d 100644
--- a/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_model_service_list_model_evaluations_sync.py
+++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_model_service_list_model_evaluations_sync.py
@@ -28,18 +28,18 @@
def sample_list_model_evaluations():
- """Snippet for list_model_evaluations"""
-
# Create a client
client = aiplatform_v1beta1.ModelServiceClient()
# Initialize request argument(s)
request = aiplatform_v1beta1.ListModelEvaluationsRequest(
- parent="projects/{project}/locations/{location}/models/{model}",
+ parent="parent_value",
)
# Make the request
page_result = client.list_model_evaluations(request=request)
+
+ # Handle the response
for response in page_result:
print(response)
diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_model_service_list_models_async.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_model_service_list_models_async.py
index 356823e292..ed585472d5 100644
--- a/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_model_service_list_models_async.py
+++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_model_service_list_models_async.py
@@ -28,18 +28,18 @@
async def sample_list_models():
- """Snippet for list_models"""
-
# Create a client
client = aiplatform_v1beta1.ModelServiceAsyncClient()
# Initialize request argument(s)
request = aiplatform_v1beta1.ListModelsRequest(
- parent="projects/{project}/locations/{location}",
+ parent="parent_value",
)
# Make the request
page_result = client.list_models(request=request)
+
+ # Handle the response
async for response in page_result:
print(response)
diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_model_service_list_models_sync.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_model_service_list_models_sync.py
index 8ba29af8ba..95be3acdea 100644
--- a/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_model_service_list_models_sync.py
+++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_model_service_list_models_sync.py
@@ -28,18 +28,18 @@
def sample_list_models():
- """Snippet for list_models"""
-
# Create a client
client = aiplatform_v1beta1.ModelServiceClient()
# Initialize request argument(s)
request = aiplatform_v1beta1.ListModelsRequest(
- parent="projects/{project}/locations/{location}",
+ parent="parent_value",
)
# Make the request
page_result = client.list_models(request=request)
+
+ # Handle the response
for response in page_result:
print(response)
diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_model_service_update_model_async.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_model_service_update_model_async.py
index 5cd1f0e324..90e3df5378 100644
--- a/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_model_service_update_model_async.py
+++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_model_service_update_model_async.py
@@ -28,8 +28,6 @@
async def sample_update_model():
- """Snippet for update_model"""
-
# Create a client
client = aiplatform_v1beta1.ModelServiceAsyncClient()
@@ -44,7 +42,7 @@ async def sample_update_model():
# Make the request
response = await client.update_model(request=request)
- # Handle response
+ # Handle the response
print(response)
# [END aiplatform_generated_aiplatform_v1beta1_ModelService_UpdateModel_async]
diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_model_service_update_model_sync.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_model_service_update_model_sync.py
index 79784d6664..c88c1b6bc3 100644
--- a/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_model_service_update_model_sync.py
+++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_model_service_update_model_sync.py
@@ -28,8 +28,6 @@
def sample_update_model():
- """Snippet for update_model"""
-
# Create a client
client = aiplatform_v1beta1.ModelServiceClient()
@@ -44,7 +42,7 @@ def sample_update_model():
# Make the request
response = client.update_model(request=request)
- # Handle response
+ # Handle the response
print(response)
# [END aiplatform_generated_aiplatform_v1beta1_ModelService_UpdateModel_sync]
diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_model_service_upload_model_async.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_model_service_upload_model_async.py
index 38fa55d00a..ba797eecba 100644
--- a/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_model_service_upload_model_async.py
+++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_model_service_upload_model_async.py
@@ -28,8 +28,6 @@
async def sample_upload_model():
- """Snippet for upload_model"""
-
# Create a client
client = aiplatform_v1beta1.ModelServiceAsyncClient()
@@ -38,7 +36,7 @@ async def sample_upload_model():
model.display_name = "display_name_value"
request = aiplatform_v1beta1.UploadModelRequest(
- parent="projects/{project}/locations/{location}",
+ parent="parent_value",
model=model,
)
@@ -48,6 +46,8 @@ async def sample_upload_model():
print("Waiting for operation to complete...")
response = await operation.result()
+
+ # Handle the response
print(response)
# [END aiplatform_generated_aiplatform_v1beta1_ModelService_UploadModel_async]
diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_model_service_upload_model_sync.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_model_service_upload_model_sync.py
index c80f118900..a7356ba37a 100644
--- a/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_model_service_upload_model_sync.py
+++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_model_service_upload_model_sync.py
@@ -28,8 +28,6 @@
def sample_upload_model():
- """Snippet for upload_model"""
-
# Create a client
client = aiplatform_v1beta1.ModelServiceClient()
@@ -38,7 +36,7 @@ def sample_upload_model():
model.display_name = "display_name_value"
request = aiplatform_v1beta1.UploadModelRequest(
- parent="projects/{project}/locations/{location}",
+ parent="parent_value",
model=model,
)
@@ -48,6 +46,8 @@ def sample_upload_model():
print("Waiting for operation to complete...")
response = operation.result()
+
+ # Handle the response
print(response)
# [END aiplatform_generated_aiplatform_v1beta1_ModelService_UploadModel_sync]
diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_pipeline_service_cancel_pipeline_job_async.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_pipeline_service_cancel_pipeline_job_async.py
index 91e3e3eda3..7cada005a0 100644
--- a/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_pipeline_service_cancel_pipeline_job_async.py
+++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_pipeline_service_cancel_pipeline_job_async.py
@@ -28,18 +28,16 @@
async def sample_cancel_pipeline_job():
- """Snippet for cancel_pipeline_job"""
-
# Create a client
client = aiplatform_v1beta1.PipelineServiceAsyncClient()
# Initialize request argument(s)
request = aiplatform_v1beta1.CancelPipelineJobRequest(
- name="projects/{project}/locations/{location}/pipelineJobs/{pipeline_job}",
+ name="name_value",
)
# Make the request
- response = await client.cancel_pipeline_job(request=request)
+ await client.cancel_pipeline_job(request=request)
# [END aiplatform_generated_aiplatform_v1beta1_PipelineService_CancelPipelineJob_async]
diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_pipeline_service_cancel_pipeline_job_sync.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_pipeline_service_cancel_pipeline_job_sync.py
index aceae4b15c..5375706ada 100644
--- a/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_pipeline_service_cancel_pipeline_job_sync.py
+++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_pipeline_service_cancel_pipeline_job_sync.py
@@ -28,18 +28,16 @@
def sample_cancel_pipeline_job():
- """Snippet for cancel_pipeline_job"""
-
# Create a client
client = aiplatform_v1beta1.PipelineServiceClient()
# Initialize request argument(s)
request = aiplatform_v1beta1.CancelPipelineJobRequest(
- name="projects/{project}/locations/{location}/pipelineJobs/{pipeline_job}",
+ name="name_value",
)
# Make the request
- response = client.cancel_pipeline_job(request=request)
+ client.cancel_pipeline_job(request=request)
# [END aiplatform_generated_aiplatform_v1beta1_PipelineService_CancelPipelineJob_sync]
diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_pipeline_service_cancel_training_pipeline_async.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_pipeline_service_cancel_training_pipeline_async.py
index 502d8d55e7..6d5e072c32 100644
--- a/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_pipeline_service_cancel_training_pipeline_async.py
+++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_pipeline_service_cancel_training_pipeline_async.py
@@ -28,18 +28,16 @@
async def sample_cancel_training_pipeline():
- """Snippet for cancel_training_pipeline"""
-
# Create a client
client = aiplatform_v1beta1.PipelineServiceAsyncClient()
# Initialize request argument(s)
request = aiplatform_v1beta1.CancelTrainingPipelineRequest(
- name="projects/{project}/locations/{location}/trainingPipelines/{training_pipeline}",
+ name="name_value",
)
# Make the request
- response = await client.cancel_training_pipeline(request=request)
+ await client.cancel_training_pipeline(request=request)
# [END aiplatform_generated_aiplatform_v1beta1_PipelineService_CancelTrainingPipeline_async]
diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_pipeline_service_cancel_training_pipeline_sync.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_pipeline_service_cancel_training_pipeline_sync.py
index e238c62117..53057bf1fb 100644
--- a/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_pipeline_service_cancel_training_pipeline_sync.py
+++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_pipeline_service_cancel_training_pipeline_sync.py
@@ -28,18 +28,16 @@
def sample_cancel_training_pipeline():
- """Snippet for cancel_training_pipeline"""
-
# Create a client
client = aiplatform_v1beta1.PipelineServiceClient()
# Initialize request argument(s)
request = aiplatform_v1beta1.CancelTrainingPipelineRequest(
- name="projects/{project}/locations/{location}/trainingPipelines/{training_pipeline}",
+ name="name_value",
)
# Make the request
- response = client.cancel_training_pipeline(request=request)
+ client.cancel_training_pipeline(request=request)
# [END aiplatform_generated_aiplatform_v1beta1_PipelineService_CancelTrainingPipeline_sync]
diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_pipeline_service_create_pipeline_job_async.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_pipeline_service_create_pipeline_job_async.py
index 0980ed6b47..1267b7299e 100644
--- a/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_pipeline_service_create_pipeline_job_async.py
+++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_pipeline_service_create_pipeline_job_async.py
@@ -28,20 +28,18 @@
async def sample_create_pipeline_job():
- """Snippet for create_pipeline_job"""
-
# Create a client
client = aiplatform_v1beta1.PipelineServiceAsyncClient()
# Initialize request argument(s)
request = aiplatform_v1beta1.CreatePipelineJobRequest(
- parent="projects/{project}/locations/{location}",
+ parent="parent_value",
)
# Make the request
response = await client.create_pipeline_job(request=request)
- # Handle response
+ # Handle the response
print(response)
# [END aiplatform_generated_aiplatform_v1beta1_PipelineService_CreatePipelineJob_async]
diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_pipeline_service_create_pipeline_job_sync.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_pipeline_service_create_pipeline_job_sync.py
index 98167e6610..a4e688126d 100644
--- a/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_pipeline_service_create_pipeline_job_sync.py
+++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_pipeline_service_create_pipeline_job_sync.py
@@ -28,20 +28,18 @@
def sample_create_pipeline_job():
- """Snippet for create_pipeline_job"""
-
# Create a client
client = aiplatform_v1beta1.PipelineServiceClient()
# Initialize request argument(s)
request = aiplatform_v1beta1.CreatePipelineJobRequest(
- parent="projects/{project}/locations/{location}",
+ parent="parent_value",
)
# Make the request
response = client.create_pipeline_job(request=request)
- # Handle response
+ # Handle the response
print(response)
# [END aiplatform_generated_aiplatform_v1beta1_PipelineService_CreatePipelineJob_sync]
diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_pipeline_service_create_training_pipeline_async.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_pipeline_service_create_training_pipeline_async.py
index 22a1c692bc..2db5d45d03 100644
--- a/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_pipeline_service_create_training_pipeline_async.py
+++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_pipeline_service_create_training_pipeline_async.py
@@ -28,8 +28,6 @@
async def sample_create_training_pipeline():
- """Snippet for create_training_pipeline"""
-
# Create a client
client = aiplatform_v1beta1.PipelineServiceAsyncClient()
@@ -40,14 +38,14 @@ async def sample_create_training_pipeline():
training_pipeline.training_task_inputs.null_value = "NULL_VALUE"
request = aiplatform_v1beta1.CreateTrainingPipelineRequest(
- parent="projects/{project}/locations/{location}",
+ parent="parent_value",
training_pipeline=training_pipeline,
)
# Make the request
response = await client.create_training_pipeline(request=request)
- # Handle response
+ # Handle the response
print(response)
# [END aiplatform_generated_aiplatform_v1beta1_PipelineService_CreateTrainingPipeline_async]
diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_pipeline_service_create_training_pipeline_sync.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_pipeline_service_create_training_pipeline_sync.py
index d8c782b3d3..1529fa505b 100644
--- a/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_pipeline_service_create_training_pipeline_sync.py
+++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_pipeline_service_create_training_pipeline_sync.py
@@ -28,8 +28,6 @@
def sample_create_training_pipeline():
- """Snippet for create_training_pipeline"""
-
# Create a client
client = aiplatform_v1beta1.PipelineServiceClient()
@@ -40,14 +38,14 @@ def sample_create_training_pipeline():
training_pipeline.training_task_inputs.null_value = "NULL_VALUE"
request = aiplatform_v1beta1.CreateTrainingPipelineRequest(
- parent="projects/{project}/locations/{location}",
+ parent="parent_value",
training_pipeline=training_pipeline,
)
# Make the request
response = client.create_training_pipeline(request=request)
- # Handle response
+ # Handle the response
print(response)
# [END aiplatform_generated_aiplatform_v1beta1_PipelineService_CreateTrainingPipeline_sync]
diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_pipeline_service_delete_pipeline_job_async.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_pipeline_service_delete_pipeline_job_async.py
index e7c007a751..2edd009275 100644
--- a/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_pipeline_service_delete_pipeline_job_async.py
+++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_pipeline_service_delete_pipeline_job_async.py
@@ -28,14 +28,12 @@
async def sample_delete_pipeline_job():
- """Snippet for delete_pipeline_job"""
-
# Create a client
client = aiplatform_v1beta1.PipelineServiceAsyncClient()
# Initialize request argument(s)
request = aiplatform_v1beta1.DeletePipelineJobRequest(
- name="projects/{project}/locations/{location}/pipelineJobs/{pipeline_job}",
+ name="name_value",
)
# Make the request
@@ -44,6 +42,8 @@ async def sample_delete_pipeline_job():
print("Waiting for operation to complete...")
response = await operation.result()
+
+ # Handle the response
print(response)
# [END aiplatform_generated_aiplatform_v1beta1_PipelineService_DeletePipelineJob_async]
diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_pipeline_service_delete_pipeline_job_sync.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_pipeline_service_delete_pipeline_job_sync.py
index 40fdea4ecc..f0722c88ac 100644
--- a/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_pipeline_service_delete_pipeline_job_sync.py
+++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_pipeline_service_delete_pipeline_job_sync.py
@@ -28,14 +28,12 @@
def sample_delete_pipeline_job():
- """Snippet for delete_pipeline_job"""
-
# Create a client
client = aiplatform_v1beta1.PipelineServiceClient()
# Initialize request argument(s)
request = aiplatform_v1beta1.DeletePipelineJobRequest(
- name="projects/{project}/locations/{location}/pipelineJobs/{pipeline_job}",
+ name="name_value",
)
# Make the request
@@ -44,6 +42,8 @@ def sample_delete_pipeline_job():
print("Waiting for operation to complete...")
response = operation.result()
+
+ # Handle the response
print(response)
# [END aiplatform_generated_aiplatform_v1beta1_PipelineService_DeletePipelineJob_sync]
diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_pipeline_service_delete_training_pipeline_async.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_pipeline_service_delete_training_pipeline_async.py
index ad54be5b5c..28afc78ed2 100644
--- a/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_pipeline_service_delete_training_pipeline_async.py
+++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_pipeline_service_delete_training_pipeline_async.py
@@ -28,14 +28,12 @@
async def sample_delete_training_pipeline():
- """Snippet for delete_training_pipeline"""
-
# Create a client
client = aiplatform_v1beta1.PipelineServiceAsyncClient()
# Initialize request argument(s)
request = aiplatform_v1beta1.DeleteTrainingPipelineRequest(
- name="projects/{project}/locations/{location}/trainingPipelines/{training_pipeline}",
+ name="name_value",
)
# Make the request
@@ -44,6 +42,8 @@ async def sample_delete_training_pipeline():
print("Waiting for operation to complete...")
response = await operation.result()
+
+ # Handle the response
print(response)
# [END aiplatform_generated_aiplatform_v1beta1_PipelineService_DeleteTrainingPipeline_async]
diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_pipeline_service_delete_training_pipeline_sync.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_pipeline_service_delete_training_pipeline_sync.py
index 02b3652bd7..098b704abe 100644
--- a/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_pipeline_service_delete_training_pipeline_sync.py
+++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_pipeline_service_delete_training_pipeline_sync.py
@@ -28,14 +28,12 @@
def sample_delete_training_pipeline():
- """Snippet for delete_training_pipeline"""
-
# Create a client
client = aiplatform_v1beta1.PipelineServiceClient()
# Initialize request argument(s)
request = aiplatform_v1beta1.DeleteTrainingPipelineRequest(
- name="projects/{project}/locations/{location}/trainingPipelines/{training_pipeline}",
+ name="name_value",
)
# Make the request
@@ -44,6 +42,8 @@ def sample_delete_training_pipeline():
print("Waiting for operation to complete...")
response = operation.result()
+
+ # Handle the response
print(response)
# [END aiplatform_generated_aiplatform_v1beta1_PipelineService_DeleteTrainingPipeline_sync]
diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_pipeline_service_get_pipeline_job_async.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_pipeline_service_get_pipeline_job_async.py
index 30aaa02532..2f4230c051 100644
--- a/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_pipeline_service_get_pipeline_job_async.py
+++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_pipeline_service_get_pipeline_job_async.py
@@ -28,20 +28,18 @@
async def sample_get_pipeline_job():
- """Snippet for get_pipeline_job"""
-
# Create a client
client = aiplatform_v1beta1.PipelineServiceAsyncClient()
# Initialize request argument(s)
request = aiplatform_v1beta1.GetPipelineJobRequest(
- name="projects/{project}/locations/{location}/pipelineJobs/{pipeline_job}",
+ name="name_value",
)
# Make the request
response = await client.get_pipeline_job(request=request)
- # Handle response
+ # Handle the response
print(response)
# [END aiplatform_generated_aiplatform_v1beta1_PipelineService_GetPipelineJob_async]
diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_pipeline_service_get_pipeline_job_sync.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_pipeline_service_get_pipeline_job_sync.py
index 83abd183b5..d1c3fd29a0 100644
--- a/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_pipeline_service_get_pipeline_job_sync.py
+++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_pipeline_service_get_pipeline_job_sync.py
@@ -28,20 +28,18 @@
def sample_get_pipeline_job():
- """Snippet for get_pipeline_job"""
-
# Create a client
client = aiplatform_v1beta1.PipelineServiceClient()
# Initialize request argument(s)
request = aiplatform_v1beta1.GetPipelineJobRequest(
- name="projects/{project}/locations/{location}/pipelineJobs/{pipeline_job}",
+ name="name_value",
)
# Make the request
response = client.get_pipeline_job(request=request)
- # Handle response
+ # Handle the response
print(response)
# [END aiplatform_generated_aiplatform_v1beta1_PipelineService_GetPipelineJob_sync]
diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_pipeline_service_get_training_pipeline_async.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_pipeline_service_get_training_pipeline_async.py
index 30ecee0125..6848da3dea 100644
--- a/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_pipeline_service_get_training_pipeline_async.py
+++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_pipeline_service_get_training_pipeline_async.py
@@ -28,20 +28,18 @@
async def sample_get_training_pipeline():
- """Snippet for get_training_pipeline"""
-
# Create a client
client = aiplatform_v1beta1.PipelineServiceAsyncClient()
# Initialize request argument(s)
request = aiplatform_v1beta1.GetTrainingPipelineRequest(
- name="projects/{project}/locations/{location}/trainingPipelines/{training_pipeline}",
+ name="name_value",
)
# Make the request
response = await client.get_training_pipeline(request=request)
- # Handle response
+ # Handle the response
print(response)
# [END aiplatform_generated_aiplatform_v1beta1_PipelineService_GetTrainingPipeline_async]
diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_pipeline_service_get_training_pipeline_sync.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_pipeline_service_get_training_pipeline_sync.py
index a806d9cf00..a9bda5bcb1 100644
--- a/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_pipeline_service_get_training_pipeline_sync.py
+++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_pipeline_service_get_training_pipeline_sync.py
@@ -28,20 +28,18 @@
def sample_get_training_pipeline():
- """Snippet for get_training_pipeline"""
-
# Create a client
client = aiplatform_v1beta1.PipelineServiceClient()
# Initialize request argument(s)
request = aiplatform_v1beta1.GetTrainingPipelineRequest(
- name="projects/{project}/locations/{location}/trainingPipelines/{training_pipeline}",
+ name="name_value",
)
# Make the request
response = client.get_training_pipeline(request=request)
- # Handle response
+ # Handle the response
print(response)
# [END aiplatform_generated_aiplatform_v1beta1_PipelineService_GetTrainingPipeline_sync]
diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_pipeline_service_list_pipeline_jobs_async.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_pipeline_service_list_pipeline_jobs_async.py
index f1c469d903..1b2e66e233 100644
--- a/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_pipeline_service_list_pipeline_jobs_async.py
+++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_pipeline_service_list_pipeline_jobs_async.py
@@ -28,18 +28,18 @@
async def sample_list_pipeline_jobs():
- """Snippet for list_pipeline_jobs"""
-
# Create a client
client = aiplatform_v1beta1.PipelineServiceAsyncClient()
# Initialize request argument(s)
request = aiplatform_v1beta1.ListPipelineJobsRequest(
- parent="projects/{project}/locations/{location}",
+ parent="parent_value",
)
# Make the request
page_result = client.list_pipeline_jobs(request=request)
+
+ # Handle the response
async for response in page_result:
print(response)
diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_pipeline_service_list_pipeline_jobs_sync.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_pipeline_service_list_pipeline_jobs_sync.py
index b8ae150001..313773eeea 100644
--- a/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_pipeline_service_list_pipeline_jobs_sync.py
+++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_pipeline_service_list_pipeline_jobs_sync.py
@@ -28,18 +28,18 @@
def sample_list_pipeline_jobs():
- """Snippet for list_pipeline_jobs"""
-
# Create a client
client = aiplatform_v1beta1.PipelineServiceClient()
# Initialize request argument(s)
request = aiplatform_v1beta1.ListPipelineJobsRequest(
- parent="projects/{project}/locations/{location}",
+ parent="parent_value",
)
# Make the request
page_result = client.list_pipeline_jobs(request=request)
+
+ # Handle the response
for response in page_result:
print(response)
diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_pipeline_service_list_training_pipelines_async.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_pipeline_service_list_training_pipelines_async.py
index fa7c44c6bf..6696e8ab27 100644
--- a/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_pipeline_service_list_training_pipelines_async.py
+++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_pipeline_service_list_training_pipelines_async.py
@@ -28,18 +28,18 @@
async def sample_list_training_pipelines():
- """Snippet for list_training_pipelines"""
-
# Create a client
client = aiplatform_v1beta1.PipelineServiceAsyncClient()
# Initialize request argument(s)
request = aiplatform_v1beta1.ListTrainingPipelinesRequest(
- parent="projects/{project}/locations/{location}",
+ parent="parent_value",
)
# Make the request
page_result = client.list_training_pipelines(request=request)
+
+ # Handle the response
async for response in page_result:
print(response)
diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_pipeline_service_list_training_pipelines_sync.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_pipeline_service_list_training_pipelines_sync.py
index 4dcb59bfc1..30b874d6dc 100644
--- a/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_pipeline_service_list_training_pipelines_sync.py
+++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_pipeline_service_list_training_pipelines_sync.py
@@ -28,18 +28,18 @@
def sample_list_training_pipelines():
- """Snippet for list_training_pipelines"""
-
# Create a client
client = aiplatform_v1beta1.PipelineServiceClient()
# Initialize request argument(s)
request = aiplatform_v1beta1.ListTrainingPipelinesRequest(
- parent="projects/{project}/locations/{location}",
+ parent="parent_value",
)
# Make the request
page_result = client.list_training_pipelines(request=request)
+
+ # Handle the response
for response in page_result:
print(response)
diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_prediction_service_explain_async.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_prediction_service_explain_async.py
index eff6b59e0d..02d9abe0bf 100644
--- a/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_prediction_service_explain_async.py
+++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_prediction_service_explain_async.py
@@ -28,8 +28,6 @@
async def sample_explain():
- """Snippet for explain"""
-
# Create a client
client = aiplatform_v1beta1.PredictionServiceAsyncClient()
@@ -38,14 +36,14 @@ async def sample_explain():
instances.null_value = "NULL_VALUE"
request = aiplatform_v1beta1.ExplainRequest(
- endpoint="projects/{project}/locations/{location}/endpoints/{endpoint}",
+ endpoint="endpoint_value",
instances=instances,
)
# Make the request
response = await client.explain(request=request)
- # Handle response
+ # Handle the response
print(response)
# [END aiplatform_generated_aiplatform_v1beta1_PredictionService_Explain_async]
diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_prediction_service_explain_sync.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_prediction_service_explain_sync.py
index 3e4ad4beec..d0993cb167 100644
--- a/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_prediction_service_explain_sync.py
+++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_prediction_service_explain_sync.py
@@ -28,8 +28,6 @@
def sample_explain():
- """Snippet for explain"""
-
# Create a client
client = aiplatform_v1beta1.PredictionServiceClient()
@@ -38,14 +36,14 @@ def sample_explain():
instances.null_value = "NULL_VALUE"
request = aiplatform_v1beta1.ExplainRequest(
- endpoint="projects/{project}/locations/{location}/endpoints/{endpoint}",
+ endpoint="endpoint_value",
instances=instances,
)
# Make the request
response = client.explain(request=request)
- # Handle response
+ # Handle the response
print(response)
# [END aiplatform_generated_aiplatform_v1beta1_PredictionService_Explain_sync]
diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_prediction_service_predict_async.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_prediction_service_predict_async.py
index b71defd380..013e9b6ad4 100644
--- a/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_prediction_service_predict_async.py
+++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_prediction_service_predict_async.py
@@ -28,8 +28,6 @@
async def sample_predict():
- """Snippet for predict"""
-
# Create a client
client = aiplatform_v1beta1.PredictionServiceAsyncClient()
@@ -38,14 +36,14 @@ async def sample_predict():
instances.null_value = "NULL_VALUE"
request = aiplatform_v1beta1.PredictRequest(
- endpoint="projects/{project}/locations/{location}/endpoints/{endpoint}",
+ endpoint="endpoint_value",
instances=instances,
)
# Make the request
response = await client.predict(request=request)
- # Handle response
+ # Handle the response
print(response)
# [END aiplatform_generated_aiplatform_v1beta1_PredictionService_Predict_async]
diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_prediction_service_predict_sync.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_prediction_service_predict_sync.py
index 4098f767d8..9683fc0060 100644
--- a/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_prediction_service_predict_sync.py
+++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_prediction_service_predict_sync.py
@@ -28,8 +28,6 @@
def sample_predict():
- """Snippet for predict"""
-
# Create a client
client = aiplatform_v1beta1.PredictionServiceClient()
@@ -38,14 +36,14 @@ def sample_predict():
instances.null_value = "NULL_VALUE"
request = aiplatform_v1beta1.PredictRequest(
- endpoint="projects/{project}/locations/{location}/endpoints/{endpoint}",
+ endpoint="endpoint_value",
instances=instances,
)
# Make the request
response = client.predict(request=request)
- # Handle response
+ # Handle the response
print(response)
# [END aiplatform_generated_aiplatform_v1beta1_PredictionService_Predict_sync]
diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_prediction_service_raw_predict_async.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_prediction_service_raw_predict_async.py
index 7afabd752f..f32c62bb08 100644
--- a/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_prediction_service_raw_predict_async.py
+++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_prediction_service_raw_predict_async.py
@@ -28,20 +28,18 @@
async def sample_raw_predict():
- """Snippet for raw_predict"""
-
# Create a client
client = aiplatform_v1beta1.PredictionServiceAsyncClient()
# Initialize request argument(s)
request = aiplatform_v1beta1.RawPredictRequest(
- endpoint="projects/{project}/locations/{location}/endpoints/{endpoint}",
+ endpoint="endpoint_value",
)
# Make the request
response = await client.raw_predict(request=request)
- # Handle response
+ # Handle the response
print(response)
# [END aiplatform_generated_aiplatform_v1beta1_PredictionService_RawPredict_async]
diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_prediction_service_raw_predict_sync.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_prediction_service_raw_predict_sync.py
index 25e99ab964..5517055e9b 100644
--- a/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_prediction_service_raw_predict_sync.py
+++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_prediction_service_raw_predict_sync.py
@@ -28,20 +28,18 @@
def sample_raw_predict():
- """Snippet for raw_predict"""
-
# Create a client
client = aiplatform_v1beta1.PredictionServiceClient()
# Initialize request argument(s)
request = aiplatform_v1beta1.RawPredictRequest(
- endpoint="projects/{project}/locations/{location}/endpoints/{endpoint}",
+ endpoint="endpoint_value",
)
# Make the request
response = client.raw_predict(request=request)
- # Handle response
+ # Handle the response
print(response)
# [END aiplatform_generated_aiplatform_v1beta1_PredictionService_RawPredict_sync]
diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_specialist_pool_service_create_specialist_pool_async.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_specialist_pool_service_create_specialist_pool_async.py
index adbe42cb29..c731adc24b 100644
--- a/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_specialist_pool_service_create_specialist_pool_async.py
+++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_specialist_pool_service_create_specialist_pool_async.py
@@ -28,8 +28,6 @@
async def sample_create_specialist_pool():
- """Snippet for create_specialist_pool"""
-
# Create a client
client = aiplatform_v1beta1.SpecialistPoolServiceAsyncClient()
@@ -39,7 +37,7 @@ async def sample_create_specialist_pool():
specialist_pool.display_name = "display_name_value"
request = aiplatform_v1beta1.CreateSpecialistPoolRequest(
- parent="projects/{project}/locations/{location}",
+ parent="parent_value",
specialist_pool=specialist_pool,
)
@@ -49,6 +47,8 @@ async def sample_create_specialist_pool():
print("Waiting for operation to complete...")
response = await operation.result()
+
+ # Handle the response
print(response)
# [END aiplatform_generated_aiplatform_v1beta1_SpecialistPoolService_CreateSpecialistPool_async]
diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_specialist_pool_service_create_specialist_pool_sync.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_specialist_pool_service_create_specialist_pool_sync.py
index f5eeca7309..2f76f71f9e 100644
--- a/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_specialist_pool_service_create_specialist_pool_sync.py
+++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_specialist_pool_service_create_specialist_pool_sync.py
@@ -28,8 +28,6 @@
def sample_create_specialist_pool():
- """Snippet for create_specialist_pool"""
-
# Create a client
client = aiplatform_v1beta1.SpecialistPoolServiceClient()
@@ -39,7 +37,7 @@ def sample_create_specialist_pool():
specialist_pool.display_name = "display_name_value"
request = aiplatform_v1beta1.CreateSpecialistPoolRequest(
- parent="projects/{project}/locations/{location}",
+ parent="parent_value",
specialist_pool=specialist_pool,
)
@@ -49,6 +47,8 @@ def sample_create_specialist_pool():
print("Waiting for operation to complete...")
response = operation.result()
+
+ # Handle the response
print(response)
# [END aiplatform_generated_aiplatform_v1beta1_SpecialistPoolService_CreateSpecialistPool_sync]
diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_specialist_pool_service_delete_specialist_pool_async.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_specialist_pool_service_delete_specialist_pool_async.py
index 21731c2170..54902aeeef 100644
--- a/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_specialist_pool_service_delete_specialist_pool_async.py
+++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_specialist_pool_service_delete_specialist_pool_async.py
@@ -28,14 +28,12 @@
async def sample_delete_specialist_pool():
- """Snippet for delete_specialist_pool"""
-
# Create a client
client = aiplatform_v1beta1.SpecialistPoolServiceAsyncClient()
# Initialize request argument(s)
request = aiplatform_v1beta1.DeleteSpecialistPoolRequest(
- name="projects/{project}/locations/{location}/specialistPools/{specialist_pool}",
+ name="name_value",
)
# Make the request
@@ -44,6 +42,8 @@ async def sample_delete_specialist_pool():
print("Waiting for operation to complete...")
response = await operation.result()
+
+ # Handle the response
print(response)
# [END aiplatform_generated_aiplatform_v1beta1_SpecialistPoolService_DeleteSpecialistPool_async]
diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_specialist_pool_service_delete_specialist_pool_sync.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_specialist_pool_service_delete_specialist_pool_sync.py
index c14e821135..40581a02c8 100644
--- a/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_specialist_pool_service_delete_specialist_pool_sync.py
+++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_specialist_pool_service_delete_specialist_pool_sync.py
@@ -28,14 +28,12 @@
def sample_delete_specialist_pool():
- """Snippet for delete_specialist_pool"""
-
# Create a client
client = aiplatform_v1beta1.SpecialistPoolServiceClient()
# Initialize request argument(s)
request = aiplatform_v1beta1.DeleteSpecialistPoolRequest(
- name="projects/{project}/locations/{location}/specialistPools/{specialist_pool}",
+ name="name_value",
)
# Make the request
@@ -44,6 +42,8 @@ def sample_delete_specialist_pool():
print("Waiting for operation to complete...")
response = operation.result()
+
+ # Handle the response
print(response)
# [END aiplatform_generated_aiplatform_v1beta1_SpecialistPoolService_DeleteSpecialistPool_sync]
diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_specialist_pool_service_get_specialist_pool_async.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_specialist_pool_service_get_specialist_pool_async.py
index bd4ce3bd79..ad4d8ca6ac 100644
--- a/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_specialist_pool_service_get_specialist_pool_async.py
+++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_specialist_pool_service_get_specialist_pool_async.py
@@ -28,20 +28,18 @@
async def sample_get_specialist_pool():
- """Snippet for get_specialist_pool"""
-
# Create a client
client = aiplatform_v1beta1.SpecialistPoolServiceAsyncClient()
# Initialize request argument(s)
request = aiplatform_v1beta1.GetSpecialistPoolRequest(
- name="projects/{project}/locations/{location}/specialistPools/{specialist_pool}",
+ name="name_value",
)
# Make the request
response = await client.get_specialist_pool(request=request)
- # Handle response
+ # Handle the response
print(response)
# [END aiplatform_generated_aiplatform_v1beta1_SpecialistPoolService_GetSpecialistPool_async]
diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_specialist_pool_service_get_specialist_pool_sync.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_specialist_pool_service_get_specialist_pool_sync.py
index e7322ffc3e..4bc71e0a3d 100644
--- a/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_specialist_pool_service_get_specialist_pool_sync.py
+++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_specialist_pool_service_get_specialist_pool_sync.py
@@ -28,20 +28,18 @@
def sample_get_specialist_pool():
- """Snippet for get_specialist_pool"""
-
# Create a client
client = aiplatform_v1beta1.SpecialistPoolServiceClient()
# Initialize request argument(s)
request = aiplatform_v1beta1.GetSpecialistPoolRequest(
- name="projects/{project}/locations/{location}/specialistPools/{specialist_pool}",
+ name="name_value",
)
# Make the request
response = client.get_specialist_pool(request=request)
- # Handle response
+ # Handle the response
print(response)
# [END aiplatform_generated_aiplatform_v1beta1_SpecialistPoolService_GetSpecialistPool_sync]
diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_specialist_pool_service_list_specialist_pools_async.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_specialist_pool_service_list_specialist_pools_async.py
index 4bf9d554f0..729ff7aa78 100644
--- a/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_specialist_pool_service_list_specialist_pools_async.py
+++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_specialist_pool_service_list_specialist_pools_async.py
@@ -28,18 +28,18 @@
async def sample_list_specialist_pools():
- """Snippet for list_specialist_pools"""
-
# Create a client
client = aiplatform_v1beta1.SpecialistPoolServiceAsyncClient()
# Initialize request argument(s)
request = aiplatform_v1beta1.ListSpecialistPoolsRequest(
- parent="projects/{project}/locations/{location}",
+ parent="parent_value",
)
# Make the request
page_result = client.list_specialist_pools(request=request)
+
+ # Handle the response
async for response in page_result:
print(response)
diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_specialist_pool_service_list_specialist_pools_sync.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_specialist_pool_service_list_specialist_pools_sync.py
index 1fd1ab6349..bfbf0edfe4 100644
--- a/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_specialist_pool_service_list_specialist_pools_sync.py
+++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_specialist_pool_service_list_specialist_pools_sync.py
@@ -28,18 +28,18 @@
def sample_list_specialist_pools():
- """Snippet for list_specialist_pools"""
-
# Create a client
client = aiplatform_v1beta1.SpecialistPoolServiceClient()
# Initialize request argument(s)
request = aiplatform_v1beta1.ListSpecialistPoolsRequest(
- parent="projects/{project}/locations/{location}",
+ parent="parent_value",
)
# Make the request
page_result = client.list_specialist_pools(request=request)
+
+ # Handle the response
for response in page_result:
print(response)
diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_specialist_pool_service_update_specialist_pool_async.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_specialist_pool_service_update_specialist_pool_async.py
index 977b1ac7e4..1fccb2c7f4 100644
--- a/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_specialist_pool_service_update_specialist_pool_async.py
+++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_specialist_pool_service_update_specialist_pool_async.py
@@ -28,8 +28,6 @@
async def sample_update_specialist_pool():
- """Snippet for update_specialist_pool"""
-
# Create a client
client = aiplatform_v1beta1.SpecialistPoolServiceAsyncClient()
@@ -48,6 +46,8 @@ async def sample_update_specialist_pool():
print("Waiting for operation to complete...")
response = await operation.result()
+
+ # Handle the response
print(response)
# [END aiplatform_generated_aiplatform_v1beta1_SpecialistPoolService_UpdateSpecialistPool_async]
diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_specialist_pool_service_update_specialist_pool_sync.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_specialist_pool_service_update_specialist_pool_sync.py
index af22d361c2..cf4841db68 100644
--- a/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_specialist_pool_service_update_specialist_pool_sync.py
+++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_specialist_pool_service_update_specialist_pool_sync.py
@@ -28,8 +28,6 @@
def sample_update_specialist_pool():
- """Snippet for update_specialist_pool"""
-
# Create a client
client = aiplatform_v1beta1.SpecialistPoolServiceClient()
@@ -48,6 +46,8 @@ def sample_update_specialist_pool():
print("Waiting for operation to complete...")
response = operation.result()
+
+ # Handle the response
print(response)
# [END aiplatform_generated_aiplatform_v1beta1_SpecialistPoolService_UpdateSpecialistPool_sync]
diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_tensorboard_service_batch_create_tensorboard_runs_async.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_tensorboard_service_batch_create_tensorboard_runs_async.py
index 61b91d8bcf..0499deb0a8 100644
--- a/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_tensorboard_service_batch_create_tensorboard_runs_async.py
+++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_tensorboard_service_batch_create_tensorboard_runs_async.py
@@ -28,26 +28,24 @@
async def sample_batch_create_tensorboard_runs():
- """Snippet for batch_create_tensorboard_runs"""
-
# Create a client
client = aiplatform_v1beta1.TensorboardServiceAsyncClient()
# Initialize request argument(s)
requests = aiplatform_v1beta1.CreateTensorboardRunRequest()
- requests.parent = "projects/{project}/locations/{location}/tensorboards/{tensorboard}/experiments/{experiment}/runs/{run}"
+ requests.parent = "parent_value"
requests.tensorboard_run.display_name = "display_name_value"
requests.tensorboard_run_id = "tensorboard_run_id_value"
request = aiplatform_v1beta1.BatchCreateTensorboardRunsRequest(
- parent="projects/{project}/locations/{location}/tensorboards/{tensorboard}/experiments/{experiment}",
+ parent="parent_value",
requests=requests,
)
# Make the request
response = await client.batch_create_tensorboard_runs(request=request)
- # Handle response
+ # Handle the response
print(response)
# [END aiplatform_generated_aiplatform_v1beta1_TensorboardService_BatchCreateTensorboardRuns_async]
diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_tensorboard_service_batch_create_tensorboard_runs_sync.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_tensorboard_service_batch_create_tensorboard_runs_sync.py
index 7f65b09e3f..c6827d7a4a 100644
--- a/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_tensorboard_service_batch_create_tensorboard_runs_sync.py
+++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_tensorboard_service_batch_create_tensorboard_runs_sync.py
@@ -28,26 +28,24 @@
def sample_batch_create_tensorboard_runs():
- """Snippet for batch_create_tensorboard_runs"""
-
# Create a client
client = aiplatform_v1beta1.TensorboardServiceClient()
# Initialize request argument(s)
requests = aiplatform_v1beta1.CreateTensorboardRunRequest()
- requests.parent = "projects/{project}/locations/{location}/tensorboards/{tensorboard}/experiments/{experiment}/runs/{run}"
+ requests.parent = "parent_value"
requests.tensorboard_run.display_name = "display_name_value"
requests.tensorboard_run_id = "tensorboard_run_id_value"
request = aiplatform_v1beta1.BatchCreateTensorboardRunsRequest(
- parent="projects/{project}/locations/{location}/tensorboards/{tensorboard}/experiments/{experiment}",
+ parent="parent_value",
requests=requests,
)
# Make the request
response = client.batch_create_tensorboard_runs(request=request)
- # Handle response
+ # Handle the response
print(response)
# [END aiplatform_generated_aiplatform_v1beta1_TensorboardService_BatchCreateTensorboardRuns_sync]
diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_tensorboard_service_batch_create_tensorboard_time_series_async.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_tensorboard_service_batch_create_tensorboard_time_series_async.py
index e17850bc4b..87213a466a 100644
--- a/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_tensorboard_service_batch_create_tensorboard_time_series_async.py
+++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_tensorboard_service_batch_create_tensorboard_time_series_async.py
@@ -28,26 +28,24 @@
async def sample_batch_create_tensorboard_time_series():
- """Snippet for batch_create_tensorboard_time_series"""
-
# Create a client
client = aiplatform_v1beta1.TensorboardServiceAsyncClient()
# Initialize request argument(s)
requests = aiplatform_v1beta1.CreateTensorboardTimeSeriesRequest()
- requests.parent = "projects/{project}/locations/{location}/tensorboards/{tensorboard}/experiments/{experiment}/runs/{run}/timeSeries/{time_series}"
+ requests.parent = "parent_value"
requests.tensorboard_time_series.display_name = "display_name_value"
requests.tensorboard_time_series.value_type = "BLOB_SEQUENCE"
request = aiplatform_v1beta1.BatchCreateTensorboardTimeSeriesRequest(
- parent="projects/{project}/locations/{location}/tensorboards/{tensorboard}/experiments/{experiment}",
+ parent="parent_value",
requests=requests,
)
# Make the request
response = await client.batch_create_tensorboard_time_series(request=request)
- # Handle response
+ # Handle the response
print(response)
# [END aiplatform_generated_aiplatform_v1beta1_TensorboardService_BatchCreateTensorboardTimeSeries_async]
diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_tensorboard_service_batch_create_tensorboard_time_series_sync.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_tensorboard_service_batch_create_tensorboard_time_series_sync.py
index 81bc133426..dff96e34e4 100644
--- a/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_tensorboard_service_batch_create_tensorboard_time_series_sync.py
+++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_tensorboard_service_batch_create_tensorboard_time_series_sync.py
@@ -28,26 +28,24 @@
def sample_batch_create_tensorboard_time_series():
- """Snippet for batch_create_tensorboard_time_series"""
-
# Create a client
client = aiplatform_v1beta1.TensorboardServiceClient()
# Initialize request argument(s)
requests = aiplatform_v1beta1.CreateTensorboardTimeSeriesRequest()
- requests.parent = "projects/{project}/locations/{location}/tensorboards/{tensorboard}/experiments/{experiment}/runs/{run}/timeSeries/{time_series}"
+ requests.parent = "parent_value"
requests.tensorboard_time_series.display_name = "display_name_value"
requests.tensorboard_time_series.value_type = "BLOB_SEQUENCE"
request = aiplatform_v1beta1.BatchCreateTensorboardTimeSeriesRequest(
- parent="projects/{project}/locations/{location}/tensorboards/{tensorboard}/experiments/{experiment}",
+ parent="parent_value",
requests=requests,
)
# Make the request
response = client.batch_create_tensorboard_time_series(request=request)
- # Handle response
+ # Handle the response
print(response)
# [END aiplatform_generated_aiplatform_v1beta1_TensorboardService_BatchCreateTensorboardTimeSeries_sync]
diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_tensorboard_service_batch_read_tensorboard_time_series_data_async.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_tensorboard_service_batch_read_tensorboard_time_series_data_async.py
index 6394cdc822..49945f7b54 100644
--- a/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_tensorboard_service_batch_read_tensorboard_time_series_data_async.py
+++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_tensorboard_service_batch_read_tensorboard_time_series_data_async.py
@@ -28,21 +28,19 @@
async def sample_batch_read_tensorboard_time_series_data():
- """Snippet for batch_read_tensorboard_time_series_data"""
-
# Create a client
client = aiplatform_v1beta1.TensorboardServiceAsyncClient()
# Initialize request argument(s)
request = aiplatform_v1beta1.BatchReadTensorboardTimeSeriesDataRequest(
- tensorboard="projects/{project}/locations/{location}/tensorboards/{tensorboard}",
- time_series="projects/{project}/locations/{location}/tensorboards/{tensorboard}/experiments/{experiment}/runs/{run}/timeSeries/{time_series}",
+ tensorboard="tensorboard_value",
+ time_series=['time_series_value_1', 'time_series_value_2'],
)
# Make the request
response = await client.batch_read_tensorboard_time_series_data(request=request)
- # Handle response
+ # Handle the response
print(response)
# [END aiplatform_generated_aiplatform_v1beta1_TensorboardService_BatchReadTensorboardTimeSeriesData_async]
diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_tensorboard_service_batch_read_tensorboard_time_series_data_sync.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_tensorboard_service_batch_read_tensorboard_time_series_data_sync.py
index ada1d6d09e..a90b7a75b2 100644
--- a/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_tensorboard_service_batch_read_tensorboard_time_series_data_sync.py
+++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_tensorboard_service_batch_read_tensorboard_time_series_data_sync.py
@@ -28,21 +28,19 @@
def sample_batch_read_tensorboard_time_series_data():
- """Snippet for batch_read_tensorboard_time_series_data"""
-
# Create a client
client = aiplatform_v1beta1.TensorboardServiceClient()
# Initialize request argument(s)
request = aiplatform_v1beta1.BatchReadTensorboardTimeSeriesDataRequest(
- tensorboard="projects/{project}/locations/{location}/tensorboards/{tensorboard}",
- time_series="projects/{project}/locations/{location}/tensorboards/{tensorboard}/experiments/{experiment}/runs/{run}/timeSeries/{time_series}",
+ tensorboard="tensorboard_value",
+ time_series=['time_series_value_1', 'time_series_value_2'],
)
# Make the request
response = client.batch_read_tensorboard_time_series_data(request=request)
- # Handle response
+ # Handle the response
print(response)
# [END aiplatform_generated_aiplatform_v1beta1_TensorboardService_BatchReadTensorboardTimeSeriesData_sync]
diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_tensorboard_service_create_tensorboard_async.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_tensorboard_service_create_tensorboard_async.py
index 627cc0b525..fe5403e89d 100644
--- a/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_tensorboard_service_create_tensorboard_async.py
+++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_tensorboard_service_create_tensorboard_async.py
@@ -28,8 +28,6 @@
async def sample_create_tensorboard():
- """Snippet for create_tensorboard"""
-
# Create a client
client = aiplatform_v1beta1.TensorboardServiceAsyncClient()
@@ -38,7 +36,7 @@ async def sample_create_tensorboard():
tensorboard.display_name = "display_name_value"
request = aiplatform_v1beta1.CreateTensorboardRequest(
- parent="projects/{project}/locations/{location}/tensorboards/{tensorboard}",
+ parent="parent_value",
tensorboard=tensorboard,
)
@@ -48,6 +46,8 @@ async def sample_create_tensorboard():
print("Waiting for operation to complete...")
response = await operation.result()
+
+ # Handle the response
print(response)
# [END aiplatform_generated_aiplatform_v1beta1_TensorboardService_CreateTensorboard_async]
diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_tensorboard_service_create_tensorboard_experiment_async.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_tensorboard_service_create_tensorboard_experiment_async.py
index 393ca36e77..32a1e3f31c 100644
--- a/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_tensorboard_service_create_tensorboard_experiment_async.py
+++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_tensorboard_service_create_tensorboard_experiment_async.py
@@ -28,21 +28,19 @@
async def sample_create_tensorboard_experiment():
- """Snippet for create_tensorboard_experiment"""
-
# Create a client
client = aiplatform_v1beta1.TensorboardServiceAsyncClient()
# Initialize request argument(s)
request = aiplatform_v1beta1.CreateTensorboardExperimentRequest(
- parent="projects/{project}/locations/{location}/tensorboards/{tensorboard}/experiments/{experiment}",
+ parent="parent_value",
tensorboard_experiment_id="tensorboard_experiment_id_value",
)
# Make the request
response = await client.create_tensorboard_experiment(request=request)
- # Handle response
+ # Handle the response
print(response)
# [END aiplatform_generated_aiplatform_v1beta1_TensorboardService_CreateTensorboardExperiment_async]
diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_tensorboard_service_create_tensorboard_experiment_sync.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_tensorboard_service_create_tensorboard_experiment_sync.py
index d1844dd90f..2cf3eebfdd 100644
--- a/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_tensorboard_service_create_tensorboard_experiment_sync.py
+++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_tensorboard_service_create_tensorboard_experiment_sync.py
@@ -28,21 +28,19 @@
def sample_create_tensorboard_experiment():
- """Snippet for create_tensorboard_experiment"""
-
# Create a client
client = aiplatform_v1beta1.TensorboardServiceClient()
# Initialize request argument(s)
request = aiplatform_v1beta1.CreateTensorboardExperimentRequest(
- parent="projects/{project}/locations/{location}/tensorboards/{tensorboard}/experiments/{experiment}",
+ parent="parent_value",
tensorboard_experiment_id="tensorboard_experiment_id_value",
)
# Make the request
response = client.create_tensorboard_experiment(request=request)
- # Handle response
+ # Handle the response
print(response)
# [END aiplatform_generated_aiplatform_v1beta1_TensorboardService_CreateTensorboardExperiment_sync]
diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_tensorboard_service_create_tensorboard_run_async.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_tensorboard_service_create_tensorboard_run_async.py
index dc604f6480..2ffdf1e9ff 100644
--- a/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_tensorboard_service_create_tensorboard_run_async.py
+++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_tensorboard_service_create_tensorboard_run_async.py
@@ -28,8 +28,6 @@
async def sample_create_tensorboard_run():
- """Snippet for create_tensorboard_run"""
-
# Create a client
client = aiplatform_v1beta1.TensorboardServiceAsyncClient()
@@ -38,7 +36,7 @@ async def sample_create_tensorboard_run():
tensorboard_run.display_name = "display_name_value"
request = aiplatform_v1beta1.CreateTensorboardRunRequest(
- parent="projects/{project}/locations/{location}/tensorboards/{tensorboard}/experiments/{experiment}/runs/{run}",
+ parent="parent_value",
tensorboard_run=tensorboard_run,
tensorboard_run_id="tensorboard_run_id_value",
)
@@ -46,7 +44,7 @@ async def sample_create_tensorboard_run():
# Make the request
response = await client.create_tensorboard_run(request=request)
- # Handle response
+ # Handle the response
print(response)
# [END aiplatform_generated_aiplatform_v1beta1_TensorboardService_CreateTensorboardRun_async]
diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_tensorboard_service_create_tensorboard_run_sync.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_tensorboard_service_create_tensorboard_run_sync.py
index 7f145750e9..4565687250 100644
--- a/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_tensorboard_service_create_tensorboard_run_sync.py
+++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_tensorboard_service_create_tensorboard_run_sync.py
@@ -28,8 +28,6 @@
def sample_create_tensorboard_run():
- """Snippet for create_tensorboard_run"""
-
# Create a client
client = aiplatform_v1beta1.TensorboardServiceClient()
@@ -38,7 +36,7 @@ def sample_create_tensorboard_run():
tensorboard_run.display_name = "display_name_value"
request = aiplatform_v1beta1.CreateTensorboardRunRequest(
- parent="projects/{project}/locations/{location}/tensorboards/{tensorboard}/experiments/{experiment}/runs/{run}",
+ parent="parent_value",
tensorboard_run=tensorboard_run,
tensorboard_run_id="tensorboard_run_id_value",
)
@@ -46,7 +44,7 @@ def sample_create_tensorboard_run():
# Make the request
response = client.create_tensorboard_run(request=request)
- # Handle response
+ # Handle the response
print(response)
# [END aiplatform_generated_aiplatform_v1beta1_TensorboardService_CreateTensorboardRun_sync]
diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_tensorboard_service_create_tensorboard_sync.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_tensorboard_service_create_tensorboard_sync.py
index da9c35a86f..a6efda2c4c 100644
--- a/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_tensorboard_service_create_tensorboard_sync.py
+++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_tensorboard_service_create_tensorboard_sync.py
@@ -28,8 +28,6 @@
def sample_create_tensorboard():
- """Snippet for create_tensorboard"""
-
# Create a client
client = aiplatform_v1beta1.TensorboardServiceClient()
@@ -38,7 +36,7 @@ def sample_create_tensorboard():
tensorboard.display_name = "display_name_value"
request = aiplatform_v1beta1.CreateTensorboardRequest(
- parent="projects/{project}/locations/{location}/tensorboards/{tensorboard}",
+ parent="parent_value",
tensorboard=tensorboard,
)
@@ -48,6 +46,8 @@ def sample_create_tensorboard():
print("Waiting for operation to complete...")
response = operation.result()
+
+ # Handle the response
print(response)
# [END aiplatform_generated_aiplatform_v1beta1_TensorboardService_CreateTensorboard_sync]
diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_tensorboard_service_create_tensorboard_time_series_async.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_tensorboard_service_create_tensorboard_time_series_async.py
index 4cd40c5b64..e1a431e5b6 100644
--- a/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_tensorboard_service_create_tensorboard_time_series_async.py
+++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_tensorboard_service_create_tensorboard_time_series_async.py
@@ -28,8 +28,6 @@
async def sample_create_tensorboard_time_series():
- """Snippet for create_tensorboard_time_series"""
-
# Create a client
client = aiplatform_v1beta1.TensorboardServiceAsyncClient()
@@ -39,14 +37,14 @@ async def sample_create_tensorboard_time_series():
tensorboard_time_series.value_type = "BLOB_SEQUENCE"
request = aiplatform_v1beta1.CreateTensorboardTimeSeriesRequest(
- parent="projects/{project}/locations/{location}/tensorboards/{tensorboard}/experiments/{experiment}/runs/{run}/timeSeries/{time_series}",
+ parent="parent_value",
tensorboard_time_series=tensorboard_time_series,
)
# Make the request
response = await client.create_tensorboard_time_series(request=request)
- # Handle response
+ # Handle the response
print(response)
# [END aiplatform_generated_aiplatform_v1beta1_TensorboardService_CreateTensorboardTimeSeries_async]
diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_tensorboard_service_create_tensorboard_time_series_sync.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_tensorboard_service_create_tensorboard_time_series_sync.py
index 785d664f92..3be4868fdc 100644
--- a/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_tensorboard_service_create_tensorboard_time_series_sync.py
+++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_tensorboard_service_create_tensorboard_time_series_sync.py
@@ -28,8 +28,6 @@
def sample_create_tensorboard_time_series():
- """Snippet for create_tensorboard_time_series"""
-
# Create a client
client = aiplatform_v1beta1.TensorboardServiceClient()
@@ -39,14 +37,14 @@ def sample_create_tensorboard_time_series():
tensorboard_time_series.value_type = "BLOB_SEQUENCE"
request = aiplatform_v1beta1.CreateTensorboardTimeSeriesRequest(
- parent="projects/{project}/locations/{location}/tensorboards/{tensorboard}/experiments/{experiment}/runs/{run}/timeSeries/{time_series}",
+ parent="parent_value",
tensorboard_time_series=tensorboard_time_series,
)
# Make the request
response = client.create_tensorboard_time_series(request=request)
- # Handle response
+ # Handle the response
print(response)
# [END aiplatform_generated_aiplatform_v1beta1_TensorboardService_CreateTensorboardTimeSeries_sync]
diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_tensorboard_service_delete_tensorboard_async.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_tensorboard_service_delete_tensorboard_async.py
index 5e80f3bfd8..a4ee230dc7 100644
--- a/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_tensorboard_service_delete_tensorboard_async.py
+++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_tensorboard_service_delete_tensorboard_async.py
@@ -28,14 +28,12 @@
async def sample_delete_tensorboard():
- """Snippet for delete_tensorboard"""
-
# Create a client
client = aiplatform_v1beta1.TensorboardServiceAsyncClient()
# Initialize request argument(s)
request = aiplatform_v1beta1.DeleteTensorboardRequest(
- name="projects/{project}/locations/{location}/tensorboards/{tensorboard}",
+ name="name_value",
)
# Make the request
@@ -44,6 +42,8 @@ async def sample_delete_tensorboard():
print("Waiting for operation to complete...")
response = await operation.result()
+
+ # Handle the response
print(response)
# [END aiplatform_generated_aiplatform_v1beta1_TensorboardService_DeleteTensorboard_async]
diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_tensorboard_service_delete_tensorboard_experiment_async.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_tensorboard_service_delete_tensorboard_experiment_async.py
index 67ed54c5f0..963d006bb7 100644
--- a/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_tensorboard_service_delete_tensorboard_experiment_async.py
+++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_tensorboard_service_delete_tensorboard_experiment_async.py
@@ -28,14 +28,12 @@
async def sample_delete_tensorboard_experiment():
- """Snippet for delete_tensorboard_experiment"""
-
# Create a client
client = aiplatform_v1beta1.TensorboardServiceAsyncClient()
# Initialize request argument(s)
request = aiplatform_v1beta1.DeleteTensorboardExperimentRequest(
- name="projects/{project}/locations/{location}/tensorboards/{tensorboard}/experiments/{experiment}",
+ name="name_value",
)
# Make the request
@@ -44,6 +42,8 @@ async def sample_delete_tensorboard_experiment():
print("Waiting for operation to complete...")
response = await operation.result()
+
+ # Handle the response
print(response)
# [END aiplatform_generated_aiplatform_v1beta1_TensorboardService_DeleteTensorboardExperiment_async]
diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_tensorboard_service_delete_tensorboard_experiment_sync.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_tensorboard_service_delete_tensorboard_experiment_sync.py
index 13ceac1223..177d6794ed 100644
--- a/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_tensorboard_service_delete_tensorboard_experiment_sync.py
+++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_tensorboard_service_delete_tensorboard_experiment_sync.py
@@ -28,14 +28,12 @@
def sample_delete_tensorboard_experiment():
- """Snippet for delete_tensorboard_experiment"""
-
# Create a client
client = aiplatform_v1beta1.TensorboardServiceClient()
# Initialize request argument(s)
request = aiplatform_v1beta1.DeleteTensorboardExperimentRequest(
- name="projects/{project}/locations/{location}/tensorboards/{tensorboard}/experiments/{experiment}",
+ name="name_value",
)
# Make the request
@@ -44,6 +42,8 @@ def sample_delete_tensorboard_experiment():
print("Waiting for operation to complete...")
response = operation.result()
+
+ # Handle the response
print(response)
# [END aiplatform_generated_aiplatform_v1beta1_TensorboardService_DeleteTensorboardExperiment_sync]
diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_tensorboard_service_delete_tensorboard_run_async.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_tensorboard_service_delete_tensorboard_run_async.py
index 8f4fdba81a..cb348a4d32 100644
--- a/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_tensorboard_service_delete_tensorboard_run_async.py
+++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_tensorboard_service_delete_tensorboard_run_async.py
@@ -28,14 +28,12 @@
async def sample_delete_tensorboard_run():
- """Snippet for delete_tensorboard_run"""
-
# Create a client
client = aiplatform_v1beta1.TensorboardServiceAsyncClient()
# Initialize request argument(s)
request = aiplatform_v1beta1.DeleteTensorboardRunRequest(
- name="projects/{project}/locations/{location}/tensorboards/{tensorboard}/experiments/{experiment}/runs/{run}",
+ name="name_value",
)
# Make the request
@@ -44,6 +42,8 @@ async def sample_delete_tensorboard_run():
print("Waiting for operation to complete...")
response = await operation.result()
+
+ # Handle the response
print(response)
# [END aiplatform_generated_aiplatform_v1beta1_TensorboardService_DeleteTensorboardRun_async]
diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_tensorboard_service_delete_tensorboard_run_sync.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_tensorboard_service_delete_tensorboard_run_sync.py
index e005da533e..568ca24fe7 100644
--- a/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_tensorboard_service_delete_tensorboard_run_sync.py
+++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_tensorboard_service_delete_tensorboard_run_sync.py
@@ -28,14 +28,12 @@
def sample_delete_tensorboard_run():
- """Snippet for delete_tensorboard_run"""
-
# Create a client
client = aiplatform_v1beta1.TensorboardServiceClient()
# Initialize request argument(s)
request = aiplatform_v1beta1.DeleteTensorboardRunRequest(
- name="projects/{project}/locations/{location}/tensorboards/{tensorboard}/experiments/{experiment}/runs/{run}",
+ name="name_value",
)
# Make the request
@@ -44,6 +42,8 @@ def sample_delete_tensorboard_run():
print("Waiting for operation to complete...")
response = operation.result()
+
+ # Handle the response
print(response)
# [END aiplatform_generated_aiplatform_v1beta1_TensorboardService_DeleteTensorboardRun_sync]
diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_tensorboard_service_delete_tensorboard_sync.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_tensorboard_service_delete_tensorboard_sync.py
index a62803f9b2..6acad89fed 100644
--- a/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_tensorboard_service_delete_tensorboard_sync.py
+++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_tensorboard_service_delete_tensorboard_sync.py
@@ -28,14 +28,12 @@
def sample_delete_tensorboard():
- """Snippet for delete_tensorboard"""
-
# Create a client
client = aiplatform_v1beta1.TensorboardServiceClient()
# Initialize request argument(s)
request = aiplatform_v1beta1.DeleteTensorboardRequest(
- name="projects/{project}/locations/{location}/tensorboards/{tensorboard}",
+ name="name_value",
)
# Make the request
@@ -44,6 +42,8 @@ def sample_delete_tensorboard():
print("Waiting for operation to complete...")
response = operation.result()
+
+ # Handle the response
print(response)
# [END aiplatform_generated_aiplatform_v1beta1_TensorboardService_DeleteTensorboard_sync]
diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_tensorboard_service_delete_tensorboard_time_series_async.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_tensorboard_service_delete_tensorboard_time_series_async.py
index 5ffd9e044d..d2835d55a8 100644
--- a/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_tensorboard_service_delete_tensorboard_time_series_async.py
+++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_tensorboard_service_delete_tensorboard_time_series_async.py
@@ -28,14 +28,12 @@
async def sample_delete_tensorboard_time_series():
- """Snippet for delete_tensorboard_time_series"""
-
# Create a client
client = aiplatform_v1beta1.TensorboardServiceAsyncClient()
# Initialize request argument(s)
request = aiplatform_v1beta1.DeleteTensorboardTimeSeriesRequest(
- name="projects/{project}/locations/{location}/tensorboards/{tensorboard}/experiments/{experiment}/runs/{run}/timeSeries/{time_series}",
+ name="name_value",
)
# Make the request
@@ -44,6 +42,8 @@ async def sample_delete_tensorboard_time_series():
print("Waiting for operation to complete...")
response = await operation.result()
+
+ # Handle the response
print(response)
# [END aiplatform_generated_aiplatform_v1beta1_TensorboardService_DeleteTensorboardTimeSeries_async]
diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_tensorboard_service_delete_tensorboard_time_series_sync.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_tensorboard_service_delete_tensorboard_time_series_sync.py
index 6284cdcc46..d2889174eb 100644
--- a/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_tensorboard_service_delete_tensorboard_time_series_sync.py
+++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_tensorboard_service_delete_tensorboard_time_series_sync.py
@@ -28,14 +28,12 @@
def sample_delete_tensorboard_time_series():
- """Snippet for delete_tensorboard_time_series"""
-
# Create a client
client = aiplatform_v1beta1.TensorboardServiceClient()
# Initialize request argument(s)
request = aiplatform_v1beta1.DeleteTensorboardTimeSeriesRequest(
- name="projects/{project}/locations/{location}/tensorboards/{tensorboard}/experiments/{experiment}/runs/{run}/timeSeries/{time_series}",
+ name="name_value",
)
# Make the request
@@ -44,6 +42,8 @@ def sample_delete_tensorboard_time_series():
print("Waiting for operation to complete...")
response = operation.result()
+
+ # Handle the response
print(response)
# [END aiplatform_generated_aiplatform_v1beta1_TensorboardService_DeleteTensorboardTimeSeries_sync]
diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_tensorboard_service_export_tensorboard_time_series_data_async.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_tensorboard_service_export_tensorboard_time_series_data_async.py
index 968e2ed171..cbe98ec9eb 100644
--- a/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_tensorboard_service_export_tensorboard_time_series_data_async.py
+++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_tensorboard_service_export_tensorboard_time_series_data_async.py
@@ -28,18 +28,18 @@
async def sample_export_tensorboard_time_series_data():
- """Snippet for export_tensorboard_time_series_data"""
-
# Create a client
client = aiplatform_v1beta1.TensorboardServiceAsyncClient()
# Initialize request argument(s)
request = aiplatform_v1beta1.ExportTensorboardTimeSeriesDataRequest(
- tensorboard_time_series="projects/{project}/locations/{location}/tensorboards/{tensorboard}/experiments/{experiment}/runs/{run}/timeSeries/{time_series}",
+ tensorboard_time_series="tensorboard_time_series_value",
)
# Make the request
page_result = client.export_tensorboard_time_series_data(request=request)
+
+ # Handle the response
async for response in page_result:
print(response)
diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_tensorboard_service_export_tensorboard_time_series_data_sync.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_tensorboard_service_export_tensorboard_time_series_data_sync.py
index 49ff440cc5..61fa177cb8 100644
--- a/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_tensorboard_service_export_tensorboard_time_series_data_sync.py
+++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_tensorboard_service_export_tensorboard_time_series_data_sync.py
@@ -28,18 +28,18 @@
def sample_export_tensorboard_time_series_data():
- """Snippet for export_tensorboard_time_series_data"""
-
# Create a client
client = aiplatform_v1beta1.TensorboardServiceClient()
# Initialize request argument(s)
request = aiplatform_v1beta1.ExportTensorboardTimeSeriesDataRequest(
- tensorboard_time_series="projects/{project}/locations/{location}/tensorboards/{tensorboard}/experiments/{experiment}/runs/{run}/timeSeries/{time_series}",
+ tensorboard_time_series="tensorboard_time_series_value",
)
# Make the request
page_result = client.export_tensorboard_time_series_data(request=request)
+
+ # Handle the response
for response in page_result:
print(response)
diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_tensorboard_service_get_tensorboard_async.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_tensorboard_service_get_tensorboard_async.py
index 3c6ddc7bc5..29ae4e3b08 100644
--- a/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_tensorboard_service_get_tensorboard_async.py
+++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_tensorboard_service_get_tensorboard_async.py
@@ -28,20 +28,18 @@
async def sample_get_tensorboard():
- """Snippet for get_tensorboard"""
-
# Create a client
client = aiplatform_v1beta1.TensorboardServiceAsyncClient()
# Initialize request argument(s)
request = aiplatform_v1beta1.GetTensorboardRequest(
- name="projects/{project}/locations/{location}/tensorboards/{tensorboard}",
+ name="name_value",
)
# Make the request
response = await client.get_tensorboard(request=request)
- # Handle response
+ # Handle the response
print(response)
# [END aiplatform_generated_aiplatform_v1beta1_TensorboardService_GetTensorboard_async]
diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_tensorboard_service_get_tensorboard_experiment_async.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_tensorboard_service_get_tensorboard_experiment_async.py
index 3fed65d976..73870435e6 100644
--- a/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_tensorboard_service_get_tensorboard_experiment_async.py
+++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_tensorboard_service_get_tensorboard_experiment_async.py
@@ -28,20 +28,18 @@
async def sample_get_tensorboard_experiment():
- """Snippet for get_tensorboard_experiment"""
-
# Create a client
client = aiplatform_v1beta1.TensorboardServiceAsyncClient()
# Initialize request argument(s)
request = aiplatform_v1beta1.GetTensorboardExperimentRequest(
- name="projects/{project}/locations/{location}/tensorboards/{tensorboard}/experiments/{experiment}",
+ name="name_value",
)
# Make the request
response = await client.get_tensorboard_experiment(request=request)
- # Handle response
+ # Handle the response
print(response)
# [END aiplatform_generated_aiplatform_v1beta1_TensorboardService_GetTensorboardExperiment_async]
diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_tensorboard_service_get_tensorboard_experiment_sync.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_tensorboard_service_get_tensorboard_experiment_sync.py
index f2f72a8bed..f9f2d042c6 100644
--- a/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_tensorboard_service_get_tensorboard_experiment_sync.py
+++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_tensorboard_service_get_tensorboard_experiment_sync.py
@@ -28,20 +28,18 @@
def sample_get_tensorboard_experiment():
- """Snippet for get_tensorboard_experiment"""
-
# Create a client
client = aiplatform_v1beta1.TensorboardServiceClient()
# Initialize request argument(s)
request = aiplatform_v1beta1.GetTensorboardExperimentRequest(
- name="projects/{project}/locations/{location}/tensorboards/{tensorboard}/experiments/{experiment}",
+ name="name_value",
)
# Make the request
response = client.get_tensorboard_experiment(request=request)
- # Handle response
+ # Handle the response
print(response)
# [END aiplatform_generated_aiplatform_v1beta1_TensorboardService_GetTensorboardExperiment_sync]
diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_tensorboard_service_get_tensorboard_run_async.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_tensorboard_service_get_tensorboard_run_async.py
index fe1230935e..5887016fc0 100644
--- a/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_tensorboard_service_get_tensorboard_run_async.py
+++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_tensorboard_service_get_tensorboard_run_async.py
@@ -28,20 +28,18 @@
async def sample_get_tensorboard_run():
- """Snippet for get_tensorboard_run"""
-
# Create a client
client = aiplatform_v1beta1.TensorboardServiceAsyncClient()
# Initialize request argument(s)
request = aiplatform_v1beta1.GetTensorboardRunRequest(
- name="projects/{project}/locations/{location}/tensorboards/{tensorboard}/experiments/{experiment}/runs/{run}",
+ name="name_value",
)
# Make the request
response = await client.get_tensorboard_run(request=request)
- # Handle response
+ # Handle the response
print(response)
# [END aiplatform_generated_aiplatform_v1beta1_TensorboardService_GetTensorboardRun_async]
diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_tensorboard_service_get_tensorboard_run_sync.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_tensorboard_service_get_tensorboard_run_sync.py
index e34e09d7bc..d8633272c3 100644
--- a/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_tensorboard_service_get_tensorboard_run_sync.py
+++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_tensorboard_service_get_tensorboard_run_sync.py
@@ -28,20 +28,18 @@
def sample_get_tensorboard_run():
- """Snippet for get_tensorboard_run"""
-
# Create a client
client = aiplatform_v1beta1.TensorboardServiceClient()
# Initialize request argument(s)
request = aiplatform_v1beta1.GetTensorboardRunRequest(
- name="projects/{project}/locations/{location}/tensorboards/{tensorboard}/experiments/{experiment}/runs/{run}",
+ name="name_value",
)
# Make the request
response = client.get_tensorboard_run(request=request)
- # Handle response
+ # Handle the response
print(response)
# [END aiplatform_generated_aiplatform_v1beta1_TensorboardService_GetTensorboardRun_sync]
diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_tensorboard_service_get_tensorboard_sync.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_tensorboard_service_get_tensorboard_sync.py
index 1cf91e7ab3..73b338d694 100644
--- a/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_tensorboard_service_get_tensorboard_sync.py
+++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_tensorboard_service_get_tensorboard_sync.py
@@ -28,20 +28,18 @@
def sample_get_tensorboard():
- """Snippet for get_tensorboard"""
-
# Create a client
client = aiplatform_v1beta1.TensorboardServiceClient()
# Initialize request argument(s)
request = aiplatform_v1beta1.GetTensorboardRequest(
- name="projects/{project}/locations/{location}/tensorboards/{tensorboard}",
+ name="name_value",
)
# Make the request
response = client.get_tensorboard(request=request)
- # Handle response
+ # Handle the response
print(response)
# [END aiplatform_generated_aiplatform_v1beta1_TensorboardService_GetTensorboard_sync]
diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_tensorboard_service_get_tensorboard_time_series_async.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_tensorboard_service_get_tensorboard_time_series_async.py
index 9353d7139d..6777dde535 100644
--- a/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_tensorboard_service_get_tensorboard_time_series_async.py
+++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_tensorboard_service_get_tensorboard_time_series_async.py
@@ -28,20 +28,18 @@
async def sample_get_tensorboard_time_series():
- """Snippet for get_tensorboard_time_series"""
-
# Create a client
client = aiplatform_v1beta1.TensorboardServiceAsyncClient()
# Initialize request argument(s)
request = aiplatform_v1beta1.GetTensorboardTimeSeriesRequest(
- name="projects/{project}/locations/{location}/tensorboards/{tensorboard}/experiments/{experiment}/runs/{run}/timeSeries/{time_series}",
+ name="name_value",
)
# Make the request
response = await client.get_tensorboard_time_series(request=request)
- # Handle response
+ # Handle the response
print(response)
# [END aiplatform_generated_aiplatform_v1beta1_TensorboardService_GetTensorboardTimeSeries_async]
diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_tensorboard_service_get_tensorboard_time_series_sync.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_tensorboard_service_get_tensorboard_time_series_sync.py
index e75cda5c5f..6a022fe9ff 100644
--- a/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_tensorboard_service_get_tensorboard_time_series_sync.py
+++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_tensorboard_service_get_tensorboard_time_series_sync.py
@@ -28,20 +28,18 @@
def sample_get_tensorboard_time_series():
- """Snippet for get_tensorboard_time_series"""
-
# Create a client
client = aiplatform_v1beta1.TensorboardServiceClient()
# Initialize request argument(s)
request = aiplatform_v1beta1.GetTensorboardTimeSeriesRequest(
- name="projects/{project}/locations/{location}/tensorboards/{tensorboard}/experiments/{experiment}/runs/{run}/timeSeries/{time_series}",
+ name="name_value",
)
# Make the request
response = client.get_tensorboard_time_series(request=request)
- # Handle response
+ # Handle the response
print(response)
# [END aiplatform_generated_aiplatform_v1beta1_TensorboardService_GetTensorboardTimeSeries_sync]
diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_tensorboard_service_list_tensorboard_experiments_async.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_tensorboard_service_list_tensorboard_experiments_async.py
index 41a5fdd31d..ad45c40c17 100644
--- a/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_tensorboard_service_list_tensorboard_experiments_async.py
+++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_tensorboard_service_list_tensorboard_experiments_async.py
@@ -28,18 +28,18 @@
async def sample_list_tensorboard_experiments():
- """Snippet for list_tensorboard_experiments"""
-
# Create a client
client = aiplatform_v1beta1.TensorboardServiceAsyncClient()
# Initialize request argument(s)
request = aiplatform_v1beta1.ListTensorboardExperimentsRequest(
- parent="projects/{project}/locations/{location}/tensorboards/{tensorboard}/experiments/{experiment}",
+ parent="parent_value",
)
# Make the request
page_result = client.list_tensorboard_experiments(request=request)
+
+ # Handle the response
async for response in page_result:
print(response)
diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_tensorboard_service_list_tensorboard_experiments_sync.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_tensorboard_service_list_tensorboard_experiments_sync.py
index 547c7bcb59..4a35d020a3 100644
--- a/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_tensorboard_service_list_tensorboard_experiments_sync.py
+++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_tensorboard_service_list_tensorboard_experiments_sync.py
@@ -28,18 +28,18 @@
def sample_list_tensorboard_experiments():
- """Snippet for list_tensorboard_experiments"""
-
# Create a client
client = aiplatform_v1beta1.TensorboardServiceClient()
# Initialize request argument(s)
request = aiplatform_v1beta1.ListTensorboardExperimentsRequest(
- parent="projects/{project}/locations/{location}/tensorboards/{tensorboard}/experiments/{experiment}",
+ parent="parent_value",
)
# Make the request
page_result = client.list_tensorboard_experiments(request=request)
+
+ # Handle the response
for response in page_result:
print(response)
diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_tensorboard_service_list_tensorboard_runs_async.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_tensorboard_service_list_tensorboard_runs_async.py
index 7b901a8835..87a767c8f5 100644
--- a/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_tensorboard_service_list_tensorboard_runs_async.py
+++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_tensorboard_service_list_tensorboard_runs_async.py
@@ -28,18 +28,18 @@
async def sample_list_tensorboard_runs():
- """Snippet for list_tensorboard_runs"""
-
# Create a client
client = aiplatform_v1beta1.TensorboardServiceAsyncClient()
# Initialize request argument(s)
request = aiplatform_v1beta1.ListTensorboardRunsRequest(
- parent="projects/{project}/locations/{location}/tensorboards/{tensorboard}/experiments/{experiment}/runs/{run}",
+ parent="parent_value",
)
# Make the request
page_result = client.list_tensorboard_runs(request=request)
+
+ # Handle the response
async for response in page_result:
print(response)
diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_tensorboard_service_list_tensorboard_runs_sync.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_tensorboard_service_list_tensorboard_runs_sync.py
index 03d0047b44..de8bc437e6 100644
--- a/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_tensorboard_service_list_tensorboard_runs_sync.py
+++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_tensorboard_service_list_tensorboard_runs_sync.py
@@ -28,18 +28,18 @@
def sample_list_tensorboard_runs():
- """Snippet for list_tensorboard_runs"""
-
# Create a client
client = aiplatform_v1beta1.TensorboardServiceClient()
# Initialize request argument(s)
request = aiplatform_v1beta1.ListTensorboardRunsRequest(
- parent="projects/{project}/locations/{location}/tensorboards/{tensorboard}/experiments/{experiment}/runs/{run}",
+ parent="parent_value",
)
# Make the request
page_result = client.list_tensorboard_runs(request=request)
+
+ # Handle the response
for response in page_result:
print(response)
diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_tensorboard_service_list_tensorboard_time_series_async.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_tensorboard_service_list_tensorboard_time_series_async.py
index 04e001a0a6..a2c1694e5f 100644
--- a/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_tensorboard_service_list_tensorboard_time_series_async.py
+++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_tensorboard_service_list_tensorboard_time_series_async.py
@@ -28,18 +28,18 @@
async def sample_list_tensorboard_time_series():
- """Snippet for list_tensorboard_time_series"""
-
# Create a client
client = aiplatform_v1beta1.TensorboardServiceAsyncClient()
# Initialize request argument(s)
request = aiplatform_v1beta1.ListTensorboardTimeSeriesRequest(
- parent="projects/{project}/locations/{location}/tensorboards/{tensorboard}/experiments/{experiment}/runs/{run}/timeSeries/{time_series}",
+ parent="parent_value",
)
# Make the request
page_result = client.list_tensorboard_time_series(request=request)
+
+ # Handle the response
async for response in page_result:
print(response)
diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_tensorboard_service_list_tensorboard_time_series_sync.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_tensorboard_service_list_tensorboard_time_series_sync.py
index 7fa84f61dc..fe73e6e77b 100644
--- a/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_tensorboard_service_list_tensorboard_time_series_sync.py
+++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_tensorboard_service_list_tensorboard_time_series_sync.py
@@ -28,18 +28,18 @@
def sample_list_tensorboard_time_series():
- """Snippet for list_tensorboard_time_series"""
-
# Create a client
client = aiplatform_v1beta1.TensorboardServiceClient()
# Initialize request argument(s)
request = aiplatform_v1beta1.ListTensorboardTimeSeriesRequest(
- parent="projects/{project}/locations/{location}/tensorboards/{tensorboard}/experiments/{experiment}/runs/{run}/timeSeries/{time_series}",
+ parent="parent_value",
)
# Make the request
page_result = client.list_tensorboard_time_series(request=request)
+
+ # Handle the response
for response in page_result:
print(response)
diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_tensorboard_service_list_tensorboards_async.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_tensorboard_service_list_tensorboards_async.py
index b7baa6b365..35ddbfac55 100644
--- a/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_tensorboard_service_list_tensorboards_async.py
+++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_tensorboard_service_list_tensorboards_async.py
@@ -28,18 +28,18 @@
async def sample_list_tensorboards():
- """Snippet for list_tensorboards"""
-
# Create a client
client = aiplatform_v1beta1.TensorboardServiceAsyncClient()
# Initialize request argument(s)
request = aiplatform_v1beta1.ListTensorboardsRequest(
- parent="projects/{project}/locations/{location}/tensorboards/{tensorboard}",
+ parent="parent_value",
)
# Make the request
page_result = client.list_tensorboards(request=request)
+
+ # Handle the response
async for response in page_result:
print(response)
diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_tensorboard_service_list_tensorboards_sync.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_tensorboard_service_list_tensorboards_sync.py
index a4719a2b88..7a6ceb37d0 100644
--- a/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_tensorboard_service_list_tensorboards_sync.py
+++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_tensorboard_service_list_tensorboards_sync.py
@@ -28,18 +28,18 @@
def sample_list_tensorboards():
- """Snippet for list_tensorboards"""
-
# Create a client
client = aiplatform_v1beta1.TensorboardServiceClient()
# Initialize request argument(s)
request = aiplatform_v1beta1.ListTensorboardsRequest(
- parent="projects/{project}/locations/{location}/tensorboards/{tensorboard}",
+ parent="parent_value",
)
# Make the request
page_result = client.list_tensorboards(request=request)
+
+ # Handle the response
for response in page_result:
print(response)
diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_tensorboard_service_read_tensorboard_blob_data_async.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_tensorboard_service_read_tensorboard_blob_data_async.py
index e0651a69d6..cd8b5648c4 100644
--- a/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_tensorboard_service_read_tensorboard_blob_data_async.py
+++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_tensorboard_service_read_tensorboard_blob_data_async.py
@@ -28,18 +28,18 @@
async def sample_read_tensorboard_blob_data():
- """Snippet for read_tensorboard_blob_data"""
-
# Create a client
client = aiplatform_v1beta1.TensorboardServiceAsyncClient()
# Initialize request argument(s)
request = aiplatform_v1beta1.ReadTensorboardBlobDataRequest(
- time_series="projects/{project}/locations/{location}/tensorboards/{tensorboard}/experiments/{experiment}/runs/{run}/timeSeries/{time_series}",
+ time_series="time_series_value",
)
# Make the request
stream = await client.read_tensorboard_blob_data(request=request)
+
+ # Handle the response
async for response in stream:
print(response)
diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_tensorboard_service_read_tensorboard_blob_data_sync.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_tensorboard_service_read_tensorboard_blob_data_sync.py
index f998341a54..fa0bca6c36 100644
--- a/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_tensorboard_service_read_tensorboard_blob_data_sync.py
+++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_tensorboard_service_read_tensorboard_blob_data_sync.py
@@ -28,18 +28,18 @@
def sample_read_tensorboard_blob_data():
- """Snippet for read_tensorboard_blob_data"""
-
# Create a client
client = aiplatform_v1beta1.TensorboardServiceClient()
# Initialize request argument(s)
request = aiplatform_v1beta1.ReadTensorboardBlobDataRequest(
- time_series="projects/{project}/locations/{location}/tensorboards/{tensorboard}/experiments/{experiment}/runs/{run}/timeSeries/{time_series}",
+ time_series="time_series_value",
)
# Make the request
stream = client.read_tensorboard_blob_data(request=request)
+
+ # Handle the response
for response in stream:
print(response)
diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_tensorboard_service_read_tensorboard_time_series_data_async.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_tensorboard_service_read_tensorboard_time_series_data_async.py
index 2d21bd1767..4c2e200cc6 100644
--- a/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_tensorboard_service_read_tensorboard_time_series_data_async.py
+++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_tensorboard_service_read_tensorboard_time_series_data_async.py
@@ -28,20 +28,18 @@
async def sample_read_tensorboard_time_series_data():
- """Snippet for read_tensorboard_time_series_data"""
-
# Create a client
client = aiplatform_v1beta1.TensorboardServiceAsyncClient()
# Initialize request argument(s)
request = aiplatform_v1beta1.ReadTensorboardTimeSeriesDataRequest(
- tensorboard_time_series="projects/{project}/locations/{location}/tensorboards/{tensorboard}/experiments/{experiment}/runs/{run}/timeSeries/{time_series}",
+ tensorboard_time_series="tensorboard_time_series_value",
)
# Make the request
response = await client.read_tensorboard_time_series_data(request=request)
- # Handle response
+ # Handle the response
print(response)
# [END aiplatform_generated_aiplatform_v1beta1_TensorboardService_ReadTensorboardTimeSeriesData_async]
diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_tensorboard_service_read_tensorboard_time_series_data_sync.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_tensorboard_service_read_tensorboard_time_series_data_sync.py
index deaad77347..23c4720b26 100644
--- a/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_tensorboard_service_read_tensorboard_time_series_data_sync.py
+++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_tensorboard_service_read_tensorboard_time_series_data_sync.py
@@ -28,20 +28,18 @@
def sample_read_tensorboard_time_series_data():
- """Snippet for read_tensorboard_time_series_data"""
-
# Create a client
client = aiplatform_v1beta1.TensorboardServiceClient()
# Initialize request argument(s)
request = aiplatform_v1beta1.ReadTensorboardTimeSeriesDataRequest(
- tensorboard_time_series="projects/{project}/locations/{location}/tensorboards/{tensorboard}/experiments/{experiment}/runs/{run}/timeSeries/{time_series}",
+ tensorboard_time_series="tensorboard_time_series_value",
)
# Make the request
response = client.read_tensorboard_time_series_data(request=request)
- # Handle response
+ # Handle the response
print(response)
# [END aiplatform_generated_aiplatform_v1beta1_TensorboardService_ReadTensorboardTimeSeriesData_sync]
diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_tensorboard_service_update_tensorboard_async.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_tensorboard_service_update_tensorboard_async.py
index 308ec92ad2..1aac60a34c 100644
--- a/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_tensorboard_service_update_tensorboard_async.py
+++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_tensorboard_service_update_tensorboard_async.py
@@ -28,8 +28,6 @@
async def sample_update_tensorboard():
- """Snippet for update_tensorboard"""
-
# Create a client
client = aiplatform_v1beta1.TensorboardServiceAsyncClient()
@@ -47,6 +45,8 @@ async def sample_update_tensorboard():
print("Waiting for operation to complete...")
response = await operation.result()
+
+ # Handle the response
print(response)
# [END aiplatform_generated_aiplatform_v1beta1_TensorboardService_UpdateTensorboard_async]
diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_tensorboard_service_update_tensorboard_experiment_async.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_tensorboard_service_update_tensorboard_experiment_async.py
index 6f6dfffb68..b547eab8af 100644
--- a/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_tensorboard_service_update_tensorboard_experiment_async.py
+++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_tensorboard_service_update_tensorboard_experiment_async.py
@@ -28,8 +28,6 @@
async def sample_update_tensorboard_experiment():
- """Snippet for update_tensorboard_experiment"""
-
# Create a client
client = aiplatform_v1beta1.TensorboardServiceAsyncClient()
@@ -40,7 +38,7 @@ async def sample_update_tensorboard_experiment():
# Make the request
response = await client.update_tensorboard_experiment(request=request)
- # Handle response
+ # Handle the response
print(response)
# [END aiplatform_generated_aiplatform_v1beta1_TensorboardService_UpdateTensorboardExperiment_async]
diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_tensorboard_service_update_tensorboard_experiment_sync.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_tensorboard_service_update_tensorboard_experiment_sync.py
index 7e8ccafc10..1e02a6a159 100644
--- a/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_tensorboard_service_update_tensorboard_experiment_sync.py
+++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_tensorboard_service_update_tensorboard_experiment_sync.py
@@ -28,8 +28,6 @@
def sample_update_tensorboard_experiment():
- """Snippet for update_tensorboard_experiment"""
-
# Create a client
client = aiplatform_v1beta1.TensorboardServiceClient()
@@ -40,7 +38,7 @@ def sample_update_tensorboard_experiment():
# Make the request
response = client.update_tensorboard_experiment(request=request)
- # Handle response
+ # Handle the response
print(response)
# [END aiplatform_generated_aiplatform_v1beta1_TensorboardService_UpdateTensorboardExperiment_sync]
diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_tensorboard_service_update_tensorboard_run_async.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_tensorboard_service_update_tensorboard_run_async.py
index 49550cbe0e..c4d780773e 100644
--- a/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_tensorboard_service_update_tensorboard_run_async.py
+++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_tensorboard_service_update_tensorboard_run_async.py
@@ -28,8 +28,6 @@
async def sample_update_tensorboard_run():
- """Snippet for update_tensorboard_run"""
-
# Create a client
client = aiplatform_v1beta1.TensorboardServiceAsyncClient()
@@ -44,7 +42,7 @@ async def sample_update_tensorboard_run():
# Make the request
response = await client.update_tensorboard_run(request=request)
- # Handle response
+ # Handle the response
print(response)
# [END aiplatform_generated_aiplatform_v1beta1_TensorboardService_UpdateTensorboardRun_async]
diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_tensorboard_service_update_tensorboard_run_sync.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_tensorboard_service_update_tensorboard_run_sync.py
index bdf8df3a1c..2e0c73249d 100644
--- a/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_tensorboard_service_update_tensorboard_run_sync.py
+++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_tensorboard_service_update_tensorboard_run_sync.py
@@ -28,8 +28,6 @@
def sample_update_tensorboard_run():
- """Snippet for update_tensorboard_run"""
-
# Create a client
client = aiplatform_v1beta1.TensorboardServiceClient()
@@ -44,7 +42,7 @@ def sample_update_tensorboard_run():
# Make the request
response = client.update_tensorboard_run(request=request)
- # Handle response
+ # Handle the response
print(response)
# [END aiplatform_generated_aiplatform_v1beta1_TensorboardService_UpdateTensorboardRun_sync]
diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_tensorboard_service_update_tensorboard_sync.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_tensorboard_service_update_tensorboard_sync.py
index a40c5255a7..60a15a42aa 100644
--- a/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_tensorboard_service_update_tensorboard_sync.py
+++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_tensorboard_service_update_tensorboard_sync.py
@@ -28,8 +28,6 @@
def sample_update_tensorboard():
- """Snippet for update_tensorboard"""
-
# Create a client
client = aiplatform_v1beta1.TensorboardServiceClient()
@@ -47,6 +45,8 @@ def sample_update_tensorboard():
print("Waiting for operation to complete...")
response = operation.result()
+
+ # Handle the response
print(response)
# [END aiplatform_generated_aiplatform_v1beta1_TensorboardService_UpdateTensorboard_sync]
diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_tensorboard_service_update_tensorboard_time_series_async.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_tensorboard_service_update_tensorboard_time_series_async.py
index ce7c8515e1..702d5aa37c 100644
--- a/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_tensorboard_service_update_tensorboard_time_series_async.py
+++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_tensorboard_service_update_tensorboard_time_series_async.py
@@ -28,8 +28,6 @@
async def sample_update_tensorboard_time_series():
- """Snippet for update_tensorboard_time_series"""
-
# Create a client
client = aiplatform_v1beta1.TensorboardServiceAsyncClient()
@@ -45,7 +43,7 @@ async def sample_update_tensorboard_time_series():
# Make the request
response = await client.update_tensorboard_time_series(request=request)
- # Handle response
+ # Handle the response
print(response)
# [END aiplatform_generated_aiplatform_v1beta1_TensorboardService_UpdateTensorboardTimeSeries_async]
diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_tensorboard_service_update_tensorboard_time_series_sync.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_tensorboard_service_update_tensorboard_time_series_sync.py
index 9ffe2f1d28..4828d30d3e 100644
--- a/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_tensorboard_service_update_tensorboard_time_series_sync.py
+++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_tensorboard_service_update_tensorboard_time_series_sync.py
@@ -28,8 +28,6 @@
def sample_update_tensorboard_time_series():
- """Snippet for update_tensorboard_time_series"""
-
# Create a client
client = aiplatform_v1beta1.TensorboardServiceClient()
@@ -45,7 +43,7 @@ def sample_update_tensorboard_time_series():
# Make the request
response = client.update_tensorboard_time_series(request=request)
- # Handle response
+ # Handle the response
print(response)
# [END aiplatform_generated_aiplatform_v1beta1_TensorboardService_UpdateTensorboardTimeSeries_sync]
diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_tensorboard_service_write_tensorboard_experiment_data_async.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_tensorboard_service_write_tensorboard_experiment_data_async.py
index 9b54eeae34..a8922a6150 100644
--- a/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_tensorboard_service_write_tensorboard_experiment_data_async.py
+++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_tensorboard_service_write_tensorboard_experiment_data_async.py
@@ -28,26 +28,24 @@
async def sample_write_tensorboard_experiment_data():
- """Snippet for write_tensorboard_experiment_data"""
-
# Create a client
client = aiplatform_v1beta1.TensorboardServiceAsyncClient()
# Initialize request argument(s)
write_run_data_requests = aiplatform_v1beta1.WriteTensorboardRunDataRequest()
- write_run_data_requests.tensorboard_run = "projects/{project}/locations/{location}/tensorboards/{tensorboard}/experiments/{experiment}/runs/{run}"
+ write_run_data_requests.tensorboard_run = "tensorboard_run_value"
write_run_data_requests.time_series_data.tensorboard_time_series_id = "tensorboard_time_series_id_value"
write_run_data_requests.time_series_data.value_type = "BLOB_SEQUENCE"
request = aiplatform_v1beta1.WriteTensorboardExperimentDataRequest(
- tensorboard_experiment="projects/{project}/locations/{location}/tensorboards/{tensorboard}/experiments/{experiment}",
+ tensorboard_experiment="tensorboard_experiment_value",
write_run_data_requests=write_run_data_requests,
)
# Make the request
response = await client.write_tensorboard_experiment_data(request=request)
- # Handle response
+ # Handle the response
print(response)
# [END aiplatform_generated_aiplatform_v1beta1_TensorboardService_WriteTensorboardExperimentData_async]
diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_tensorboard_service_write_tensorboard_experiment_data_sync.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_tensorboard_service_write_tensorboard_experiment_data_sync.py
index 6c52d0e94d..a88f5f049c 100644
--- a/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_tensorboard_service_write_tensorboard_experiment_data_sync.py
+++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_tensorboard_service_write_tensorboard_experiment_data_sync.py
@@ -28,26 +28,24 @@
def sample_write_tensorboard_experiment_data():
- """Snippet for write_tensorboard_experiment_data"""
-
# Create a client
client = aiplatform_v1beta1.TensorboardServiceClient()
# Initialize request argument(s)
write_run_data_requests = aiplatform_v1beta1.WriteTensorboardRunDataRequest()
- write_run_data_requests.tensorboard_run = "projects/{project}/locations/{location}/tensorboards/{tensorboard}/experiments/{experiment}/runs/{run}"
+ write_run_data_requests.tensorboard_run = "tensorboard_run_value"
write_run_data_requests.time_series_data.tensorboard_time_series_id = "tensorboard_time_series_id_value"
write_run_data_requests.time_series_data.value_type = "BLOB_SEQUENCE"
request = aiplatform_v1beta1.WriteTensorboardExperimentDataRequest(
- tensorboard_experiment="projects/{project}/locations/{location}/tensorboards/{tensorboard}/experiments/{experiment}",
+ tensorboard_experiment="tensorboard_experiment_value",
write_run_data_requests=write_run_data_requests,
)
# Make the request
response = client.write_tensorboard_experiment_data(request=request)
- # Handle response
+ # Handle the response
print(response)
# [END aiplatform_generated_aiplatform_v1beta1_TensorboardService_WriteTensorboardExperimentData_sync]
diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_tensorboard_service_write_tensorboard_run_data_async.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_tensorboard_service_write_tensorboard_run_data_async.py
index de75b34cf4..384cd87687 100644
--- a/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_tensorboard_service_write_tensorboard_run_data_async.py
+++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_tensorboard_service_write_tensorboard_run_data_async.py
@@ -28,8 +28,6 @@
async def sample_write_tensorboard_run_data():
- """Snippet for write_tensorboard_run_data"""
-
# Create a client
client = aiplatform_v1beta1.TensorboardServiceAsyncClient()
@@ -39,14 +37,14 @@ async def sample_write_tensorboard_run_data():
time_series_data.value_type = "BLOB_SEQUENCE"
request = aiplatform_v1beta1.WriteTensorboardRunDataRequest(
- tensorboard_run="projects/{project}/locations/{location}/tensorboards/{tensorboard}/experiments/{experiment}/runs/{run}",
+ tensorboard_run="tensorboard_run_value",
time_series_data=time_series_data,
)
# Make the request
response = await client.write_tensorboard_run_data(request=request)
- # Handle response
+ # Handle the response
print(response)
# [END aiplatform_generated_aiplatform_v1beta1_TensorboardService_WriteTensorboardRunData_async]
diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_tensorboard_service_write_tensorboard_run_data_sync.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_tensorboard_service_write_tensorboard_run_data_sync.py
index 2e563442dd..95ef91910e 100644
--- a/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_tensorboard_service_write_tensorboard_run_data_sync.py
+++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_tensorboard_service_write_tensorboard_run_data_sync.py
@@ -28,8 +28,6 @@
def sample_write_tensorboard_run_data():
- """Snippet for write_tensorboard_run_data"""
-
# Create a client
client = aiplatform_v1beta1.TensorboardServiceClient()
@@ -39,14 +37,14 @@ def sample_write_tensorboard_run_data():
time_series_data.value_type = "BLOB_SEQUENCE"
request = aiplatform_v1beta1.WriteTensorboardRunDataRequest(
- tensorboard_run="projects/{project}/locations/{location}/tensorboards/{tensorboard}/experiments/{experiment}/runs/{run}",
+ tensorboard_run="tensorboard_run_value",
time_series_data=time_series_data,
)
# Make the request
response = client.write_tensorboard_run_data(request=request)
- # Handle response
+ # Handle the response
print(response)
# [END aiplatform_generated_aiplatform_v1beta1_TensorboardService_WriteTensorboardRunData_sync]
diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_vizier_service_add_trial_measurement_async.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_vizier_service_add_trial_measurement_async.py
index 8d814283a0..6c6a275975 100644
--- a/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_vizier_service_add_trial_measurement_async.py
+++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_vizier_service_add_trial_measurement_async.py
@@ -28,20 +28,18 @@
async def sample_add_trial_measurement():
- """Snippet for add_trial_measurement"""
-
# Create a client
client = aiplatform_v1beta1.VizierServiceAsyncClient()
# Initialize request argument(s)
request = aiplatform_v1beta1.AddTrialMeasurementRequest(
- trial_name="projects/{project}/locations/{location}/studies/{study}/trials/{trial}",
+ trial_name="trial_name_value",
)
# Make the request
response = await client.add_trial_measurement(request=request)
- # Handle response
+ # Handle the response
print(response)
# [END aiplatform_generated_aiplatform_v1beta1_VizierService_AddTrialMeasurement_async]
diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_vizier_service_add_trial_measurement_sync.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_vizier_service_add_trial_measurement_sync.py
index 2811c1c4fe..4af1238908 100644
--- a/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_vizier_service_add_trial_measurement_sync.py
+++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_vizier_service_add_trial_measurement_sync.py
@@ -28,20 +28,18 @@
def sample_add_trial_measurement():
- """Snippet for add_trial_measurement"""
-
# Create a client
client = aiplatform_v1beta1.VizierServiceClient()
# Initialize request argument(s)
request = aiplatform_v1beta1.AddTrialMeasurementRequest(
- trial_name="projects/{project}/locations/{location}/studies/{study}/trials/{trial}",
+ trial_name="trial_name_value",
)
# Make the request
response = client.add_trial_measurement(request=request)
- # Handle response
+ # Handle the response
print(response)
# [END aiplatform_generated_aiplatform_v1beta1_VizierService_AddTrialMeasurement_sync]
diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_vizier_service_check_trial_early_stopping_state_async.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_vizier_service_check_trial_early_stopping_state_async.py
index 18b2fb88d6..9fa6315e86 100644
--- a/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_vizier_service_check_trial_early_stopping_state_async.py
+++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_vizier_service_check_trial_early_stopping_state_async.py
@@ -28,14 +28,12 @@
async def sample_check_trial_early_stopping_state():
- """Snippet for check_trial_early_stopping_state"""
-
# Create a client
client = aiplatform_v1beta1.VizierServiceAsyncClient()
# Initialize request argument(s)
request = aiplatform_v1beta1.CheckTrialEarlyStoppingStateRequest(
- trial_name="projects/{project}/locations/{location}/studies/{study}/trials/{trial}",
+ trial_name="trial_name_value",
)
# Make the request
@@ -44,6 +42,8 @@ async def sample_check_trial_early_stopping_state():
print("Waiting for operation to complete...")
response = await operation.result()
+
+ # Handle the response
print(response)
# [END aiplatform_generated_aiplatform_v1beta1_VizierService_CheckTrialEarlyStoppingState_async]
diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_vizier_service_check_trial_early_stopping_state_sync.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_vizier_service_check_trial_early_stopping_state_sync.py
index a8b1da8794..46227517cc 100644
--- a/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_vizier_service_check_trial_early_stopping_state_sync.py
+++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_vizier_service_check_trial_early_stopping_state_sync.py
@@ -28,14 +28,12 @@
def sample_check_trial_early_stopping_state():
- """Snippet for check_trial_early_stopping_state"""
-
# Create a client
client = aiplatform_v1beta1.VizierServiceClient()
# Initialize request argument(s)
request = aiplatform_v1beta1.CheckTrialEarlyStoppingStateRequest(
- trial_name="projects/{project}/locations/{location}/studies/{study}/trials/{trial}",
+ trial_name="trial_name_value",
)
# Make the request
@@ -44,6 +42,8 @@ def sample_check_trial_early_stopping_state():
print("Waiting for operation to complete...")
response = operation.result()
+
+ # Handle the response
print(response)
# [END aiplatform_generated_aiplatform_v1beta1_VizierService_CheckTrialEarlyStoppingState_sync]
diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_vizier_service_complete_trial_async.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_vizier_service_complete_trial_async.py
index aad6e7fe8e..0ce1d74398 100644
--- a/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_vizier_service_complete_trial_async.py
+++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_vizier_service_complete_trial_async.py
@@ -28,20 +28,18 @@
async def sample_complete_trial():
- """Snippet for complete_trial"""
-
# Create a client
client = aiplatform_v1beta1.VizierServiceAsyncClient()
# Initialize request argument(s)
request = aiplatform_v1beta1.CompleteTrialRequest(
- name="projects/{project}/locations/{location}/studies/{study}/trials/{trial}",
+ name="name_value",
)
# Make the request
response = await client.complete_trial(request=request)
- # Handle response
+ # Handle the response
print(response)
# [END aiplatform_generated_aiplatform_v1beta1_VizierService_CompleteTrial_async]
diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_vizier_service_complete_trial_sync.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_vizier_service_complete_trial_sync.py
index 7264b03516..9be064f14e 100644
--- a/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_vizier_service_complete_trial_sync.py
+++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_vizier_service_complete_trial_sync.py
@@ -28,20 +28,18 @@
def sample_complete_trial():
- """Snippet for complete_trial"""
-
# Create a client
client = aiplatform_v1beta1.VizierServiceClient()
# Initialize request argument(s)
request = aiplatform_v1beta1.CompleteTrialRequest(
- name="projects/{project}/locations/{location}/studies/{study}/trials/{trial}",
+ name="name_value",
)
# Make the request
response = client.complete_trial(request=request)
- # Handle response
+ # Handle the response
print(response)
# [END aiplatform_generated_aiplatform_v1beta1_VizierService_CompleteTrial_sync]
diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_vizier_service_create_study_async.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_vizier_service_create_study_async.py
index 0baf84e3ee..54e6276554 100644
--- a/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_vizier_service_create_study_async.py
+++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_vizier_service_create_study_async.py
@@ -28,8 +28,6 @@
async def sample_create_study():
- """Snippet for create_study"""
-
# Create a client
client = aiplatform_v1beta1.VizierServiceAsyncClient()
@@ -43,14 +41,14 @@ async def sample_create_study():
study.study_spec.parameters.parameter_id = "parameter_id_value"
request = aiplatform_v1beta1.CreateStudyRequest(
- parent="projects/{project}/locations/{location}",
+ parent="parent_value",
study=study,
)
# Make the request
response = await client.create_study(request=request)
- # Handle response
+ # Handle the response
print(response)
# [END aiplatform_generated_aiplatform_v1beta1_VizierService_CreateStudy_async]
diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_vizier_service_create_study_sync.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_vizier_service_create_study_sync.py
index 1aac074d7f..490f7a685e 100644
--- a/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_vizier_service_create_study_sync.py
+++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_vizier_service_create_study_sync.py
@@ -28,8 +28,6 @@
def sample_create_study():
- """Snippet for create_study"""
-
# Create a client
client = aiplatform_v1beta1.VizierServiceClient()
@@ -43,14 +41,14 @@ def sample_create_study():
study.study_spec.parameters.parameter_id = "parameter_id_value"
request = aiplatform_v1beta1.CreateStudyRequest(
- parent="projects/{project}/locations/{location}",
+ parent="parent_value",
study=study,
)
# Make the request
response = client.create_study(request=request)
- # Handle response
+ # Handle the response
print(response)
# [END aiplatform_generated_aiplatform_v1beta1_VizierService_CreateStudy_sync]
diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_vizier_service_create_trial_async.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_vizier_service_create_trial_async.py
index 788fa56da8..63141263c7 100644
--- a/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_vizier_service_create_trial_async.py
+++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_vizier_service_create_trial_async.py
@@ -28,20 +28,18 @@
async def sample_create_trial():
- """Snippet for create_trial"""
-
# Create a client
client = aiplatform_v1beta1.VizierServiceAsyncClient()
# Initialize request argument(s)
request = aiplatform_v1beta1.CreateTrialRequest(
- parent="projects/{project}/locations/{location}/studies/{study}",
+ parent="parent_value",
)
# Make the request
response = await client.create_trial(request=request)
- # Handle response
+ # Handle the response
print(response)
# [END aiplatform_generated_aiplatform_v1beta1_VizierService_CreateTrial_async]
diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_vizier_service_create_trial_sync.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_vizier_service_create_trial_sync.py
index 8c9923480d..d4c3826f27 100644
--- a/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_vizier_service_create_trial_sync.py
+++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_vizier_service_create_trial_sync.py
@@ -28,20 +28,18 @@
def sample_create_trial():
- """Snippet for create_trial"""
-
# Create a client
client = aiplatform_v1beta1.VizierServiceClient()
# Initialize request argument(s)
request = aiplatform_v1beta1.CreateTrialRequest(
- parent="projects/{project}/locations/{location}/studies/{study}",
+ parent="parent_value",
)
# Make the request
response = client.create_trial(request=request)
- # Handle response
+ # Handle the response
print(response)
# [END aiplatform_generated_aiplatform_v1beta1_VizierService_CreateTrial_sync]
diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_vizier_service_delete_study_async.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_vizier_service_delete_study_async.py
index 8a7080f734..64e39de371 100644
--- a/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_vizier_service_delete_study_async.py
+++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_vizier_service_delete_study_async.py
@@ -28,18 +28,16 @@
async def sample_delete_study():
- """Snippet for delete_study"""
-
# Create a client
client = aiplatform_v1beta1.VizierServiceAsyncClient()
# Initialize request argument(s)
request = aiplatform_v1beta1.DeleteStudyRequest(
- name="projects/{project}/locations/{location}/studies/{study}",
+ name="name_value",
)
# Make the request
- response = await client.delete_study(request=request)
+ await client.delete_study(request=request)
# [END aiplatform_generated_aiplatform_v1beta1_VizierService_DeleteStudy_async]
diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_vizier_service_delete_study_sync.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_vizier_service_delete_study_sync.py
index 7d023513c8..1e7f9f76dc 100644
--- a/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_vizier_service_delete_study_sync.py
+++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_vizier_service_delete_study_sync.py
@@ -28,18 +28,16 @@
def sample_delete_study():
- """Snippet for delete_study"""
-
# Create a client
client = aiplatform_v1beta1.VizierServiceClient()
# Initialize request argument(s)
request = aiplatform_v1beta1.DeleteStudyRequest(
- name="projects/{project}/locations/{location}/studies/{study}",
+ name="name_value",
)
# Make the request
- response = client.delete_study(request=request)
+ client.delete_study(request=request)
# [END aiplatform_generated_aiplatform_v1beta1_VizierService_DeleteStudy_sync]
diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_vizier_service_delete_trial_async.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_vizier_service_delete_trial_async.py
index 8f358f85a7..9be5bdc1ed 100644
--- a/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_vizier_service_delete_trial_async.py
+++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_vizier_service_delete_trial_async.py
@@ -28,18 +28,16 @@
async def sample_delete_trial():
- """Snippet for delete_trial"""
-
# Create a client
client = aiplatform_v1beta1.VizierServiceAsyncClient()
# Initialize request argument(s)
request = aiplatform_v1beta1.DeleteTrialRequest(
- name="projects/{project}/locations/{location}/studies/{study}/trials/{trial}",
+ name="name_value",
)
# Make the request
- response = await client.delete_trial(request=request)
+ await client.delete_trial(request=request)
# [END aiplatform_generated_aiplatform_v1beta1_VizierService_DeleteTrial_async]
diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_vizier_service_delete_trial_sync.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_vizier_service_delete_trial_sync.py
index 82aca0da47..34fce74d4d 100644
--- a/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_vizier_service_delete_trial_sync.py
+++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_vizier_service_delete_trial_sync.py
@@ -28,18 +28,16 @@
def sample_delete_trial():
- """Snippet for delete_trial"""
-
# Create a client
client = aiplatform_v1beta1.VizierServiceClient()
# Initialize request argument(s)
request = aiplatform_v1beta1.DeleteTrialRequest(
- name="projects/{project}/locations/{location}/studies/{study}/trials/{trial}",
+ name="name_value",
)
# Make the request
- response = client.delete_trial(request=request)
+ client.delete_trial(request=request)
# [END aiplatform_generated_aiplatform_v1beta1_VizierService_DeleteTrial_sync]
diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_vizier_service_get_study_async.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_vizier_service_get_study_async.py
index 3625192d0c..7752bfaa85 100644
--- a/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_vizier_service_get_study_async.py
+++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_vizier_service_get_study_async.py
@@ -28,20 +28,18 @@
async def sample_get_study():
- """Snippet for get_study"""
-
# Create a client
client = aiplatform_v1beta1.VizierServiceAsyncClient()
# Initialize request argument(s)
request = aiplatform_v1beta1.GetStudyRequest(
- name="projects/{project}/locations/{location}/studies/{study}",
+ name="name_value",
)
# Make the request
response = await client.get_study(request=request)
- # Handle response
+ # Handle the response
print(response)
# [END aiplatform_generated_aiplatform_v1beta1_VizierService_GetStudy_async]
diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_vizier_service_get_study_sync.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_vizier_service_get_study_sync.py
index f75cfd0a8f..c7e2477bf4 100644
--- a/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_vizier_service_get_study_sync.py
+++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_vizier_service_get_study_sync.py
@@ -28,20 +28,18 @@
def sample_get_study():
- """Snippet for get_study"""
-
# Create a client
client = aiplatform_v1beta1.VizierServiceClient()
# Initialize request argument(s)
request = aiplatform_v1beta1.GetStudyRequest(
- name="projects/{project}/locations/{location}/studies/{study}",
+ name="name_value",
)
# Make the request
response = client.get_study(request=request)
- # Handle response
+ # Handle the response
print(response)
# [END aiplatform_generated_aiplatform_v1beta1_VizierService_GetStudy_sync]
diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_vizier_service_get_trial_async.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_vizier_service_get_trial_async.py
index cd3c5bc252..9dc6b493e8 100644
--- a/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_vizier_service_get_trial_async.py
+++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_vizier_service_get_trial_async.py
@@ -28,20 +28,18 @@
async def sample_get_trial():
- """Snippet for get_trial"""
-
# Create a client
client = aiplatform_v1beta1.VizierServiceAsyncClient()
# Initialize request argument(s)
request = aiplatform_v1beta1.GetTrialRequest(
- name="projects/{project}/locations/{location}/studies/{study}/trials/{trial}",
+ name="name_value",
)
# Make the request
response = await client.get_trial(request=request)
- # Handle response
+ # Handle the response
print(response)
# [END aiplatform_generated_aiplatform_v1beta1_VizierService_GetTrial_async]
diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_vizier_service_get_trial_sync.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_vizier_service_get_trial_sync.py
index e85c248ed9..298c5c4e6d 100644
--- a/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_vizier_service_get_trial_sync.py
+++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_vizier_service_get_trial_sync.py
@@ -28,20 +28,18 @@
def sample_get_trial():
- """Snippet for get_trial"""
-
# Create a client
client = aiplatform_v1beta1.VizierServiceClient()
# Initialize request argument(s)
request = aiplatform_v1beta1.GetTrialRequest(
- name="projects/{project}/locations/{location}/studies/{study}/trials/{trial}",
+ name="name_value",
)
# Make the request
response = client.get_trial(request=request)
- # Handle response
+ # Handle the response
print(response)
# [END aiplatform_generated_aiplatform_v1beta1_VizierService_GetTrial_sync]
diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_vizier_service_list_optimal_trials_async.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_vizier_service_list_optimal_trials_async.py
index c602b1fa86..51560cd7b2 100644
--- a/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_vizier_service_list_optimal_trials_async.py
+++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_vizier_service_list_optimal_trials_async.py
@@ -28,20 +28,18 @@
async def sample_list_optimal_trials():
- """Snippet for list_optimal_trials"""
-
# Create a client
client = aiplatform_v1beta1.VizierServiceAsyncClient()
# Initialize request argument(s)
request = aiplatform_v1beta1.ListOptimalTrialsRequest(
- parent="projects/{project}/locations/{location}/studies/{study}",
+ parent="parent_value",
)
# Make the request
response = await client.list_optimal_trials(request=request)
- # Handle response
+ # Handle the response
print(response)
# [END aiplatform_generated_aiplatform_v1beta1_VizierService_ListOptimalTrials_async]
diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_vizier_service_list_optimal_trials_sync.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_vizier_service_list_optimal_trials_sync.py
index 602524830b..110990e99b 100644
--- a/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_vizier_service_list_optimal_trials_sync.py
+++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_vizier_service_list_optimal_trials_sync.py
@@ -28,20 +28,18 @@
def sample_list_optimal_trials():
- """Snippet for list_optimal_trials"""
-
# Create a client
client = aiplatform_v1beta1.VizierServiceClient()
# Initialize request argument(s)
request = aiplatform_v1beta1.ListOptimalTrialsRequest(
- parent="projects/{project}/locations/{location}/studies/{study}",
+ parent="parent_value",
)
# Make the request
response = client.list_optimal_trials(request=request)
- # Handle response
+ # Handle the response
print(response)
# [END aiplatform_generated_aiplatform_v1beta1_VizierService_ListOptimalTrials_sync]
diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_vizier_service_list_studies_async.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_vizier_service_list_studies_async.py
index 99e2fc3399..a09c7c410b 100644
--- a/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_vizier_service_list_studies_async.py
+++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_vizier_service_list_studies_async.py
@@ -28,18 +28,18 @@
async def sample_list_studies():
- """Snippet for list_studies"""
-
# Create a client
client = aiplatform_v1beta1.VizierServiceAsyncClient()
# Initialize request argument(s)
request = aiplatform_v1beta1.ListStudiesRequest(
- parent="projects/{project}/locations/{location}",
+ parent="parent_value",
)
# Make the request
page_result = client.list_studies(request=request)
+
+ # Handle the response
async for response in page_result:
print(response)
diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_vizier_service_list_studies_sync.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_vizier_service_list_studies_sync.py
index 8809dd06fc..12285eb733 100644
--- a/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_vizier_service_list_studies_sync.py
+++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_vizier_service_list_studies_sync.py
@@ -28,18 +28,18 @@
def sample_list_studies():
- """Snippet for list_studies"""
-
# Create a client
client = aiplatform_v1beta1.VizierServiceClient()
# Initialize request argument(s)
request = aiplatform_v1beta1.ListStudiesRequest(
- parent="projects/{project}/locations/{location}",
+ parent="parent_value",
)
# Make the request
page_result = client.list_studies(request=request)
+
+ # Handle the response
for response in page_result:
print(response)
diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_vizier_service_list_trials_async.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_vizier_service_list_trials_async.py
index 6b09fc5122..c61bbc499b 100644
--- a/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_vizier_service_list_trials_async.py
+++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_vizier_service_list_trials_async.py
@@ -28,18 +28,18 @@
async def sample_list_trials():
- """Snippet for list_trials"""
-
# Create a client
client = aiplatform_v1beta1.VizierServiceAsyncClient()
# Initialize request argument(s)
request = aiplatform_v1beta1.ListTrialsRequest(
- parent="projects/{project}/locations/{location}/studies/{study}",
+ parent="parent_value",
)
# Make the request
page_result = client.list_trials(request=request)
+
+ # Handle the response
async for response in page_result:
print(response)
diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_vizier_service_list_trials_sync.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_vizier_service_list_trials_sync.py
index 20d4c02af8..fdc006231a 100644
--- a/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_vizier_service_list_trials_sync.py
+++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_vizier_service_list_trials_sync.py
@@ -28,18 +28,18 @@
def sample_list_trials():
- """Snippet for list_trials"""
-
# Create a client
client = aiplatform_v1beta1.VizierServiceClient()
# Initialize request argument(s)
request = aiplatform_v1beta1.ListTrialsRequest(
- parent="projects/{project}/locations/{location}/studies/{study}",
+ parent="parent_value",
)
# Make the request
page_result = client.list_trials(request=request)
+
+ # Handle the response
for response in page_result:
print(response)
diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_vizier_service_lookup_study_async.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_vizier_service_lookup_study_async.py
index 8882edbba4..08261c53f1 100644
--- a/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_vizier_service_lookup_study_async.py
+++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_vizier_service_lookup_study_async.py
@@ -28,21 +28,19 @@
async def sample_lookup_study():
- """Snippet for lookup_study"""
-
# Create a client
client = aiplatform_v1beta1.VizierServiceAsyncClient()
# Initialize request argument(s)
request = aiplatform_v1beta1.LookupStudyRequest(
- parent="projects/{project}/locations/{location}",
+ parent="parent_value",
display_name="display_name_value",
)
# Make the request
response = await client.lookup_study(request=request)
- # Handle response
+ # Handle the response
print(response)
# [END aiplatform_generated_aiplatform_v1beta1_VizierService_LookupStudy_async]
diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_vizier_service_lookup_study_sync.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_vizier_service_lookup_study_sync.py
index 5d076d04aa..fb7242f8a5 100644
--- a/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_vizier_service_lookup_study_sync.py
+++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_vizier_service_lookup_study_sync.py
@@ -28,21 +28,19 @@
def sample_lookup_study():
- """Snippet for lookup_study"""
-
# Create a client
client = aiplatform_v1beta1.VizierServiceClient()
# Initialize request argument(s)
request = aiplatform_v1beta1.LookupStudyRequest(
- parent="projects/{project}/locations/{location}",
+ parent="parent_value",
display_name="display_name_value",
)
# Make the request
response = client.lookup_study(request=request)
- # Handle response
+ # Handle the response
print(response)
# [END aiplatform_generated_aiplatform_v1beta1_VizierService_LookupStudy_sync]
diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_vizier_service_stop_trial_async.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_vizier_service_stop_trial_async.py
index 8361c3a408..3f87e48b7d 100644
--- a/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_vizier_service_stop_trial_async.py
+++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_vizier_service_stop_trial_async.py
@@ -28,20 +28,18 @@
async def sample_stop_trial():
- """Snippet for stop_trial"""
-
# Create a client
client = aiplatform_v1beta1.VizierServiceAsyncClient()
# Initialize request argument(s)
request = aiplatform_v1beta1.StopTrialRequest(
- name="projects/{project}/locations/{location}/studies/{study}/trials/{trial}",
+ name="name_value",
)
# Make the request
response = await client.stop_trial(request=request)
- # Handle response
+ # Handle the response
print(response)
# [END aiplatform_generated_aiplatform_v1beta1_VizierService_StopTrial_async]
diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_vizier_service_stop_trial_sync.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_vizier_service_stop_trial_sync.py
index 5d9e1aa5c6..6ccc3ed2fa 100644
--- a/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_vizier_service_stop_trial_sync.py
+++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_vizier_service_stop_trial_sync.py
@@ -28,20 +28,18 @@
def sample_stop_trial():
- """Snippet for stop_trial"""
-
# Create a client
client = aiplatform_v1beta1.VizierServiceClient()
# Initialize request argument(s)
request = aiplatform_v1beta1.StopTrialRequest(
- name="projects/{project}/locations/{location}/studies/{study}/trials/{trial}",
+ name="name_value",
)
# Make the request
response = client.stop_trial(request=request)
- # Handle response
+ # Handle the response
print(response)
# [END aiplatform_generated_aiplatform_v1beta1_VizierService_StopTrial_sync]
diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_vizier_service_suggest_trials_async.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_vizier_service_suggest_trials_async.py
index 02d2db2bef..87910c8156 100644
--- a/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_vizier_service_suggest_trials_async.py
+++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_vizier_service_suggest_trials_async.py
@@ -28,14 +28,12 @@
async def sample_suggest_trials():
- """Snippet for suggest_trials"""
-
# Create a client
client = aiplatform_v1beta1.VizierServiceAsyncClient()
# Initialize request argument(s)
request = aiplatform_v1beta1.SuggestTrialsRequest(
- parent="projects/{project}/locations/{location}/studies/{study}",
+ parent="parent_value",
suggestion_count=1744,
client_id="client_id_value",
)
@@ -46,6 +44,8 @@ async def sample_suggest_trials():
print("Waiting for operation to complete...")
response = await operation.result()
+
+ # Handle the response
print(response)
# [END aiplatform_generated_aiplatform_v1beta1_VizierService_SuggestTrials_async]
diff --git a/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_vizier_service_suggest_trials_sync.py b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_vizier_service_suggest_trials_sync.py
index e4b2fe009c..2ed4076392 100644
--- a/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_vizier_service_suggest_trials_sync.py
+++ b/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_vizier_service_suggest_trials_sync.py
@@ -28,14 +28,12 @@
def sample_suggest_trials():
- """Snippet for suggest_trials"""
-
# Create a client
client = aiplatform_v1beta1.VizierServiceClient()
# Initialize request argument(s)
request = aiplatform_v1beta1.SuggestTrialsRequest(
- parent="projects/{project}/locations/{location}/studies/{study}",
+ parent="parent_value",
suggestion_count=1744,
client_id="client_id_value",
)
@@ -46,6 +44,8 @@ def sample_suggest_trials():
print("Waiting for operation to complete...")
response = operation.result()
+
+ # Handle the response
print(response)
# [END aiplatform_generated_aiplatform_v1beta1_VizierService_SuggestTrials_sync]
diff --git a/samples/generated_samples/snippet_metadata_aiplatform_v1.json b/samples/generated_samples/snippet_metadata_aiplatform_v1.json
new file mode 100644
index 0000000000..0890737ce0
--- /dev/null
+++ b/samples/generated_samples/snippet_metadata_aiplatform_v1.json
@@ -0,0 +1,16340 @@
+{
+ "snippets": [
+ {
+ "clientMethod": {
+ "async": true,
+ "method": {
+ "service": {
+ "shortName": "DatasetService"
+ },
+ "shortName": "CreateDataset"
+ }
+ },
+ "file": "aiplatform_generated_aiplatform_v1_dataset_service_create_dataset_async.py",
+ "regionTag": "aiplatform_generated_aiplatform_v1_DatasetService_CreateDataset_async",
+ "segments": [
+ {
+ "end": 54,
+ "start": 27,
+ "type": "FULL"
+ },
+ {
+ "end": 54,
+ "start": 27,
+ "type": "SHORT"
+ },
+ {
+ "end": 33,
+ "start": 31,
+ "type": "CLIENT_INITIALIZATION"
+ },
+ {
+ "end": 44,
+ "start": 34,
+ "type": "REQUEST_INITIALIZATION"
+ },
+ {
+ "end": 51,
+ "start": 45,
+ "type": "REQUEST_EXECUTION"
+ },
+ {
+ "end": 55,
+ "start": 52,
+ "type": "RESPONSE_HANDLING"
+ }
+ ]
+ },
+ {
+ "clientMethod": {
+ "method": {
+ "service": {
+ "shortName": "DatasetService"
+ },
+ "shortName": "CreateDataset"
+ }
+ },
+ "file": "aiplatform_generated_aiplatform_v1_dataset_service_create_dataset_sync.py",
+ "regionTag": "aiplatform_generated_aiplatform_v1_DatasetService_CreateDataset_sync",
+ "segments": [
+ {
+ "end": 54,
+ "start": 27,
+ "type": "FULL"
+ },
+ {
+ "end": 54,
+ "start": 27,
+ "type": "SHORT"
+ },
+ {
+ "end": 33,
+ "start": 31,
+ "type": "CLIENT_INITIALIZATION"
+ },
+ {
+ "end": 44,
+ "start": 34,
+ "type": "REQUEST_INITIALIZATION"
+ },
+ {
+ "end": 51,
+ "start": 45,
+ "type": "REQUEST_EXECUTION"
+ },
+ {
+ "end": 55,
+ "start": 52,
+ "type": "RESPONSE_HANDLING"
+ }
+ ]
+ },
+ {
+ "clientMethod": {
+ "async": true,
+ "method": {
+ "service": {
+ "shortName": "DatasetService"
+ },
+ "shortName": "DeleteDataset"
+ }
+ },
+ "file": "aiplatform_generated_aiplatform_v1_dataset_service_delete_dataset_async.py",
+ "regionTag": "aiplatform_generated_aiplatform_v1_DatasetService_DeleteDataset_async",
+ "segments": [
+ {
+ "end": 48,
+ "start": 27,
+ "type": "FULL"
+ },
+ {
+ "end": 48,
+ "start": 27,
+ "type": "SHORT"
+ },
+ {
+ "end": 33,
+ "start": 31,
+ "type": "CLIENT_INITIALIZATION"
+ },
+ {
+ "end": 38,
+ "start": 34,
+ "type": "REQUEST_INITIALIZATION"
+ },
+ {
+ "end": 45,
+ "start": 39,
+ "type": "REQUEST_EXECUTION"
+ },
+ {
+ "end": 49,
+ "start": 46,
+ "type": "RESPONSE_HANDLING"
+ }
+ ]
+ },
+ {
+ "clientMethod": {
+ "method": {
+ "service": {
+ "shortName": "DatasetService"
+ },
+ "shortName": "DeleteDataset"
+ }
+ },
+ "file": "aiplatform_generated_aiplatform_v1_dataset_service_delete_dataset_sync.py",
+ "regionTag": "aiplatform_generated_aiplatform_v1_DatasetService_DeleteDataset_sync",
+ "segments": [
+ {
+ "end": 48,
+ "start": 27,
+ "type": "FULL"
+ },
+ {
+ "end": 48,
+ "start": 27,
+ "type": "SHORT"
+ },
+ {
+ "end": 33,
+ "start": 31,
+ "type": "CLIENT_INITIALIZATION"
+ },
+ {
+ "end": 38,
+ "start": 34,
+ "type": "REQUEST_INITIALIZATION"
+ },
+ {
+ "end": 45,
+ "start": 39,
+ "type": "REQUEST_EXECUTION"
+ },
+ {
+ "end": 49,
+ "start": 46,
+ "type": "RESPONSE_HANDLING"
+ }
+ ]
+ },
+ {
+ "clientMethod": {
+ "async": true,
+ "method": {
+ "service": {
+ "shortName": "DatasetService"
+ },
+ "shortName": "ExportData"
+ }
+ },
+ "file": "aiplatform_generated_aiplatform_v1_dataset_service_export_data_async.py",
+ "regionTag": "aiplatform_generated_aiplatform_v1_DatasetService_ExportData_async",
+ "segments": [
+ {
+ "end": 52,
+ "start": 27,
+ "type": "FULL"
+ },
+ {
+ "end": 52,
+ "start": 27,
+ "type": "SHORT"
+ },
+ {
+ "end": 33,
+ "start": 31,
+ "type": "CLIENT_INITIALIZATION"
+ },
+ {
+ "end": 42,
+ "start": 34,
+ "type": "REQUEST_INITIALIZATION"
+ },
+ {
+ "end": 49,
+ "start": 43,
+ "type": "REQUEST_EXECUTION"
+ },
+ {
+ "end": 53,
+ "start": 50,
+ "type": "RESPONSE_HANDLING"
+ }
+ ]
+ },
+ {
+ "clientMethod": {
+ "method": {
+ "service": {
+ "shortName": "DatasetService"
+ },
+ "shortName": "ExportData"
+ }
+ },
+ "file": "aiplatform_generated_aiplatform_v1_dataset_service_export_data_sync.py",
+ "regionTag": "aiplatform_generated_aiplatform_v1_DatasetService_ExportData_sync",
+ "segments": [
+ {
+ "end": 52,
+ "start": 27,
+ "type": "FULL"
+ },
+ {
+ "end": 52,
+ "start": 27,
+ "type": "SHORT"
+ },
+ {
+ "end": 33,
+ "start": 31,
+ "type": "CLIENT_INITIALIZATION"
+ },
+ {
+ "end": 42,
+ "start": 34,
+ "type": "REQUEST_INITIALIZATION"
+ },
+ {
+ "end": 49,
+ "start": 43,
+ "type": "REQUEST_EXECUTION"
+ },
+ {
+ "end": 53,
+ "start": 50,
+ "type": "RESPONSE_HANDLING"
+ }
+ ]
+ },
+ {
+ "clientMethod": {
+ "async": true,
+ "method": {
+ "service": {
+ "shortName": "DatasetService"
+ },
+ "shortName": "GetAnnotationSpec"
+ }
+ },
+ "file": "aiplatform_generated_aiplatform_v1_dataset_service_get_annotation_spec_async.py",
+ "regionTag": "aiplatform_generated_aiplatform_v1_DatasetService_GetAnnotationSpec_async",
+ "segments": [
+ {
+ "end": 44,
+ "start": 27,
+ "type": "FULL"
+ },
+ {
+ "end": 44,
+ "start": 27,
+ "type": "SHORT"
+ },
+ {
+ "end": 33,
+ "start": 31,
+ "type": "CLIENT_INITIALIZATION"
+ },
+ {
+ "end": 38,
+ "start": 34,
+ "type": "REQUEST_INITIALIZATION"
+ },
+ {
+ "end": 41,
+ "start": 39,
+ "type": "REQUEST_EXECUTION"
+ },
+ {
+ "end": 45,
+ "start": 42,
+ "type": "RESPONSE_HANDLING"
+ }
+ ]
+ },
+ {
+ "clientMethod": {
+ "method": {
+ "service": {
+ "shortName": "DatasetService"
+ },
+ "shortName": "GetAnnotationSpec"
+ }
+ },
+ "file": "aiplatform_generated_aiplatform_v1_dataset_service_get_annotation_spec_sync.py",
+ "regionTag": "aiplatform_generated_aiplatform_v1_DatasetService_GetAnnotationSpec_sync",
+ "segments": [
+ {
+ "end": 44,
+ "start": 27,
+ "type": "FULL"
+ },
+ {
+ "end": 44,
+ "start": 27,
+ "type": "SHORT"
+ },
+ {
+ "end": 33,
+ "start": 31,
+ "type": "CLIENT_INITIALIZATION"
+ },
+ {
+ "end": 38,
+ "start": 34,
+ "type": "REQUEST_INITIALIZATION"
+ },
+ {
+ "end": 41,
+ "start": 39,
+ "type": "REQUEST_EXECUTION"
+ },
+ {
+ "end": 45,
+ "start": 42,
+ "type": "RESPONSE_HANDLING"
+ }
+ ]
+ },
+ {
+ "clientMethod": {
+ "async": true,
+ "method": {
+ "service": {
+ "shortName": "DatasetService"
+ },
+ "shortName": "GetDataset"
+ }
+ },
+ "file": "aiplatform_generated_aiplatform_v1_dataset_service_get_dataset_async.py",
+ "regionTag": "aiplatform_generated_aiplatform_v1_DatasetService_GetDataset_async",
+ "segments": [
+ {
+ "end": 44,
+ "start": 27,
+ "type": "FULL"
+ },
+ {
+ "end": 44,
+ "start": 27,
+ "type": "SHORT"
+ },
+ {
+ "end": 33,
+ "start": 31,
+ "type": "CLIENT_INITIALIZATION"
+ },
+ {
+ "end": 38,
+ "start": 34,
+ "type": "REQUEST_INITIALIZATION"
+ },
+ {
+ "end": 41,
+ "start": 39,
+ "type": "REQUEST_EXECUTION"
+ },
+ {
+ "end": 45,
+ "start": 42,
+ "type": "RESPONSE_HANDLING"
+ }
+ ]
+ },
+ {
+ "clientMethod": {
+ "method": {
+ "service": {
+ "shortName": "DatasetService"
+ },
+ "shortName": "GetDataset"
+ }
+ },
+ "file": "aiplatform_generated_aiplatform_v1_dataset_service_get_dataset_sync.py",
+ "regionTag": "aiplatform_generated_aiplatform_v1_DatasetService_GetDataset_sync",
+ "segments": [
+ {
+ "end": 44,
+ "start": 27,
+ "type": "FULL"
+ },
+ {
+ "end": 44,
+ "start": 27,
+ "type": "SHORT"
+ },
+ {
+ "end": 33,
+ "start": 31,
+ "type": "CLIENT_INITIALIZATION"
+ },
+ {
+ "end": 38,
+ "start": 34,
+ "type": "REQUEST_INITIALIZATION"
+ },
+ {
+ "end": 41,
+ "start": 39,
+ "type": "REQUEST_EXECUTION"
+ },
+ {
+ "end": 45,
+ "start": 42,
+ "type": "RESPONSE_HANDLING"
+ }
+ ]
+ },
+ {
+ "clientMethod": {
+ "async": true,
+ "method": {
+ "service": {
+ "shortName": "DatasetService"
+ },
+ "shortName": "ImportData"
+ }
+ },
+ "file": "aiplatform_generated_aiplatform_v1_dataset_service_import_data_async.py",
+ "regionTag": "aiplatform_generated_aiplatform_v1_DatasetService_ImportData_async",
+ "segments": [
+ {
+ "end": 53,
+ "start": 27,
+ "type": "FULL"
+ },
+ {
+ "end": 53,
+ "start": 27,
+ "type": "SHORT"
+ },
+ {
+ "end": 33,
+ "start": 31,
+ "type": "CLIENT_INITIALIZATION"
+ },
+ {
+ "end": 43,
+ "start": 34,
+ "type": "REQUEST_INITIALIZATION"
+ },
+ {
+ "end": 50,
+ "start": 44,
+ "type": "REQUEST_EXECUTION"
+ },
+ {
+ "end": 54,
+ "start": 51,
+ "type": "RESPONSE_HANDLING"
+ }
+ ]
+ },
+ {
+ "clientMethod": {
+ "method": {
+ "service": {
+ "shortName": "DatasetService"
+ },
+ "shortName": "ImportData"
+ }
+ },
+ "file": "aiplatform_generated_aiplatform_v1_dataset_service_import_data_sync.py",
+ "regionTag": "aiplatform_generated_aiplatform_v1_DatasetService_ImportData_sync",
+ "segments": [
+ {
+ "end": 53,
+ "start": 27,
+ "type": "FULL"
+ },
+ {
+ "end": 53,
+ "start": 27,
+ "type": "SHORT"
+ },
+ {
+ "end": 33,
+ "start": 31,
+ "type": "CLIENT_INITIALIZATION"
+ },
+ {
+ "end": 43,
+ "start": 34,
+ "type": "REQUEST_INITIALIZATION"
+ },
+ {
+ "end": 50,
+ "start": 44,
+ "type": "REQUEST_EXECUTION"
+ },
+ {
+ "end": 54,
+ "start": 51,
+ "type": "RESPONSE_HANDLING"
+ }
+ ]
+ },
+ {
+ "clientMethod": {
+ "async": true,
+ "method": {
+ "service": {
+ "shortName": "DatasetService"
+ },
+ "shortName": "ListAnnotations"
+ }
+ },
+ "file": "aiplatform_generated_aiplatform_v1_dataset_service_list_annotations_async.py",
+ "regionTag": "aiplatform_generated_aiplatform_v1_DatasetService_ListAnnotations_async",
+ "segments": [
+ {
+ "end": 45,
+ "start": 27,
+ "type": "FULL"
+ },
+ {
+ "end": 45,
+ "start": 27,
+ "type": "SHORT"
+ },
+ {
+ "end": 33,
+ "start": 31,
+ "type": "CLIENT_INITIALIZATION"
+ },
+ {
+ "end": 38,
+ "start": 34,
+ "type": "REQUEST_INITIALIZATION"
+ },
+ {
+ "end": 41,
+ "start": 39,
+ "type": "REQUEST_EXECUTION"
+ },
+ {
+ "end": 46,
+ "start": 42,
+ "type": "RESPONSE_HANDLING"
+ }
+ ]
+ },
+ {
+ "clientMethod": {
+ "method": {
+ "service": {
+ "shortName": "DatasetService"
+ },
+ "shortName": "ListAnnotations"
+ }
+ },
+ "file": "aiplatform_generated_aiplatform_v1_dataset_service_list_annotations_sync.py",
+ "regionTag": "aiplatform_generated_aiplatform_v1_DatasetService_ListAnnotations_sync",
+ "segments": [
+ {
+ "end": 45,
+ "start": 27,
+ "type": "FULL"
+ },
+ {
+ "end": 45,
+ "start": 27,
+ "type": "SHORT"
+ },
+ {
+ "end": 33,
+ "start": 31,
+ "type": "CLIENT_INITIALIZATION"
+ },
+ {
+ "end": 38,
+ "start": 34,
+ "type": "REQUEST_INITIALIZATION"
+ },
+ {
+ "end": 41,
+ "start": 39,
+ "type": "REQUEST_EXECUTION"
+ },
+ {
+ "end": 46,
+ "start": 42,
+ "type": "RESPONSE_HANDLING"
+ }
+ ]
+ },
+ {
+ "clientMethod": {
+ "async": true,
+ "method": {
+ "service": {
+ "shortName": "DatasetService"
+ },
+ "shortName": "ListDataItems"
+ }
+ },
+ "file": "aiplatform_generated_aiplatform_v1_dataset_service_list_data_items_async.py",
+ "regionTag": "aiplatform_generated_aiplatform_v1_DatasetService_ListDataItems_async",
+ "segments": [
+ {
+ "end": 45,
+ "start": 27,
+ "type": "FULL"
+ },
+ {
+ "end": 45,
+ "start": 27,
+ "type": "SHORT"
+ },
+ {
+ "end": 33,
+ "start": 31,
+ "type": "CLIENT_INITIALIZATION"
+ },
+ {
+ "end": 38,
+ "start": 34,
+ "type": "REQUEST_INITIALIZATION"
+ },
+ {
+ "end": 41,
+ "start": 39,
+ "type": "REQUEST_EXECUTION"
+ },
+ {
+ "end": 46,
+ "start": 42,
+ "type": "RESPONSE_HANDLING"
+ }
+ ]
+ },
+ {
+ "clientMethod": {
+ "method": {
+ "service": {
+ "shortName": "DatasetService"
+ },
+ "shortName": "ListDataItems"
+ }
+ },
+ "file": "aiplatform_generated_aiplatform_v1_dataset_service_list_data_items_sync.py",
+ "regionTag": "aiplatform_generated_aiplatform_v1_DatasetService_ListDataItems_sync",
+ "segments": [
+ {
+ "end": 45,
+ "start": 27,
+ "type": "FULL"
+ },
+ {
+ "end": 45,
+ "start": 27,
+ "type": "SHORT"
+ },
+ {
+ "end": 33,
+ "start": 31,
+ "type": "CLIENT_INITIALIZATION"
+ },
+ {
+ "end": 38,
+ "start": 34,
+ "type": "REQUEST_INITIALIZATION"
+ },
+ {
+ "end": 41,
+ "start": 39,
+ "type": "REQUEST_EXECUTION"
+ },
+ {
+ "end": 46,
+ "start": 42,
+ "type": "RESPONSE_HANDLING"
+ }
+ ]
+ },
+ {
+ "clientMethod": {
+ "async": true,
+ "method": {
+ "service": {
+ "shortName": "DatasetService"
+ },
+ "shortName": "ListDatasets"
+ }
+ },
+ "file": "aiplatform_generated_aiplatform_v1_dataset_service_list_datasets_async.py",
+ "regionTag": "aiplatform_generated_aiplatform_v1_DatasetService_ListDatasets_async",
+ "segments": [
+ {
+ "end": 45,
+ "start": 27,
+ "type": "FULL"
+ },
+ {
+ "end": 45,
+ "start": 27,
+ "type": "SHORT"
+ },
+ {
+ "end": 33,
+ "start": 31,
+ "type": "CLIENT_INITIALIZATION"
+ },
+ {
+ "end": 38,
+ "start": 34,
+ "type": "REQUEST_INITIALIZATION"
+ },
+ {
+ "end": 41,
+ "start": 39,
+ "type": "REQUEST_EXECUTION"
+ },
+ {
+ "end": 46,
+ "start": 42,
+ "type": "RESPONSE_HANDLING"
+ }
+ ]
+ },
+ {
+ "clientMethod": {
+ "method": {
+ "service": {
+ "shortName": "DatasetService"
+ },
+ "shortName": "ListDatasets"
+ }
+ },
+ "file": "aiplatform_generated_aiplatform_v1_dataset_service_list_datasets_sync.py",
+ "regionTag": "aiplatform_generated_aiplatform_v1_DatasetService_ListDatasets_sync",
+ "segments": [
+ {
+ "end": 45,
+ "start": 27,
+ "type": "FULL"
+ },
+ {
+ "end": 45,
+ "start": 27,
+ "type": "SHORT"
+ },
+ {
+ "end": 33,
+ "start": 31,
+ "type": "CLIENT_INITIALIZATION"
+ },
+ {
+ "end": 38,
+ "start": 34,
+ "type": "REQUEST_INITIALIZATION"
+ },
+ {
+ "end": 41,
+ "start": 39,
+ "type": "REQUEST_EXECUTION"
+ },
+ {
+ "end": 46,
+ "start": 42,
+ "type": "RESPONSE_HANDLING"
+ }
+ ]
+ },
+ {
+ "clientMethod": {
+ "async": true,
+ "method": {
+ "service": {
+ "shortName": "DatasetService"
+ },
+ "shortName": "UpdateDataset"
+ }
+ },
+ "file": "aiplatform_generated_aiplatform_v1_dataset_service_update_dataset_async.py",
+ "regionTag": "aiplatform_generated_aiplatform_v1_DatasetService_UpdateDataset_async",
+ "segments": [
+ {
+ "end": 49,
+ "start": 27,
+ "type": "FULL"
+ },
+ {
+ "end": 49,
+ "start": 27,
+ "type": "SHORT"
+ },
+ {
+ "end": 33,
+ "start": 31,
+ "type": "CLIENT_INITIALIZATION"
+ },
+ {
+ "end": 43,
+ "start": 34,
+ "type": "REQUEST_INITIALIZATION"
+ },
+ {
+ "end": 46,
+ "start": 44,
+ "type": "REQUEST_EXECUTION"
+ },
+ {
+ "end": 50,
+ "start": 47,
+ "type": "RESPONSE_HANDLING"
+ }
+ ]
+ },
+ {
+ "clientMethod": {
+ "method": {
+ "service": {
+ "shortName": "DatasetService"
+ },
+ "shortName": "UpdateDataset"
+ }
+ },
+ "file": "aiplatform_generated_aiplatform_v1_dataset_service_update_dataset_sync.py",
+ "regionTag": "aiplatform_generated_aiplatform_v1_DatasetService_UpdateDataset_sync",
+ "segments": [
+ {
+ "end": 49,
+ "start": 27,
+ "type": "FULL"
+ },
+ {
+ "end": 49,
+ "start": 27,
+ "type": "SHORT"
+ },
+ {
+ "end": 33,
+ "start": 31,
+ "type": "CLIENT_INITIALIZATION"
+ },
+ {
+ "end": 43,
+ "start": 34,
+ "type": "REQUEST_INITIALIZATION"
+ },
+ {
+ "end": 46,
+ "start": 44,
+ "type": "REQUEST_EXECUTION"
+ },
+ {
+ "end": 50,
+ "start": 47,
+ "type": "RESPONSE_HANDLING"
+ }
+ ]
+ },
+ {
+ "clientMethod": {
+ "async": true,
+ "method": {
+ "service": {
+ "shortName": "EndpointService"
+ },
+ "shortName": "CreateEndpoint"
+ }
+ },
+ "file": "aiplatform_generated_aiplatform_v1_endpoint_service_create_endpoint_async.py",
+ "regionTag": "aiplatform_generated_aiplatform_v1_EndpointService_CreateEndpoint_async",
+ "segments": [
+ {
+ "end": 52,
+ "start": 27,
+ "type": "FULL"
+ },
+ {
+ "end": 52,
+ "start": 27,
+ "type": "SHORT"
+ },
+ {
+ "end": 33,
+ "start": 31,
+ "type": "CLIENT_INITIALIZATION"
+ },
+ {
+ "end": 42,
+ "start": 34,
+ "type": "REQUEST_INITIALIZATION"
+ },
+ {
+ "end": 49,
+ "start": 43,
+ "type": "REQUEST_EXECUTION"
+ },
+ {
+ "end": 53,
+ "start": 50,
+ "type": "RESPONSE_HANDLING"
+ }
+ ]
+ },
+ {
+ "clientMethod": {
+ "method": {
+ "service": {
+ "shortName": "EndpointService"
+ },
+ "shortName": "CreateEndpoint"
+ }
+ },
+ "file": "aiplatform_generated_aiplatform_v1_endpoint_service_create_endpoint_sync.py",
+ "regionTag": "aiplatform_generated_aiplatform_v1_EndpointService_CreateEndpoint_sync",
+ "segments": [
+ {
+ "end": 52,
+ "start": 27,
+ "type": "FULL"
+ },
+ {
+ "end": 52,
+ "start": 27,
+ "type": "SHORT"
+ },
+ {
+ "end": 33,
+ "start": 31,
+ "type": "CLIENT_INITIALIZATION"
+ },
+ {
+ "end": 42,
+ "start": 34,
+ "type": "REQUEST_INITIALIZATION"
+ },
+ {
+ "end": 49,
+ "start": 43,
+ "type": "REQUEST_EXECUTION"
+ },
+ {
+ "end": 53,
+ "start": 50,
+ "type": "RESPONSE_HANDLING"
+ }
+ ]
+ },
+ {
+ "clientMethod": {
+ "async": true,
+ "method": {
+ "service": {
+ "shortName": "EndpointService"
+ },
+ "shortName": "DeleteEndpoint"
+ }
+ },
+ "file": "aiplatform_generated_aiplatform_v1_endpoint_service_delete_endpoint_async.py",
+ "regionTag": "aiplatform_generated_aiplatform_v1_EndpointService_DeleteEndpoint_async",
+ "segments": [
+ {
+ "end": 48,
+ "start": 27,
+ "type": "FULL"
+ },
+ {
+ "end": 48,
+ "start": 27,
+ "type": "SHORT"
+ },
+ {
+ "end": 33,
+ "start": 31,
+ "type": "CLIENT_INITIALIZATION"
+ },
+ {
+ "end": 38,
+ "start": 34,
+ "type": "REQUEST_INITIALIZATION"
+ },
+ {
+ "end": 45,
+ "start": 39,
+ "type": "REQUEST_EXECUTION"
+ },
+ {
+ "end": 49,
+ "start": 46,
+ "type": "RESPONSE_HANDLING"
+ }
+ ]
+ },
+ {
+ "clientMethod": {
+ "method": {
+ "service": {
+ "shortName": "EndpointService"
+ },
+ "shortName": "DeleteEndpoint"
+ }
+ },
+ "file": "aiplatform_generated_aiplatform_v1_endpoint_service_delete_endpoint_sync.py",
+ "regionTag": "aiplatform_generated_aiplatform_v1_EndpointService_DeleteEndpoint_sync",
+ "segments": [
+ {
+ "end": 48,
+ "start": 27,
+ "type": "FULL"
+ },
+ {
+ "end": 48,
+ "start": 27,
+ "type": "SHORT"
+ },
+ {
+ "end": 33,
+ "start": 31,
+ "type": "CLIENT_INITIALIZATION"
+ },
+ {
+ "end": 38,
+ "start": 34,
+ "type": "REQUEST_INITIALIZATION"
+ },
+ {
+ "end": 45,
+ "start": 39,
+ "type": "REQUEST_EXECUTION"
+ },
+ {
+ "end": 49,
+ "start": 46,
+ "type": "RESPONSE_HANDLING"
+ }
+ ]
+ },
+ {
+ "clientMethod": {
+ "async": true,
+ "method": {
+ "service": {
+ "shortName": "EndpointService"
+ },
+ "shortName": "DeployModel"
+ }
+ },
+ "file": "aiplatform_generated_aiplatform_v1_endpoint_service_deploy_model_async.py",
+ "regionTag": "aiplatform_generated_aiplatform_v1_EndpointService_DeployModel_async",
+ "segments": [
+ {
+ "end": 53,
+ "start": 27,
+ "type": "FULL"
+ },
+ {
+ "end": 53,
+ "start": 27,
+ "type": "SHORT"
+ },
+ {
+ "end": 33,
+ "start": 31,
+ "type": "CLIENT_INITIALIZATION"
+ },
+ {
+ "end": 43,
+ "start": 34,
+ "type": "REQUEST_INITIALIZATION"
+ },
+ {
+ "end": 50,
+ "start": 44,
+ "type": "REQUEST_EXECUTION"
+ },
+ {
+ "end": 54,
+ "start": 51,
+ "type": "RESPONSE_HANDLING"
+ }
+ ]
+ },
+ {
+ "clientMethod": {
+ "method": {
+ "service": {
+ "shortName": "EndpointService"
+ },
+ "shortName": "DeployModel"
+ }
+ },
+ "file": "aiplatform_generated_aiplatform_v1_endpoint_service_deploy_model_sync.py",
+ "regionTag": "aiplatform_generated_aiplatform_v1_EndpointService_DeployModel_sync",
+ "segments": [
+ {
+ "end": 53,
+ "start": 27,
+ "type": "FULL"
+ },
+ {
+ "end": 53,
+ "start": 27,
+ "type": "SHORT"
+ },
+ {
+ "end": 33,
+ "start": 31,
+ "type": "CLIENT_INITIALIZATION"
+ },
+ {
+ "end": 43,
+ "start": 34,
+ "type": "REQUEST_INITIALIZATION"
+ },
+ {
+ "end": 50,
+ "start": 44,
+ "type": "REQUEST_EXECUTION"
+ },
+ {
+ "end": 54,
+ "start": 51,
+ "type": "RESPONSE_HANDLING"
+ }
+ ]
+ },
+ {
+ "clientMethod": {
+ "async": true,
+ "method": {
+ "service": {
+ "shortName": "EndpointService"
+ },
+ "shortName": "GetEndpoint"
+ }
+ },
+ "file": "aiplatform_generated_aiplatform_v1_endpoint_service_get_endpoint_async.py",
+ "regionTag": "aiplatform_generated_aiplatform_v1_EndpointService_GetEndpoint_async",
+ "segments": [
+ {
+ "end": 44,
+ "start": 27,
+ "type": "FULL"
+ },
+ {
+ "end": 44,
+ "start": 27,
+ "type": "SHORT"
+ },
+ {
+ "end": 33,
+ "start": 31,
+ "type": "CLIENT_INITIALIZATION"
+ },
+ {
+ "end": 38,
+ "start": 34,
+ "type": "REQUEST_INITIALIZATION"
+ },
+ {
+ "end": 41,
+ "start": 39,
+ "type": "REQUEST_EXECUTION"
+ },
+ {
+ "end": 45,
+ "start": 42,
+ "type": "RESPONSE_HANDLING"
+ }
+ ]
+ },
+ {
+ "clientMethod": {
+ "method": {
+ "service": {
+ "shortName": "EndpointService"
+ },
+ "shortName": "GetEndpoint"
+ }
+ },
+ "file": "aiplatform_generated_aiplatform_v1_endpoint_service_get_endpoint_sync.py",
+ "regionTag": "aiplatform_generated_aiplatform_v1_EndpointService_GetEndpoint_sync",
+ "segments": [
+ {
+ "end": 44,
+ "start": 27,
+ "type": "FULL"
+ },
+ {
+ "end": 44,
+ "start": 27,
+ "type": "SHORT"
+ },
+ {
+ "end": 33,
+ "start": 31,
+ "type": "CLIENT_INITIALIZATION"
+ },
+ {
+ "end": 38,
+ "start": 34,
+ "type": "REQUEST_INITIALIZATION"
+ },
+ {
+ "end": 41,
+ "start": 39,
+ "type": "REQUEST_EXECUTION"
+ },
+ {
+ "end": 45,
+ "start": 42,
+ "type": "RESPONSE_HANDLING"
+ }
+ ]
+ },
+ {
+ "clientMethod": {
+ "async": true,
+ "method": {
+ "service": {
+ "shortName": "EndpointService"
+ },
+ "shortName": "ListEndpoints"
+ }
+ },
+ "file": "aiplatform_generated_aiplatform_v1_endpoint_service_list_endpoints_async.py",
+ "regionTag": "aiplatform_generated_aiplatform_v1_EndpointService_ListEndpoints_async",
+ "segments": [
+ {
+ "end": 45,
+ "start": 27,
+ "type": "FULL"
+ },
+ {
+ "end": 45,
+ "start": 27,
+ "type": "SHORT"
+ },
+ {
+ "end": 33,
+ "start": 31,
+ "type": "CLIENT_INITIALIZATION"
+ },
+ {
+ "end": 38,
+ "start": 34,
+ "type": "REQUEST_INITIALIZATION"
+ },
+ {
+ "end": 41,
+ "start": 39,
+ "type": "REQUEST_EXECUTION"
+ },
+ {
+ "end": 46,
+ "start": 42,
+ "type": "RESPONSE_HANDLING"
+ }
+ ]
+ },
+ {
+ "clientMethod": {
+ "method": {
+ "service": {
+ "shortName": "EndpointService"
+ },
+ "shortName": "ListEndpoints"
+ }
+ },
+ "file": "aiplatform_generated_aiplatform_v1_endpoint_service_list_endpoints_sync.py",
+ "regionTag": "aiplatform_generated_aiplatform_v1_EndpointService_ListEndpoints_sync",
+ "segments": [
+ {
+ "end": 45,
+ "start": 27,
+ "type": "FULL"
+ },
+ {
+ "end": 45,
+ "start": 27,
+ "type": "SHORT"
+ },
+ {
+ "end": 33,
+ "start": 31,
+ "type": "CLIENT_INITIALIZATION"
+ },
+ {
+ "end": 38,
+ "start": 34,
+ "type": "REQUEST_INITIALIZATION"
+ },
+ {
+ "end": 41,
+ "start": 39,
+ "type": "REQUEST_EXECUTION"
+ },
+ {
+ "end": 46,
+ "start": 42,
+ "type": "RESPONSE_HANDLING"
+ }
+ ]
+ },
+ {
+ "clientMethod": {
+ "async": true,
+ "method": {
+ "service": {
+ "shortName": "EndpointService"
+ },
+ "shortName": "UndeployModel"
+ }
+ },
+ "file": "aiplatform_generated_aiplatform_v1_endpoint_service_undeploy_model_async.py",
+ "regionTag": "aiplatform_generated_aiplatform_v1_EndpointService_UndeployModel_async",
+ "segments": [
+ {
+ "end": 49,
+ "start": 27,
+ "type": "FULL"
+ },
+ {
+ "end": 49,
+ "start": 27,
+ "type": "SHORT"
+ },
+ {
+ "end": 33,
+ "start": 31,
+ "type": "CLIENT_INITIALIZATION"
+ },
+ {
+ "end": 39,
+ "start": 34,
+ "type": "REQUEST_INITIALIZATION"
+ },
+ {
+ "end": 46,
+ "start": 40,
+ "type": "REQUEST_EXECUTION"
+ },
+ {
+ "end": 50,
+ "start": 47,
+ "type": "RESPONSE_HANDLING"
+ }
+ ]
+ },
+ {
+ "clientMethod": {
+ "method": {
+ "service": {
+ "shortName": "EndpointService"
+ },
+ "shortName": "UndeployModel"
+ }
+ },
+ "file": "aiplatform_generated_aiplatform_v1_endpoint_service_undeploy_model_sync.py",
+ "regionTag": "aiplatform_generated_aiplatform_v1_EndpointService_UndeployModel_sync",
+ "segments": [
+ {
+ "end": 49,
+ "start": 27,
+ "type": "FULL"
+ },
+ {
+ "end": 49,
+ "start": 27,
+ "type": "SHORT"
+ },
+ {
+ "end": 33,
+ "start": 31,
+ "type": "CLIENT_INITIALIZATION"
+ },
+ {
+ "end": 39,
+ "start": 34,
+ "type": "REQUEST_INITIALIZATION"
+ },
+ {
+ "end": 46,
+ "start": 40,
+ "type": "REQUEST_EXECUTION"
+ },
+ {
+ "end": 50,
+ "start": 47,
+ "type": "RESPONSE_HANDLING"
+ }
+ ]
+ },
+ {
+ "clientMethod": {
+ "async": true,
+ "method": {
+ "service": {
+ "shortName": "EndpointService"
+ },
+ "shortName": "UpdateEndpoint"
+ }
+ },
+ "file": "aiplatform_generated_aiplatform_v1_endpoint_service_update_endpoint_async.py",
+ "regionTag": "aiplatform_generated_aiplatform_v1_EndpointService_UpdateEndpoint_async",
+ "segments": [
+ {
+ "end": 47,
+ "start": 27,
+ "type": "FULL"
+ },
+ {
+ "end": 47,
+ "start": 27,
+ "type": "SHORT"
+ },
+ {
+ "end": 33,
+ "start": 31,
+ "type": "CLIENT_INITIALIZATION"
+ },
+ {
+ "end": 41,
+ "start": 34,
+ "type": "REQUEST_INITIALIZATION"
+ },
+ {
+ "end": 44,
+ "start": 42,
+ "type": "REQUEST_EXECUTION"
+ },
+ {
+ "end": 48,
+ "start": 45,
+ "type": "RESPONSE_HANDLING"
+ }
+ ]
+ },
+ {
+ "clientMethod": {
+ "method": {
+ "service": {
+ "shortName": "EndpointService"
+ },
+ "shortName": "UpdateEndpoint"
+ }
+ },
+ "file": "aiplatform_generated_aiplatform_v1_endpoint_service_update_endpoint_sync.py",
+ "regionTag": "aiplatform_generated_aiplatform_v1_EndpointService_UpdateEndpoint_sync",
+ "segments": [
+ {
+ "end": 47,
+ "start": 27,
+ "type": "FULL"
+ },
+ {
+ "end": 47,
+ "start": 27,
+ "type": "SHORT"
+ },
+ {
+ "end": 33,
+ "start": 31,
+ "type": "CLIENT_INITIALIZATION"
+ },
+ {
+ "end": 41,
+ "start": 34,
+ "type": "REQUEST_INITIALIZATION"
+ },
+ {
+ "end": 44,
+ "start": 42,
+ "type": "REQUEST_EXECUTION"
+ },
+ {
+ "end": 48,
+ "start": 45,
+ "type": "RESPONSE_HANDLING"
+ }
+ ]
+ },
+ {
+ "clientMethod": {
+ "async": true,
+ "method": {
+ "service": {
+ "shortName": "FeaturestoreOnlineServingService"
+ },
+ "shortName": "ReadFeatureValues"
+ }
+ },
+ "file": "aiplatform_generated_aiplatform_v1_featurestore_online_serving_service_read_feature_values_async.py",
+ "regionTag": "aiplatform_generated_aiplatform_v1_FeaturestoreOnlineServingService_ReadFeatureValues_async",
+ "segments": [
+ {
+ "end": 49,
+ "start": 27,
+ "type": "FULL"
+ },
+ {
+ "end": 49,
+ "start": 27,
+ "type": "SHORT"
+ },
+ {
+ "end": 33,
+ "start": 31,
+ "type": "CLIENT_INITIALIZATION"
+ },
+ {
+ "end": 43,
+ "start": 34,
+ "type": "REQUEST_INITIALIZATION"
+ },
+ {
+ "end": 46,
+ "start": 44,
+ "type": "REQUEST_EXECUTION"
+ },
+ {
+ "end": 50,
+ "start": 47,
+ "type": "RESPONSE_HANDLING"
+ }
+ ]
+ },
+ {
+ "clientMethod": {
+ "method": {
+ "service": {
+ "shortName": "FeaturestoreOnlineServingService"
+ },
+ "shortName": "ReadFeatureValues"
+ }
+ },
+ "file": "aiplatform_generated_aiplatform_v1_featurestore_online_serving_service_read_feature_values_sync.py",
+ "regionTag": "aiplatform_generated_aiplatform_v1_FeaturestoreOnlineServingService_ReadFeatureValues_sync",
+ "segments": [
+ {
+ "end": 49,
+ "start": 27,
+ "type": "FULL"
+ },
+ {
+ "end": 49,
+ "start": 27,
+ "type": "SHORT"
+ },
+ {
+ "end": 33,
+ "start": 31,
+ "type": "CLIENT_INITIALIZATION"
+ },
+ {
+ "end": 43,
+ "start": 34,
+ "type": "REQUEST_INITIALIZATION"
+ },
+ {
+ "end": 46,
+ "start": 44,
+ "type": "REQUEST_EXECUTION"
+ },
+ {
+ "end": 50,
+ "start": 47,
+ "type": "RESPONSE_HANDLING"
+ }
+ ]
+ },
+ {
+ "clientMethod": {
+ "async": true,
+ "method": {
+ "service": {
+ "shortName": "FeaturestoreOnlineServingService"
+ },
+ "shortName": "StreamingReadFeatureValues"
+ }
+ },
+ "file": "aiplatform_generated_aiplatform_v1_featurestore_online_serving_service_streaming_read_feature_values_async.py",
+ "regionTag": "aiplatform_generated_aiplatform_v1_FeaturestoreOnlineServingService_StreamingReadFeatureValues_async",
+ "segments": [
+ {
+ "end": 50,
+ "start": 27,
+ "type": "FULL"
+ },
+ {
+ "end": 50,
+ "start": 27,
+ "type": "SHORT"
+ },
+ {
+ "end": 33,
+ "start": 31,
+ "type": "CLIENT_INITIALIZATION"
+ },
+ {
+ "end": 43,
+ "start": 34,
+ "type": "REQUEST_INITIALIZATION"
+ },
+ {
+ "end": 46,
+ "start": 44,
+ "type": "REQUEST_EXECUTION"
+ },
+ {
+ "end": 51,
+ "start": 47,
+ "type": "RESPONSE_HANDLING"
+ }
+ ]
+ },
+ {
+ "clientMethod": {
+ "method": {
+ "service": {
+ "shortName": "FeaturestoreOnlineServingService"
+ },
+ "shortName": "StreamingReadFeatureValues"
+ }
+ },
+ "file": "aiplatform_generated_aiplatform_v1_featurestore_online_serving_service_streaming_read_feature_values_sync.py",
+ "regionTag": "aiplatform_generated_aiplatform_v1_FeaturestoreOnlineServingService_StreamingReadFeatureValues_sync",
+ "segments": [
+ {
+ "end": 50,
+ "start": 27,
+ "type": "FULL"
+ },
+ {
+ "end": 50,
+ "start": 27,
+ "type": "SHORT"
+ },
+ {
+ "end": 33,
+ "start": 31,
+ "type": "CLIENT_INITIALIZATION"
+ },
+ {
+ "end": 43,
+ "start": 34,
+ "type": "REQUEST_INITIALIZATION"
+ },
+ {
+ "end": 46,
+ "start": 44,
+ "type": "REQUEST_EXECUTION"
+ },
+ {
+ "end": 51,
+ "start": 47,
+ "type": "RESPONSE_HANDLING"
+ }
+ ]
+ },
+ {
+ "clientMethod": {
+ "async": true,
+ "method": {
+ "service": {
+ "shortName": "FeaturestoreService"
+ },
+ "shortName": "BatchCreateFeatures"
+ }
+ },
+ "file": "aiplatform_generated_aiplatform_v1_featurestore_service_batch_create_features_async.py",
+ "regionTag": "aiplatform_generated_aiplatform_v1_FeaturestoreService_BatchCreateFeatures_async",
+ "segments": [
+ {
+ "end": 54,
+ "start": 27,
+ "type": "FULL"
+ },
+ {
+ "end": 54,
+ "start": 27,
+ "type": "SHORT"
+ },
+ {
+ "end": 33,
+ "start": 31,
+ "type": "CLIENT_INITIALIZATION"
+ },
+ {
+ "end": 44,
+ "start": 34,
+ "type": "REQUEST_INITIALIZATION"
+ },
+ {
+ "end": 51,
+ "start": 45,
+ "type": "REQUEST_EXECUTION"
+ },
+ {
+ "end": 55,
+ "start": 52,
+ "type": "RESPONSE_HANDLING"
+ }
+ ]
+ },
+ {
+ "clientMethod": {
+ "method": {
+ "service": {
+ "shortName": "FeaturestoreService"
+ },
+ "shortName": "BatchCreateFeatures"
+ }
+ },
+ "file": "aiplatform_generated_aiplatform_v1_featurestore_service_batch_create_features_sync.py",
+ "regionTag": "aiplatform_generated_aiplatform_v1_FeaturestoreService_BatchCreateFeatures_sync",
+ "segments": [
+ {
+ "end": 54,
+ "start": 27,
+ "type": "FULL"
+ },
+ {
+ "end": 54,
+ "start": 27,
+ "type": "SHORT"
+ },
+ {
+ "end": 33,
+ "start": 31,
+ "type": "CLIENT_INITIALIZATION"
+ },
+ {
+ "end": 44,
+ "start": 34,
+ "type": "REQUEST_INITIALIZATION"
+ },
+ {
+ "end": 51,
+ "start": 45,
+ "type": "REQUEST_EXECUTION"
+ },
+ {
+ "end": 55,
+ "start": 52,
+ "type": "RESPONSE_HANDLING"
+ }
+ ]
+ },
+ {
+ "clientMethod": {
+ "async": true,
+ "method": {
+ "service": {
+ "shortName": "FeaturestoreService"
+ },
+ "shortName": "BatchReadFeatureValues"
+ }
+ },
+ "file": "aiplatform_generated_aiplatform_v1_featurestore_service_batch_read_feature_values_async.py",
+ "regionTag": "aiplatform_generated_aiplatform_v1_FeaturestoreService_BatchReadFeatureValues_async",
+ "segments": [
+ {
+ "end": 61,
+ "start": 27,
+ "type": "FULL"
+ },
+ {
+ "end": 61,
+ "start": 27,
+ "type": "SHORT"
+ },
+ {
+ "end": 33,
+ "start": 31,
+ "type": "CLIENT_INITIALIZATION"
+ },
+ {
+ "end": 51,
+ "start": 34,
+ "type": "REQUEST_INITIALIZATION"
+ },
+ {
+ "end": 58,
+ "start": 52,
+ "type": "REQUEST_EXECUTION"
+ },
+ {
+ "end": 62,
+ "start": 59,
+ "type": "RESPONSE_HANDLING"
+ }
+ ]
+ },
+ {
+ "clientMethod": {
+ "method": {
+ "service": {
+ "shortName": "FeaturestoreService"
+ },
+ "shortName": "BatchReadFeatureValues"
+ }
+ },
+ "file": "aiplatform_generated_aiplatform_v1_featurestore_service_batch_read_feature_values_sync.py",
+ "regionTag": "aiplatform_generated_aiplatform_v1_FeaturestoreService_BatchReadFeatureValues_sync",
+ "segments": [
+ {
+ "end": 61,
+ "start": 27,
+ "type": "FULL"
+ },
+ {
+ "end": 61,
+ "start": 27,
+ "type": "SHORT"
+ },
+ {
+ "end": 33,
+ "start": 31,
+ "type": "CLIENT_INITIALIZATION"
+ },
+ {
+ "end": 51,
+ "start": 34,
+ "type": "REQUEST_INITIALIZATION"
+ },
+ {
+ "end": 58,
+ "start": 52,
+ "type": "REQUEST_EXECUTION"
+ },
+ {
+ "end": 62,
+ "start": 59,
+ "type": "RESPONSE_HANDLING"
+ }
+ ]
+ },
+ {
+ "clientMethod": {
+ "async": true,
+ "method": {
+ "service": {
+ "shortName": "FeaturestoreService"
+ },
+ "shortName": "CreateEntityType"
+ }
+ },
+ "file": "aiplatform_generated_aiplatform_v1_featurestore_service_create_entity_type_async.py",
+ "regionTag": "aiplatform_generated_aiplatform_v1_FeaturestoreService_CreateEntityType_async",
+ "segments": [
+ {
+ "end": 49,
+ "start": 27,
+ "type": "FULL"
+ },
+ {
+ "end": 49,
+ "start": 27,
+ "type": "SHORT"
+ },
+ {
+ "end": 33,
+ "start": 31,
+ "type": "CLIENT_INITIALIZATION"
+ },
+ {
+ "end": 39,
+ "start": 34,
+ "type": "REQUEST_INITIALIZATION"
+ },
+ {
+ "end": 46,
+ "start": 40,
+ "type": "REQUEST_EXECUTION"
+ },
+ {
+ "end": 50,
+ "start": 47,
+ "type": "RESPONSE_HANDLING"
+ }
+ ]
+ },
+ {
+ "clientMethod": {
+ "method": {
+ "service": {
+ "shortName": "FeaturestoreService"
+ },
+ "shortName": "CreateEntityType"
+ }
+ },
+ "file": "aiplatform_generated_aiplatform_v1_featurestore_service_create_entity_type_sync.py",
+ "regionTag": "aiplatform_generated_aiplatform_v1_FeaturestoreService_CreateEntityType_sync",
+ "segments": [
+ {
+ "end": 49,
+ "start": 27,
+ "type": "FULL"
+ },
+ {
+ "end": 49,
+ "start": 27,
+ "type": "SHORT"
+ },
+ {
+ "end": 33,
+ "start": 31,
+ "type": "CLIENT_INITIALIZATION"
+ },
+ {
+ "end": 39,
+ "start": 34,
+ "type": "REQUEST_INITIALIZATION"
+ },
+ {
+ "end": 46,
+ "start": 40,
+ "type": "REQUEST_EXECUTION"
+ },
+ {
+ "end": 50,
+ "start": 47,
+ "type": "RESPONSE_HANDLING"
+ }
+ ]
+ },
+ {
+ "clientMethod": {
+ "async": true,
+ "method": {
+ "service": {
+ "shortName": "FeaturestoreService"
+ },
+ "shortName": "CreateFeature"
+ }
+ },
+ "file": "aiplatform_generated_aiplatform_v1_featurestore_service_create_feature_async.py",
+ "regionTag": "aiplatform_generated_aiplatform_v1_FeaturestoreService_CreateFeature_async",
+ "segments": [
+ {
+ "end": 53,
+ "start": 27,
+ "type": "FULL"
+ },
+ {
+ "end": 53,
+ "start": 27,
+ "type": "SHORT"
+ },
+ {
+ "end": 33,
+ "start": 31,
+ "type": "CLIENT_INITIALIZATION"
+ },
+ {
+ "end": 43,
+ "start": 34,
+ "type": "REQUEST_INITIALIZATION"
+ },
+ {
+ "end": 50,
+ "start": 44,
+ "type": "REQUEST_EXECUTION"
+ },
+ {
+ "end": 54,
+ "start": 51,
+ "type": "RESPONSE_HANDLING"
+ }
+ ]
+ },
+ {
+ "clientMethod": {
+ "method": {
+ "service": {
+ "shortName": "FeaturestoreService"
+ },
+ "shortName": "CreateFeature"
+ }
+ },
+ "file": "aiplatform_generated_aiplatform_v1_featurestore_service_create_feature_sync.py",
+ "regionTag": "aiplatform_generated_aiplatform_v1_FeaturestoreService_CreateFeature_sync",
+ "segments": [
+ {
+ "end": 53,
+ "start": 27,
+ "type": "FULL"
+ },
+ {
+ "end": 53,
+ "start": 27,
+ "type": "SHORT"
+ },
+ {
+ "end": 33,
+ "start": 31,
+ "type": "CLIENT_INITIALIZATION"
+ },
+ {
+ "end": 43,
+ "start": 34,
+ "type": "REQUEST_INITIALIZATION"
+ },
+ {
+ "end": 50,
+ "start": 44,
+ "type": "REQUEST_EXECUTION"
+ },
+ {
+ "end": 54,
+ "start": 51,
+ "type": "RESPONSE_HANDLING"
+ }
+ ]
+ },
+ {
+ "clientMethod": {
+ "async": true,
+ "method": {
+ "service": {
+ "shortName": "FeaturestoreService"
+ },
+ "shortName": "CreateFeaturestore"
+ }
+ },
+ "file": "aiplatform_generated_aiplatform_v1_featurestore_service_create_featurestore_async.py",
+ "regionTag": "aiplatform_generated_aiplatform_v1_FeaturestoreService_CreateFeaturestore_async",
+ "segments": [
+ {
+ "end": 49,
+ "start": 27,
+ "type": "FULL"
+ },
+ {
+ "end": 49,
+ "start": 27,
+ "type": "SHORT"
+ },
+ {
+ "end": 33,
+ "start": 31,
+ "type": "CLIENT_INITIALIZATION"
+ },
+ {
+ "end": 39,
+ "start": 34,
+ "type": "REQUEST_INITIALIZATION"
+ },
+ {
+ "end": 46,
+ "start": 40,
+ "type": "REQUEST_EXECUTION"
+ },
+ {
+ "end": 50,
+ "start": 47,
+ "type": "RESPONSE_HANDLING"
+ }
+ ]
+ },
+ {
+ "clientMethod": {
+ "method": {
+ "service": {
+ "shortName": "FeaturestoreService"
+ },
+ "shortName": "CreateFeaturestore"
+ }
+ },
+ "file": "aiplatform_generated_aiplatform_v1_featurestore_service_create_featurestore_sync.py",
+ "regionTag": "aiplatform_generated_aiplatform_v1_FeaturestoreService_CreateFeaturestore_sync",
+ "segments": [
+ {
+ "end": 49,
+ "start": 27,
+ "type": "FULL"
+ },
+ {
+ "end": 49,
+ "start": 27,
+ "type": "SHORT"
+ },
+ {
+ "end": 33,
+ "start": 31,
+ "type": "CLIENT_INITIALIZATION"
+ },
+ {
+ "end": 39,
+ "start": 34,
+ "type": "REQUEST_INITIALIZATION"
+ },
+ {
+ "end": 46,
+ "start": 40,
+ "type": "REQUEST_EXECUTION"
+ },
+ {
+ "end": 50,
+ "start": 47,
+ "type": "RESPONSE_HANDLING"
+ }
+ ]
+ },
+ {
+ "clientMethod": {
+ "async": true,
+ "method": {
+ "service": {
+ "shortName": "FeaturestoreService"
+ },
+ "shortName": "DeleteEntityType"
+ }
+ },
+ "file": "aiplatform_generated_aiplatform_v1_featurestore_service_delete_entity_type_async.py",
+ "regionTag": "aiplatform_generated_aiplatform_v1_FeaturestoreService_DeleteEntityType_async",
+ "segments": [
+ {
+ "end": 48,
+ "start": 27,
+ "type": "FULL"
+ },
+ {
+ "end": 48,
+ "start": 27,
+ "type": "SHORT"
+ },
+ {
+ "end": 33,
+ "start": 31,
+ "type": "CLIENT_INITIALIZATION"
+ },
+ {
+ "end": 38,
+ "start": 34,
+ "type": "REQUEST_INITIALIZATION"
+ },
+ {
+ "end": 45,
+ "start": 39,
+ "type": "REQUEST_EXECUTION"
+ },
+ {
+ "end": 49,
+ "start": 46,
+ "type": "RESPONSE_HANDLING"
+ }
+ ]
+ },
+ {
+ "clientMethod": {
+ "method": {
+ "service": {
+ "shortName": "FeaturestoreService"
+ },
+ "shortName": "DeleteEntityType"
+ }
+ },
+ "file": "aiplatform_generated_aiplatform_v1_featurestore_service_delete_entity_type_sync.py",
+ "regionTag": "aiplatform_generated_aiplatform_v1_FeaturestoreService_DeleteEntityType_sync",
+ "segments": [
+ {
+ "end": 48,
+ "start": 27,
+ "type": "FULL"
+ },
+ {
+ "end": 48,
+ "start": 27,
+ "type": "SHORT"
+ },
+ {
+ "end": 33,
+ "start": 31,
+ "type": "CLIENT_INITIALIZATION"
+ },
+ {
+ "end": 38,
+ "start": 34,
+ "type": "REQUEST_INITIALIZATION"
+ },
+ {
+ "end": 45,
+ "start": 39,
+ "type": "REQUEST_EXECUTION"
+ },
+ {
+ "end": 49,
+ "start": 46,
+ "type": "RESPONSE_HANDLING"
+ }
+ ]
+ },
+ {
+ "clientMethod": {
+ "async": true,
+ "method": {
+ "service": {
+ "shortName": "FeaturestoreService"
+ },
+ "shortName": "DeleteFeature"
+ }
+ },
+ "file": "aiplatform_generated_aiplatform_v1_featurestore_service_delete_feature_async.py",
+ "regionTag": "aiplatform_generated_aiplatform_v1_FeaturestoreService_DeleteFeature_async",
+ "segments": [
+ {
+ "end": 48,
+ "start": 27,
+ "type": "FULL"
+ },
+ {
+ "end": 48,
+ "start": 27,
+ "type": "SHORT"
+ },
+ {
+ "end": 33,
+ "start": 31,
+ "type": "CLIENT_INITIALIZATION"
+ },
+ {
+ "end": 38,
+ "start": 34,
+ "type": "REQUEST_INITIALIZATION"
+ },
+ {
+ "end": 45,
+ "start": 39,
+ "type": "REQUEST_EXECUTION"
+ },
+ {
+ "end": 49,
+ "start": 46,
+ "type": "RESPONSE_HANDLING"
+ }
+ ]
+ },
+ {
+ "clientMethod": {
+ "method": {
+ "service": {
+ "shortName": "FeaturestoreService"
+ },
+ "shortName": "DeleteFeature"
+ }
+ },
+ "file": "aiplatform_generated_aiplatform_v1_featurestore_service_delete_feature_sync.py",
+ "regionTag": "aiplatform_generated_aiplatform_v1_FeaturestoreService_DeleteFeature_sync",
+ "segments": [
+ {
+ "end": 48,
+ "start": 27,
+ "type": "FULL"
+ },
+ {
+ "end": 48,
+ "start": 27,
+ "type": "SHORT"
+ },
+ {
+ "end": 33,
+ "start": 31,
+ "type": "CLIENT_INITIALIZATION"
+ },
+ {
+ "end": 38,
+ "start": 34,
+ "type": "REQUEST_INITIALIZATION"
+ },
+ {
+ "end": 45,
+ "start": 39,
+ "type": "REQUEST_EXECUTION"
+ },
+ {
+ "end": 49,
+ "start": 46,
+ "type": "RESPONSE_HANDLING"
+ }
+ ]
+ },
+ {
+ "clientMethod": {
+ "async": true,
+ "method": {
+ "service": {
+ "shortName": "FeaturestoreService"
+ },
+ "shortName": "DeleteFeaturestore"
+ }
+ },
+ "file": "aiplatform_generated_aiplatform_v1_featurestore_service_delete_featurestore_async.py",
+ "regionTag": "aiplatform_generated_aiplatform_v1_FeaturestoreService_DeleteFeaturestore_async",
+ "segments": [
+ {
+ "end": 48,
+ "start": 27,
+ "type": "FULL"
+ },
+ {
+ "end": 48,
+ "start": 27,
+ "type": "SHORT"
+ },
+ {
+ "end": 33,
+ "start": 31,
+ "type": "CLIENT_INITIALIZATION"
+ },
+ {
+ "end": 38,
+ "start": 34,
+ "type": "REQUEST_INITIALIZATION"
+ },
+ {
+ "end": 45,
+ "start": 39,
+ "type": "REQUEST_EXECUTION"
+ },
+ {
+ "end": 49,
+ "start": 46,
+ "type": "RESPONSE_HANDLING"
+ }
+ ]
+ },
+ {
+ "clientMethod": {
+ "method": {
+ "service": {
+ "shortName": "FeaturestoreService"
+ },
+ "shortName": "DeleteFeaturestore"
+ }
+ },
+ "file": "aiplatform_generated_aiplatform_v1_featurestore_service_delete_featurestore_sync.py",
+ "regionTag": "aiplatform_generated_aiplatform_v1_FeaturestoreService_DeleteFeaturestore_sync",
+ "segments": [
+ {
+ "end": 48,
+ "start": 27,
+ "type": "FULL"
+ },
+ {
+ "end": 48,
+ "start": 27,
+ "type": "SHORT"
+ },
+ {
+ "end": 33,
+ "start": 31,
+ "type": "CLIENT_INITIALIZATION"
+ },
+ {
+ "end": 38,
+ "start": 34,
+ "type": "REQUEST_INITIALIZATION"
+ },
+ {
+ "end": 45,
+ "start": 39,
+ "type": "REQUEST_EXECUTION"
+ },
+ {
+ "end": 49,
+ "start": 46,
+ "type": "RESPONSE_HANDLING"
+ }
+ ]
+ },
+ {
+ "clientMethod": {
+ "async": true,
+ "method": {
+ "service": {
+ "shortName": "FeaturestoreService"
+ },
+ "shortName": "ExportFeatureValues"
+ }
+ },
+ "file": "aiplatform_generated_aiplatform_v1_featurestore_service_export_feature_values_async.py",
+ "regionTag": "aiplatform_generated_aiplatform_v1_FeaturestoreService_ExportFeatureValues_async",
+ "segments": [
+ {
+ "end": 56,
+ "start": 27,
+ "type": "FULL"
+ },
+ {
+ "end": 56,
+ "start": 27,
+ "type": "SHORT"
+ },
+ {
+ "end": 33,
+ "start": 31,
+ "type": "CLIENT_INITIALIZATION"
+ },
+ {
+ "end": 46,
+ "start": 34,
+ "type": "REQUEST_INITIALIZATION"
+ },
+ {
+ "end": 53,
+ "start": 47,
+ "type": "REQUEST_EXECUTION"
+ },
+ {
+ "end": 57,
+ "start": 54,
+ "type": "RESPONSE_HANDLING"
+ }
+ ]
+ },
+ {
+ "clientMethod": {
+ "method": {
+ "service": {
+ "shortName": "FeaturestoreService"
+ },
+ "shortName": "ExportFeatureValues"
+ }
+ },
+ "file": "aiplatform_generated_aiplatform_v1_featurestore_service_export_feature_values_sync.py",
+ "regionTag": "aiplatform_generated_aiplatform_v1_FeaturestoreService_ExportFeatureValues_sync",
+ "segments": [
+ {
+ "end": 56,
+ "start": 27,
+ "type": "FULL"
+ },
+ {
+ "end": 56,
+ "start": 27,
+ "type": "SHORT"
+ },
+ {
+ "end": 33,
+ "start": 31,
+ "type": "CLIENT_INITIALIZATION"
+ },
+ {
+ "end": 46,
+ "start": 34,
+ "type": "REQUEST_INITIALIZATION"
+ },
+ {
+ "end": 53,
+ "start": 47,
+ "type": "REQUEST_EXECUTION"
+ },
+ {
+ "end": 57,
+ "start": 54,
+ "type": "RESPONSE_HANDLING"
+ }
+ ]
+ },
+ {
+ "clientMethod": {
+ "async": true,
+ "method": {
+ "service": {
+ "shortName": "FeaturestoreService"
+ },
+ "shortName": "GetEntityType"
+ }
+ },
+ "file": "aiplatform_generated_aiplatform_v1_featurestore_service_get_entity_type_async.py",
+ "regionTag": "aiplatform_generated_aiplatform_v1_FeaturestoreService_GetEntityType_async",
+ "segments": [
+ {
+ "end": 44,
+ "start": 27,
+ "type": "FULL"
+ },
+ {
+ "end": 44,
+ "start": 27,
+ "type": "SHORT"
+ },
+ {
+ "end": 33,
+ "start": 31,
+ "type": "CLIENT_INITIALIZATION"
+ },
+ {
+ "end": 38,
+ "start": 34,
+ "type": "REQUEST_INITIALIZATION"
+ },
+ {
+ "end": 41,
+ "start": 39,
+ "type": "REQUEST_EXECUTION"
+ },
+ {
+ "end": 45,
+ "start": 42,
+ "type": "RESPONSE_HANDLING"
+ }
+ ]
+ },
+ {
+ "clientMethod": {
+ "method": {
+ "service": {
+ "shortName": "FeaturestoreService"
+ },
+ "shortName": "GetEntityType"
+ }
+ },
+ "file": "aiplatform_generated_aiplatform_v1_featurestore_service_get_entity_type_sync.py",
+ "regionTag": "aiplatform_generated_aiplatform_v1_FeaturestoreService_GetEntityType_sync",
+ "segments": [
+ {
+ "end": 44,
+ "start": 27,
+ "type": "FULL"
+ },
+ {
+ "end": 44,
+ "start": 27,
+ "type": "SHORT"
+ },
+ {
+ "end": 33,
+ "start": 31,
+ "type": "CLIENT_INITIALIZATION"
+ },
+ {
+ "end": 38,
+ "start": 34,
+ "type": "REQUEST_INITIALIZATION"
+ },
+ {
+ "end": 41,
+ "start": 39,
+ "type": "REQUEST_EXECUTION"
+ },
+ {
+ "end": 45,
+ "start": 42,
+ "type": "RESPONSE_HANDLING"
+ }
+ ]
+ },
+ {
+ "clientMethod": {
+ "async": true,
+ "method": {
+ "service": {
+ "shortName": "FeaturestoreService"
+ },
+ "shortName": "GetFeature"
+ }
+ },
+ "file": "aiplatform_generated_aiplatform_v1_featurestore_service_get_feature_async.py",
+ "regionTag": "aiplatform_generated_aiplatform_v1_FeaturestoreService_GetFeature_async",
+ "segments": [
+ {
+ "end": 44,
+ "start": 27,
+ "type": "FULL"
+ },
+ {
+ "end": 44,
+ "start": 27,
+ "type": "SHORT"
+ },
+ {
+ "end": 33,
+ "start": 31,
+ "type": "CLIENT_INITIALIZATION"
+ },
+ {
+ "end": 38,
+ "start": 34,
+ "type": "REQUEST_INITIALIZATION"
+ },
+ {
+ "end": 41,
+ "start": 39,
+ "type": "REQUEST_EXECUTION"
+ },
+ {
+ "end": 45,
+ "start": 42,
+ "type": "RESPONSE_HANDLING"
+ }
+ ]
+ },
+ {
+ "clientMethod": {
+ "method": {
+ "service": {
+ "shortName": "FeaturestoreService"
+ },
+ "shortName": "GetFeature"
+ }
+ },
+ "file": "aiplatform_generated_aiplatform_v1_featurestore_service_get_feature_sync.py",
+ "regionTag": "aiplatform_generated_aiplatform_v1_FeaturestoreService_GetFeature_sync",
+ "segments": [
+ {
+ "end": 44,
+ "start": 27,
+ "type": "FULL"
+ },
+ {
+ "end": 44,
+ "start": 27,
+ "type": "SHORT"
+ },
+ {
+ "end": 33,
+ "start": 31,
+ "type": "CLIENT_INITIALIZATION"
+ },
+ {
+ "end": 38,
+ "start": 34,
+ "type": "REQUEST_INITIALIZATION"
+ },
+ {
+ "end": 41,
+ "start": 39,
+ "type": "REQUEST_EXECUTION"
+ },
+ {
+ "end": 45,
+ "start": 42,
+ "type": "RESPONSE_HANDLING"
+ }
+ ]
+ },
+ {
+ "clientMethod": {
+ "async": true,
+ "method": {
+ "service": {
+ "shortName": "FeaturestoreService"
+ },
+ "shortName": "GetFeaturestore"
+ }
+ },
+ "file": "aiplatform_generated_aiplatform_v1_featurestore_service_get_featurestore_async.py",
+ "regionTag": "aiplatform_generated_aiplatform_v1_FeaturestoreService_GetFeaturestore_async",
+ "segments": [
+ {
+ "end": 44,
+ "start": 27,
+ "type": "FULL"
+ },
+ {
+ "end": 44,
+ "start": 27,
+ "type": "SHORT"
+ },
+ {
+ "end": 33,
+ "start": 31,
+ "type": "CLIENT_INITIALIZATION"
+ },
+ {
+ "end": 38,
+ "start": 34,
+ "type": "REQUEST_INITIALIZATION"
+ },
+ {
+ "end": 41,
+ "start": 39,
+ "type": "REQUEST_EXECUTION"
+ },
+ {
+ "end": 45,
+ "start": 42,
+ "type": "RESPONSE_HANDLING"
+ }
+ ]
+ },
+ {
+ "clientMethod": {
+ "method": {
+ "service": {
+ "shortName": "FeaturestoreService"
+ },
+ "shortName": "GetFeaturestore"
+ }
+ },
+ "file": "aiplatform_generated_aiplatform_v1_featurestore_service_get_featurestore_sync.py",
+ "regionTag": "aiplatform_generated_aiplatform_v1_FeaturestoreService_GetFeaturestore_sync",
+ "segments": [
+ {
+ "end": 44,
+ "start": 27,
+ "type": "FULL"
+ },
+ {
+ "end": 44,
+ "start": 27,
+ "type": "SHORT"
+ },
+ {
+ "end": 33,
+ "start": 31,
+ "type": "CLIENT_INITIALIZATION"
+ },
+ {
+ "end": 38,
+ "start": 34,
+ "type": "REQUEST_INITIALIZATION"
+ },
+ {
+ "end": 41,
+ "start": 39,
+ "type": "REQUEST_EXECUTION"
+ },
+ {
+ "end": 45,
+ "start": 42,
+ "type": "RESPONSE_HANDLING"
+ }
+ ]
+ },
+ {
+ "clientMethod": {
+ "async": true,
+ "method": {
+ "service": {
+ "shortName": "FeaturestoreService"
+ },
+ "shortName": "ImportFeatureValues"
+ }
+ },
+ "file": "aiplatform_generated_aiplatform_v1_featurestore_service_import_feature_values_async.py",
+ "regionTag": "aiplatform_generated_aiplatform_v1_FeaturestoreService_ImportFeatureValues_async",
+ "segments": [
+ {
+ "end": 57,
+ "start": 27,
+ "type": "FULL"
+ },
+ {
+ "end": 57,
+ "start": 27,
+ "type": "SHORT"
+ },
+ {
+ "end": 33,
+ "start": 31,
+ "type": "CLIENT_INITIALIZATION"
+ },
+ {
+ "end": 47,
+ "start": 34,
+ "type": "REQUEST_INITIALIZATION"
+ },
+ {
+ "end": 54,
+ "start": 48,
+ "type": "REQUEST_EXECUTION"
+ },
+ {
+ "end": 58,
+ "start": 55,
+ "type": "RESPONSE_HANDLING"
+ }
+ ]
+ },
+ {
+ "clientMethod": {
+ "method": {
+ "service": {
+ "shortName": "FeaturestoreService"
+ },
+ "shortName": "ImportFeatureValues"
+ }
+ },
+ "file": "aiplatform_generated_aiplatform_v1_featurestore_service_import_feature_values_sync.py",
+ "regionTag": "aiplatform_generated_aiplatform_v1_FeaturestoreService_ImportFeatureValues_sync",
+ "segments": [
+ {
+ "end": 57,
+ "start": 27,
+ "type": "FULL"
+ },
+ {
+ "end": 57,
+ "start": 27,
+ "type": "SHORT"
+ },
+ {
+ "end": 33,
+ "start": 31,
+ "type": "CLIENT_INITIALIZATION"
+ },
+ {
+ "end": 47,
+ "start": 34,
+ "type": "REQUEST_INITIALIZATION"
+ },
+ {
+ "end": 54,
+ "start": 48,
+ "type": "REQUEST_EXECUTION"
+ },
+ {
+ "end": 58,
+ "start": 55,
+ "type": "RESPONSE_HANDLING"
+ }
+ ]
+ },
+ {
+ "clientMethod": {
+ "async": true,
+ "method": {
+ "service": {
+ "shortName": "FeaturestoreService"
+ },
+ "shortName": "ListEntityTypes"
+ }
+ },
+ "file": "aiplatform_generated_aiplatform_v1_featurestore_service_list_entity_types_async.py",
+ "regionTag": "aiplatform_generated_aiplatform_v1_FeaturestoreService_ListEntityTypes_async",
+ "segments": [
+ {
+ "end": 45,
+ "start": 27,
+ "type": "FULL"
+ },
+ {
+ "end": 45,
+ "start": 27,
+ "type": "SHORT"
+ },
+ {
+ "end": 33,
+ "start": 31,
+ "type": "CLIENT_INITIALIZATION"
+ },
+ {
+ "end": 38,
+ "start": 34,
+ "type": "REQUEST_INITIALIZATION"
+ },
+ {
+ "end": 41,
+ "start": 39,
+ "type": "REQUEST_EXECUTION"
+ },
+ {
+ "end": 46,
+ "start": 42,
+ "type": "RESPONSE_HANDLING"
+ }
+ ]
+ },
+ {
+ "clientMethod": {
+ "method": {
+ "service": {
+ "shortName": "FeaturestoreService"
+ },
+ "shortName": "ListEntityTypes"
+ }
+ },
+ "file": "aiplatform_generated_aiplatform_v1_featurestore_service_list_entity_types_sync.py",
+ "regionTag": "aiplatform_generated_aiplatform_v1_FeaturestoreService_ListEntityTypes_sync",
+ "segments": [
+ {
+ "end": 45,
+ "start": 27,
+ "type": "FULL"
+ },
+ {
+ "end": 45,
+ "start": 27,
+ "type": "SHORT"
+ },
+ {
+ "end": 33,
+ "start": 31,
+ "type": "CLIENT_INITIALIZATION"
+ },
+ {
+ "end": 38,
+ "start": 34,
+ "type": "REQUEST_INITIALIZATION"
+ },
+ {
+ "end": 41,
+ "start": 39,
+ "type": "REQUEST_EXECUTION"
+ },
+ {
+ "end": 46,
+ "start": 42,
+ "type": "RESPONSE_HANDLING"
+ }
+ ]
+ },
+ {
+ "clientMethod": {
+ "async": true,
+ "method": {
+ "service": {
+ "shortName": "FeaturestoreService"
+ },
+ "shortName": "ListFeatures"
+ }
+ },
+ "file": "aiplatform_generated_aiplatform_v1_featurestore_service_list_features_async.py",
+ "regionTag": "aiplatform_generated_aiplatform_v1_FeaturestoreService_ListFeatures_async",
+ "segments": [
+ {
+ "end": 45,
+ "start": 27,
+ "type": "FULL"
+ },
+ {
+ "end": 45,
+ "start": 27,
+ "type": "SHORT"
+ },
+ {
+ "end": 33,
+ "start": 31,
+ "type": "CLIENT_INITIALIZATION"
+ },
+ {
+ "end": 38,
+ "start": 34,
+ "type": "REQUEST_INITIALIZATION"
+ },
+ {
+ "end": 41,
+ "start": 39,
+ "type": "REQUEST_EXECUTION"
+ },
+ {
+ "end": 46,
+ "start": 42,
+ "type": "RESPONSE_HANDLING"
+ }
+ ]
+ },
+ {
+ "clientMethod": {
+ "method": {
+ "service": {
+ "shortName": "FeaturestoreService"
+ },
+ "shortName": "ListFeatures"
+ }
+ },
+ "file": "aiplatform_generated_aiplatform_v1_featurestore_service_list_features_sync.py",
+ "regionTag": "aiplatform_generated_aiplatform_v1_FeaturestoreService_ListFeatures_sync",
+ "segments": [
+ {
+ "end": 45,
+ "start": 27,
+ "type": "FULL"
+ },
+ {
+ "end": 45,
+ "start": 27,
+ "type": "SHORT"
+ },
+ {
+ "end": 33,
+ "start": 31,
+ "type": "CLIENT_INITIALIZATION"
+ },
+ {
+ "end": 38,
+ "start": 34,
+ "type": "REQUEST_INITIALIZATION"
+ },
+ {
+ "end": 41,
+ "start": 39,
+ "type": "REQUEST_EXECUTION"
+ },
+ {
+ "end": 46,
+ "start": 42,
+ "type": "RESPONSE_HANDLING"
+ }
+ ]
+ },
+ {
+ "clientMethod": {
+ "async": true,
+ "method": {
+ "service": {
+ "shortName": "FeaturestoreService"
+ },
+ "shortName": "ListFeaturestores"
+ }
+ },
+ "file": "aiplatform_generated_aiplatform_v1_featurestore_service_list_featurestores_async.py",
+ "regionTag": "aiplatform_generated_aiplatform_v1_FeaturestoreService_ListFeaturestores_async",
+ "segments": [
+ {
+ "end": 45,
+ "start": 27,
+ "type": "FULL"
+ },
+ {
+ "end": 45,
+ "start": 27,
+ "type": "SHORT"
+ },
+ {
+ "end": 33,
+ "start": 31,
+ "type": "CLIENT_INITIALIZATION"
+ },
+ {
+ "end": 38,
+ "start": 34,
+ "type": "REQUEST_INITIALIZATION"
+ },
+ {
+ "end": 41,
+ "start": 39,
+ "type": "REQUEST_EXECUTION"
+ },
+ {
+ "end": 46,
+ "start": 42,
+ "type": "RESPONSE_HANDLING"
+ }
+ ]
+ },
+ {
+ "clientMethod": {
+ "method": {
+ "service": {
+ "shortName": "FeaturestoreService"
+ },
+ "shortName": "ListFeaturestores"
+ }
+ },
+ "file": "aiplatform_generated_aiplatform_v1_featurestore_service_list_featurestores_sync.py",
+ "regionTag": "aiplatform_generated_aiplatform_v1_FeaturestoreService_ListFeaturestores_sync",
+ "segments": [
+ {
+ "end": 45,
+ "start": 27,
+ "type": "FULL"
+ },
+ {
+ "end": 45,
+ "start": 27,
+ "type": "SHORT"
+ },
+ {
+ "end": 33,
+ "start": 31,
+ "type": "CLIENT_INITIALIZATION"
+ },
+ {
+ "end": 38,
+ "start": 34,
+ "type": "REQUEST_INITIALIZATION"
+ },
+ {
+ "end": 41,
+ "start": 39,
+ "type": "REQUEST_EXECUTION"
+ },
+ {
+ "end": 46,
+ "start": 42,
+ "type": "RESPONSE_HANDLING"
+ }
+ ]
+ },
+ {
+ "clientMethod": {
+ "async": true,
+ "method": {
+ "service": {
+ "shortName": "FeaturestoreService"
+ },
+ "shortName": "SearchFeatures"
+ }
+ },
+ "file": "aiplatform_generated_aiplatform_v1_featurestore_service_search_features_async.py",
+ "regionTag": "aiplatform_generated_aiplatform_v1_FeaturestoreService_SearchFeatures_async",
+ "segments": [
+ {
+ "end": 45,
+ "start": 27,
+ "type": "FULL"
+ },
+ {
+ "end": 45,
+ "start": 27,
+ "type": "SHORT"
+ },
+ {
+ "end": 33,
+ "start": 31,
+ "type": "CLIENT_INITIALIZATION"
+ },
+ {
+ "end": 38,
+ "start": 34,
+ "type": "REQUEST_INITIALIZATION"
+ },
+ {
+ "end": 41,
+ "start": 39,
+ "type": "REQUEST_EXECUTION"
+ },
+ {
+ "end": 46,
+ "start": 42,
+ "type": "RESPONSE_HANDLING"
+ }
+ ]
+ },
+ {
+ "clientMethod": {
+ "method": {
+ "service": {
+ "shortName": "FeaturestoreService"
+ },
+ "shortName": "SearchFeatures"
+ }
+ },
+ "file": "aiplatform_generated_aiplatform_v1_featurestore_service_search_features_sync.py",
+ "regionTag": "aiplatform_generated_aiplatform_v1_FeaturestoreService_SearchFeatures_sync",
+ "segments": [
+ {
+ "end": 45,
+ "start": 27,
+ "type": "FULL"
+ },
+ {
+ "end": 45,
+ "start": 27,
+ "type": "SHORT"
+ },
+ {
+ "end": 33,
+ "start": 31,
+ "type": "CLIENT_INITIALIZATION"
+ },
+ {
+ "end": 38,
+ "start": 34,
+ "type": "REQUEST_INITIALIZATION"
+ },
+ {
+ "end": 41,
+ "start": 39,
+ "type": "REQUEST_EXECUTION"
+ },
+ {
+ "end": 46,
+ "start": 42,
+ "type": "RESPONSE_HANDLING"
+ }
+ ]
+ },
+ {
+ "clientMethod": {
+ "async": true,
+ "method": {
+ "service": {
+ "shortName": "FeaturestoreService"
+ },
+ "shortName": "UpdateEntityType"
+ }
+ },
+ "file": "aiplatform_generated_aiplatform_v1_featurestore_service_update_entity_type_async.py",
+ "regionTag": "aiplatform_generated_aiplatform_v1_FeaturestoreService_UpdateEntityType_async",
+ "segments": [
+ {
+ "end": 43,
+ "start": 27,
+ "type": "FULL"
+ },
+ {
+ "end": 43,
+ "start": 27,
+ "type": "SHORT"
+ },
+ {
+ "end": 33,
+ "start": 31,
+ "type": "CLIENT_INITIALIZATION"
+ },
+ {
+ "end": 37,
+ "start": 34,
+ "type": "REQUEST_INITIALIZATION"
+ },
+ {
+ "end": 40,
+ "start": 38,
+ "type": "REQUEST_EXECUTION"
+ },
+ {
+ "end": 44,
+ "start": 41,
+ "type": "RESPONSE_HANDLING"
+ }
+ ]
+ },
+ {
+ "clientMethod": {
+ "method": {
+ "service": {
+ "shortName": "FeaturestoreService"
+ },
+ "shortName": "UpdateEntityType"
+ }
+ },
+ "file": "aiplatform_generated_aiplatform_v1_featurestore_service_update_entity_type_sync.py",
+ "regionTag": "aiplatform_generated_aiplatform_v1_FeaturestoreService_UpdateEntityType_sync",
+ "segments": [
+ {
+ "end": 43,
+ "start": 27,
+ "type": "FULL"
+ },
+ {
+ "end": 43,
+ "start": 27,
+ "type": "SHORT"
+ },
+ {
+ "end": 33,
+ "start": 31,
+ "type": "CLIENT_INITIALIZATION"
+ },
+ {
+ "end": 37,
+ "start": 34,
+ "type": "REQUEST_INITIALIZATION"
+ },
+ {
+ "end": 40,
+ "start": 38,
+ "type": "REQUEST_EXECUTION"
+ },
+ {
+ "end": 44,
+ "start": 41,
+ "type": "RESPONSE_HANDLING"
+ }
+ ]
+ },
+ {
+ "clientMethod": {
+ "async": true,
+ "method": {
+ "service": {
+ "shortName": "FeaturestoreService"
+ },
+ "shortName": "UpdateFeature"
+ }
+ },
+ "file": "aiplatform_generated_aiplatform_v1_featurestore_service_update_feature_async.py",
+ "regionTag": "aiplatform_generated_aiplatform_v1_FeaturestoreService_UpdateFeature_async",
+ "segments": [
+ {
+ "end": 47,
+ "start": 27,
+ "type": "FULL"
+ },
+ {
+ "end": 47,
+ "start": 27,
+ "type": "SHORT"
+ },
+ {
+ "end": 33,
+ "start": 31,
+ "type": "CLIENT_INITIALIZATION"
+ },
+ {
+ "end": 41,
+ "start": 34,
+ "type": "REQUEST_INITIALIZATION"
+ },
+ {
+ "end": 44,
+ "start": 42,
+ "type": "REQUEST_EXECUTION"
+ },
+ {
+ "end": 48,
+ "start": 45,
+ "type": "RESPONSE_HANDLING"
+ }
+ ]
+ },
+ {
+ "clientMethod": {
+ "method": {
+ "service": {
+ "shortName": "FeaturestoreService"
+ },
+ "shortName": "UpdateFeature"
+ }
+ },
+ "file": "aiplatform_generated_aiplatform_v1_featurestore_service_update_feature_sync.py",
+ "regionTag": "aiplatform_generated_aiplatform_v1_FeaturestoreService_UpdateFeature_sync",
+ "segments": [
+ {
+ "end": 47,
+ "start": 27,
+ "type": "FULL"
+ },
+ {
+ "end": 47,
+ "start": 27,
+ "type": "SHORT"
+ },
+ {
+ "end": 33,
+ "start": 31,
+ "type": "CLIENT_INITIALIZATION"
+ },
+ {
+ "end": 41,
+ "start": 34,
+ "type": "REQUEST_INITIALIZATION"
+ },
+ {
+ "end": 44,
+ "start": 42,
+ "type": "REQUEST_EXECUTION"
+ },
+ {
+ "end": 48,
+ "start": 45,
+ "type": "RESPONSE_HANDLING"
+ }
+ ]
+ },
+ {
+ "clientMethod": {
+ "async": true,
+ "method": {
+ "service": {
+ "shortName": "FeaturestoreService"
+ },
+ "shortName": "UpdateFeaturestore"
+ }
+ },
+ "file": "aiplatform_generated_aiplatform_v1_featurestore_service_update_featurestore_async.py",
+ "regionTag": "aiplatform_generated_aiplatform_v1_FeaturestoreService_UpdateFeaturestore_async",
+ "segments": [
+ {
+ "end": 47,
+ "start": 27,
+ "type": "FULL"
+ },
+ {
+ "end": 47,
+ "start": 27,
+ "type": "SHORT"
+ },
+ {
+ "end": 33,
+ "start": 31,
+ "type": "CLIENT_INITIALIZATION"
+ },
+ {
+ "end": 37,
+ "start": 34,
+ "type": "REQUEST_INITIALIZATION"
+ },
+ {
+ "end": 44,
+ "start": 38,
+ "type": "REQUEST_EXECUTION"
+ },
+ {
+ "end": 48,
+ "start": 45,
+ "type": "RESPONSE_HANDLING"
+ }
+ ]
+ },
+ {
+ "clientMethod": {
+ "method": {
+ "service": {
+ "shortName": "FeaturestoreService"
+ },
+ "shortName": "UpdateFeaturestore"
+ }
+ },
+ "file": "aiplatform_generated_aiplatform_v1_featurestore_service_update_featurestore_sync.py",
+ "regionTag": "aiplatform_generated_aiplatform_v1_FeaturestoreService_UpdateFeaturestore_sync",
+ "segments": [
+ {
+ "end": 47,
+ "start": 27,
+ "type": "FULL"
+ },
+ {
+ "end": 47,
+ "start": 27,
+ "type": "SHORT"
+ },
+ {
+ "end": 33,
+ "start": 31,
+ "type": "CLIENT_INITIALIZATION"
+ },
+ {
+ "end": 37,
+ "start": 34,
+ "type": "REQUEST_INITIALIZATION"
+ },
+ {
+ "end": 44,
+ "start": 38,
+ "type": "REQUEST_EXECUTION"
+ },
+ {
+ "end": 48,
+ "start": 45,
+ "type": "RESPONSE_HANDLING"
+ }
+ ]
+ },
+ {
+ "clientMethod": {
+ "async": true,
+ "method": {
+ "service": {
+ "shortName": "IndexEndpointService"
+ },
+ "shortName": "CreateIndexEndpoint"
+ }
+ },
+ "file": "aiplatform_generated_aiplatform_v1_index_endpoint_service_create_index_endpoint_async.py",
+ "regionTag": "aiplatform_generated_aiplatform_v1_IndexEndpointService_CreateIndexEndpoint_async",
+ "segments": [
+ {
+ "end": 52,
+ "start": 27,
+ "type": "FULL"
+ },
+ {
+ "end": 52,
+ "start": 27,
+ "type": "SHORT"
+ },
+ {
+ "end": 33,
+ "start": 31,
+ "type": "CLIENT_INITIALIZATION"
+ },
+ {
+ "end": 42,
+ "start": 34,
+ "type": "REQUEST_INITIALIZATION"
+ },
+ {
+ "end": 49,
+ "start": 43,
+ "type": "REQUEST_EXECUTION"
+ },
+ {
+ "end": 53,
+ "start": 50,
+ "type": "RESPONSE_HANDLING"
+ }
+ ]
+ },
+ {
+ "clientMethod": {
+ "method": {
+ "service": {
+ "shortName": "IndexEndpointService"
+ },
+ "shortName": "CreateIndexEndpoint"
+ }
+ },
+ "file": "aiplatform_generated_aiplatform_v1_index_endpoint_service_create_index_endpoint_sync.py",
+ "regionTag": "aiplatform_generated_aiplatform_v1_IndexEndpointService_CreateIndexEndpoint_sync",
+ "segments": [
+ {
+ "end": 52,
+ "start": 27,
+ "type": "FULL"
+ },
+ {
+ "end": 52,
+ "start": 27,
+ "type": "SHORT"
+ },
+ {
+ "end": 33,
+ "start": 31,
+ "type": "CLIENT_INITIALIZATION"
+ },
+ {
+ "end": 42,
+ "start": 34,
+ "type": "REQUEST_INITIALIZATION"
+ },
+ {
+ "end": 49,
+ "start": 43,
+ "type": "REQUEST_EXECUTION"
+ },
+ {
+ "end": 53,
+ "start": 50,
+ "type": "RESPONSE_HANDLING"
+ }
+ ]
+ },
+ {
+ "clientMethod": {
+ "async": true,
+ "method": {
+ "service": {
+ "shortName": "IndexEndpointService"
+ },
+ "shortName": "DeleteIndexEndpoint"
+ }
+ },
+ "file": "aiplatform_generated_aiplatform_v1_index_endpoint_service_delete_index_endpoint_async.py",
+ "regionTag": "aiplatform_generated_aiplatform_v1_IndexEndpointService_DeleteIndexEndpoint_async",
+ "segments": [
+ {
+ "end": 48,
+ "start": 27,
+ "type": "FULL"
+ },
+ {
+ "end": 48,
+ "start": 27,
+ "type": "SHORT"
+ },
+ {
+ "end": 33,
+ "start": 31,
+ "type": "CLIENT_INITIALIZATION"
+ },
+ {
+ "end": 38,
+ "start": 34,
+ "type": "REQUEST_INITIALIZATION"
+ },
+ {
+ "end": 45,
+ "start": 39,
+ "type": "REQUEST_EXECUTION"
+ },
+ {
+ "end": 49,
+ "start": 46,
+ "type": "RESPONSE_HANDLING"
+ }
+ ]
+ },
+ {
+ "clientMethod": {
+ "method": {
+ "service": {
+ "shortName": "IndexEndpointService"
+ },
+ "shortName": "DeleteIndexEndpoint"
+ }
+ },
+ "file": "aiplatform_generated_aiplatform_v1_index_endpoint_service_delete_index_endpoint_sync.py",
+ "regionTag": "aiplatform_generated_aiplatform_v1_IndexEndpointService_DeleteIndexEndpoint_sync",
+ "segments": [
+ {
+ "end": 48,
+ "start": 27,
+ "type": "FULL"
+ },
+ {
+ "end": 48,
+ "start": 27,
+ "type": "SHORT"
+ },
+ {
+ "end": 33,
+ "start": 31,
+ "type": "CLIENT_INITIALIZATION"
+ },
+ {
+ "end": 38,
+ "start": 34,
+ "type": "REQUEST_INITIALIZATION"
+ },
+ {
+ "end": 45,
+ "start": 39,
+ "type": "REQUEST_EXECUTION"
+ },
+ {
+ "end": 49,
+ "start": 46,
+ "type": "RESPONSE_HANDLING"
+ }
+ ]
+ },
+ {
+ "clientMethod": {
+ "async": true,
+ "method": {
+ "service": {
+ "shortName": "IndexEndpointService"
+ },
+ "shortName": "DeployIndex"
+ }
+ },
+ "file": "aiplatform_generated_aiplatform_v1_index_endpoint_service_deploy_index_async.py",
+ "regionTag": "aiplatform_generated_aiplatform_v1_IndexEndpointService_DeployIndex_async",
+ "segments": [
+ {
+ "end": 53,
+ "start": 27,
+ "type": "FULL"
+ },
+ {
+ "end": 53,
+ "start": 27,
+ "type": "SHORT"
+ },
+ {
+ "end": 33,
+ "start": 31,
+ "type": "CLIENT_INITIALIZATION"
+ },
+ {
+ "end": 43,
+ "start": 34,
+ "type": "REQUEST_INITIALIZATION"
+ },
+ {
+ "end": 50,
+ "start": 44,
+ "type": "REQUEST_EXECUTION"
+ },
+ {
+ "end": 54,
+ "start": 51,
+ "type": "RESPONSE_HANDLING"
+ }
+ ]
+ },
+ {
+ "clientMethod": {
+ "method": {
+ "service": {
+ "shortName": "IndexEndpointService"
+ },
+ "shortName": "DeployIndex"
+ }
+ },
+ "file": "aiplatform_generated_aiplatform_v1_index_endpoint_service_deploy_index_sync.py",
+ "regionTag": "aiplatform_generated_aiplatform_v1_IndexEndpointService_DeployIndex_sync",
+ "segments": [
+ {
+ "end": 53,
+ "start": 27,
+ "type": "FULL"
+ },
+ {
+ "end": 53,
+ "start": 27,
+ "type": "SHORT"
+ },
+ {
+ "end": 33,
+ "start": 31,
+ "type": "CLIENT_INITIALIZATION"
+ },
+ {
+ "end": 43,
+ "start": 34,
+ "type": "REQUEST_INITIALIZATION"
+ },
+ {
+ "end": 50,
+ "start": 44,
+ "type": "REQUEST_EXECUTION"
+ },
+ {
+ "end": 54,
+ "start": 51,
+ "type": "RESPONSE_HANDLING"
+ }
+ ]
+ },
+ {
+ "clientMethod": {
+ "async": true,
+ "method": {
+ "service": {
+ "shortName": "IndexEndpointService"
+ },
+ "shortName": "GetIndexEndpoint"
+ }
+ },
+ "file": "aiplatform_generated_aiplatform_v1_index_endpoint_service_get_index_endpoint_async.py",
+ "regionTag": "aiplatform_generated_aiplatform_v1_IndexEndpointService_GetIndexEndpoint_async",
+ "segments": [
+ {
+ "end": 44,
+ "start": 27,
+ "type": "FULL"
+ },
+ {
+ "end": 44,
+ "start": 27,
+ "type": "SHORT"
+ },
+ {
+ "end": 33,
+ "start": 31,
+ "type": "CLIENT_INITIALIZATION"
+ },
+ {
+ "end": 38,
+ "start": 34,
+ "type": "REQUEST_INITIALIZATION"
+ },
+ {
+ "end": 41,
+ "start": 39,
+ "type": "REQUEST_EXECUTION"
+ },
+ {
+ "end": 45,
+ "start": 42,
+ "type": "RESPONSE_HANDLING"
+ }
+ ]
+ },
+ {
+ "clientMethod": {
+ "method": {
+ "service": {
+ "shortName": "IndexEndpointService"
+ },
+ "shortName": "GetIndexEndpoint"
+ }
+ },
+ "file": "aiplatform_generated_aiplatform_v1_index_endpoint_service_get_index_endpoint_sync.py",
+ "regionTag": "aiplatform_generated_aiplatform_v1_IndexEndpointService_GetIndexEndpoint_sync",
+ "segments": [
+ {
+ "end": 44,
+ "start": 27,
+ "type": "FULL"
+ },
+ {
+ "end": 44,
+ "start": 27,
+ "type": "SHORT"
+ },
+ {
+ "end": 33,
+ "start": 31,
+ "type": "CLIENT_INITIALIZATION"
+ },
+ {
+ "end": 38,
+ "start": 34,
+ "type": "REQUEST_INITIALIZATION"
+ },
+ {
+ "end": 41,
+ "start": 39,
+ "type": "REQUEST_EXECUTION"
+ },
+ {
+ "end": 45,
+ "start": 42,
+ "type": "RESPONSE_HANDLING"
+ }
+ ]
+ },
+ {
+ "clientMethod": {
+ "async": true,
+ "method": {
+ "service": {
+ "shortName": "IndexEndpointService"
+ },
+ "shortName": "ListIndexEndpoints"
+ }
+ },
+ "file": "aiplatform_generated_aiplatform_v1_index_endpoint_service_list_index_endpoints_async.py",
+ "regionTag": "aiplatform_generated_aiplatform_v1_IndexEndpointService_ListIndexEndpoints_async",
+ "segments": [
+ {
+ "end": 45,
+ "start": 27,
+ "type": "FULL"
+ },
+ {
+ "end": 45,
+ "start": 27,
+ "type": "SHORT"
+ },
+ {
+ "end": 33,
+ "start": 31,
+ "type": "CLIENT_INITIALIZATION"
+ },
+ {
+ "end": 38,
+ "start": 34,
+ "type": "REQUEST_INITIALIZATION"
+ },
+ {
+ "end": 41,
+ "start": 39,
+ "type": "REQUEST_EXECUTION"
+ },
+ {
+ "end": 46,
+ "start": 42,
+ "type": "RESPONSE_HANDLING"
+ }
+ ]
+ },
+ {
+ "clientMethod": {
+ "method": {
+ "service": {
+ "shortName": "IndexEndpointService"
+ },
+ "shortName": "ListIndexEndpoints"
+ }
+ },
+ "file": "aiplatform_generated_aiplatform_v1_index_endpoint_service_list_index_endpoints_sync.py",
+ "regionTag": "aiplatform_generated_aiplatform_v1_IndexEndpointService_ListIndexEndpoints_sync",
+ "segments": [
+ {
+ "end": 45,
+ "start": 27,
+ "type": "FULL"
+ },
+ {
+ "end": 45,
+ "start": 27,
+ "type": "SHORT"
+ },
+ {
+ "end": 33,
+ "start": 31,
+ "type": "CLIENT_INITIALIZATION"
+ },
+ {
+ "end": 38,
+ "start": 34,
+ "type": "REQUEST_INITIALIZATION"
+ },
+ {
+ "end": 41,
+ "start": 39,
+ "type": "REQUEST_EXECUTION"
+ },
+ {
+ "end": 46,
+ "start": 42,
+ "type": "RESPONSE_HANDLING"
+ }
+ ]
+ },
+ {
+ "clientMethod": {
+ "async": true,
+ "method": {
+ "service": {
+ "shortName": "IndexEndpointService"
+ },
+ "shortName": "MutateDeployedIndex"
+ }
+ },
+ "file": "aiplatform_generated_aiplatform_v1_index_endpoint_service_mutate_deployed_index_async.py",
+ "regionTag": "aiplatform_generated_aiplatform_v1_IndexEndpointService_MutateDeployedIndex_async",
+ "segments": [
+ {
+ "end": 53,
+ "start": 27,
+ "type": "FULL"
+ },
+ {
+ "end": 53,
+ "start": 27,
+ "type": "SHORT"
+ },
+ {
+ "end": 33,
+ "start": 31,
+ "type": "CLIENT_INITIALIZATION"
+ },
+ {
+ "end": 43,
+ "start": 34,
+ "type": "REQUEST_INITIALIZATION"
+ },
+ {
+ "end": 50,
+ "start": 44,
+ "type": "REQUEST_EXECUTION"
+ },
+ {
+ "end": 54,
+ "start": 51,
+ "type": "RESPONSE_HANDLING"
+ }
+ ]
+ },
+ {
+ "clientMethod": {
+ "method": {
+ "service": {
+ "shortName": "IndexEndpointService"
+ },
+ "shortName": "MutateDeployedIndex"
+ }
+ },
+ "file": "aiplatform_generated_aiplatform_v1_index_endpoint_service_mutate_deployed_index_sync.py",
+ "regionTag": "aiplatform_generated_aiplatform_v1_IndexEndpointService_MutateDeployedIndex_sync",
+ "segments": [
+ {
+ "end": 53,
+ "start": 27,
+ "type": "FULL"
+ },
+ {
+ "end": 53,
+ "start": 27,
+ "type": "SHORT"
+ },
+ {
+ "end": 33,
+ "start": 31,
+ "type": "CLIENT_INITIALIZATION"
+ },
+ {
+ "end": 43,
+ "start": 34,
+ "type": "REQUEST_INITIALIZATION"
+ },
+ {
+ "end": 50,
+ "start": 44,
+ "type": "REQUEST_EXECUTION"
+ },
+ {
+ "end": 54,
+ "start": 51,
+ "type": "RESPONSE_HANDLING"
+ }
+ ]
+ },
+ {
+ "clientMethod": {
+ "async": true,
+ "method": {
+ "service": {
+ "shortName": "IndexEndpointService"
+ },
+ "shortName": "UndeployIndex"
+ }
+ },
+ "file": "aiplatform_generated_aiplatform_v1_index_endpoint_service_undeploy_index_async.py",
+ "regionTag": "aiplatform_generated_aiplatform_v1_IndexEndpointService_UndeployIndex_async",
+ "segments": [
+ {
+ "end": 49,
+ "start": 27,
+ "type": "FULL"
+ },
+ {
+ "end": 49,
+ "start": 27,
+ "type": "SHORT"
+ },
+ {
+ "end": 33,
+ "start": 31,
+ "type": "CLIENT_INITIALIZATION"
+ },
+ {
+ "end": 39,
+ "start": 34,
+ "type": "REQUEST_INITIALIZATION"
+ },
+ {
+ "end": 46,
+ "start": 40,
+ "type": "REQUEST_EXECUTION"
+ },
+ {
+ "end": 50,
+ "start": 47,
+ "type": "RESPONSE_HANDLING"
+ }
+ ]
+ },
+ {
+ "clientMethod": {
+ "method": {
+ "service": {
+ "shortName": "IndexEndpointService"
+ },
+ "shortName": "UndeployIndex"
+ }
+ },
+ "file": "aiplatform_generated_aiplatform_v1_index_endpoint_service_undeploy_index_sync.py",
+ "regionTag": "aiplatform_generated_aiplatform_v1_IndexEndpointService_UndeployIndex_sync",
+ "segments": [
+ {
+ "end": 49,
+ "start": 27,
+ "type": "FULL"
+ },
+ {
+ "end": 49,
+ "start": 27,
+ "type": "SHORT"
+ },
+ {
+ "end": 33,
+ "start": 31,
+ "type": "CLIENT_INITIALIZATION"
+ },
+ {
+ "end": 39,
+ "start": 34,
+ "type": "REQUEST_INITIALIZATION"
+ },
+ {
+ "end": 46,
+ "start": 40,
+ "type": "REQUEST_EXECUTION"
+ },
+ {
+ "end": 50,
+ "start": 47,
+ "type": "RESPONSE_HANDLING"
+ }
+ ]
+ },
+ {
+ "clientMethod": {
+ "async": true,
+ "method": {
+ "service": {
+ "shortName": "IndexEndpointService"
+ },
+ "shortName": "UpdateIndexEndpoint"
+ }
+ },
+ "file": "aiplatform_generated_aiplatform_v1_index_endpoint_service_update_index_endpoint_async.py",
+ "regionTag": "aiplatform_generated_aiplatform_v1_IndexEndpointService_UpdateIndexEndpoint_async",
+ "segments": [
+ {
+ "end": 47,
+ "start": 27,
+ "type": "FULL"
+ },
+ {
+ "end": 47,
+ "start": 27,
+ "type": "SHORT"
+ },
+ {
+ "end": 33,
+ "start": 31,
+ "type": "CLIENT_INITIALIZATION"
+ },
+ {
+ "end": 41,
+ "start": 34,
+ "type": "REQUEST_INITIALIZATION"
+ },
+ {
+ "end": 44,
+ "start": 42,
+ "type": "REQUEST_EXECUTION"
+ },
+ {
+ "end": 48,
+ "start": 45,
+ "type": "RESPONSE_HANDLING"
+ }
+ ]
+ },
+ {
+ "clientMethod": {
+ "method": {
+ "service": {
+ "shortName": "IndexEndpointService"
+ },
+ "shortName": "UpdateIndexEndpoint"
+ }
+ },
+ "file": "aiplatform_generated_aiplatform_v1_index_endpoint_service_update_index_endpoint_sync.py",
+ "regionTag": "aiplatform_generated_aiplatform_v1_IndexEndpointService_UpdateIndexEndpoint_sync",
+ "segments": [
+ {
+ "end": 47,
+ "start": 27,
+ "type": "FULL"
+ },
+ {
+ "end": 47,
+ "start": 27,
+ "type": "SHORT"
+ },
+ {
+ "end": 33,
+ "start": 31,
+ "type": "CLIENT_INITIALIZATION"
+ },
+ {
+ "end": 41,
+ "start": 34,
+ "type": "REQUEST_INITIALIZATION"
+ },
+ {
+ "end": 44,
+ "start": 42,
+ "type": "REQUEST_EXECUTION"
+ },
+ {
+ "end": 48,
+ "start": 45,
+ "type": "RESPONSE_HANDLING"
+ }
+ ]
+ },
+ {
+ "clientMethod": {
+ "async": true,
+ "method": {
+ "service": {
+ "shortName": "IndexService"
+ },
+ "shortName": "CreateIndex"
+ }
+ },
+ "file": "aiplatform_generated_aiplatform_v1_index_service_create_index_async.py",
+ "regionTag": "aiplatform_generated_aiplatform_v1_IndexService_CreateIndex_async",
+ "segments": [
+ {
+ "end": 52,
+ "start": 27,
+ "type": "FULL"
+ },
+ {
+ "end": 52,
+ "start": 27,
+ "type": "SHORT"
+ },
+ {
+ "end": 33,
+ "start": 31,
+ "type": "CLIENT_INITIALIZATION"
+ },
+ {
+ "end": 42,
+ "start": 34,
+ "type": "REQUEST_INITIALIZATION"
+ },
+ {
+ "end": 49,
+ "start": 43,
+ "type": "REQUEST_EXECUTION"
+ },
+ {
+ "end": 53,
+ "start": 50,
+ "type": "RESPONSE_HANDLING"
+ }
+ ]
+ },
+ {
+ "clientMethod": {
+ "method": {
+ "service": {
+ "shortName": "IndexService"
+ },
+ "shortName": "CreateIndex"
+ }
+ },
+ "file": "aiplatform_generated_aiplatform_v1_index_service_create_index_sync.py",
+ "regionTag": "aiplatform_generated_aiplatform_v1_IndexService_CreateIndex_sync",
+ "segments": [
+ {
+ "end": 52,
+ "start": 27,
+ "type": "FULL"
+ },
+ {
+ "end": 52,
+ "start": 27,
+ "type": "SHORT"
+ },
+ {
+ "end": 33,
+ "start": 31,
+ "type": "CLIENT_INITIALIZATION"
+ },
+ {
+ "end": 42,
+ "start": 34,
+ "type": "REQUEST_INITIALIZATION"
+ },
+ {
+ "end": 49,
+ "start": 43,
+ "type": "REQUEST_EXECUTION"
+ },
+ {
+ "end": 53,
+ "start": 50,
+ "type": "RESPONSE_HANDLING"
+ }
+ ]
+ },
+ {
+ "clientMethod": {
+ "async": true,
+ "method": {
+ "service": {
+ "shortName": "IndexService"
+ },
+ "shortName": "DeleteIndex"
+ }
+ },
+ "file": "aiplatform_generated_aiplatform_v1_index_service_delete_index_async.py",
+ "regionTag": "aiplatform_generated_aiplatform_v1_IndexService_DeleteIndex_async",
+ "segments": [
+ {
+ "end": 48,
+ "start": 27,
+ "type": "FULL"
+ },
+ {
+ "end": 48,
+ "start": 27,
+ "type": "SHORT"
+ },
+ {
+ "end": 33,
+ "start": 31,
+ "type": "CLIENT_INITIALIZATION"
+ },
+ {
+ "end": 38,
+ "start": 34,
+ "type": "REQUEST_INITIALIZATION"
+ },
+ {
+ "end": 45,
+ "start": 39,
+ "type": "REQUEST_EXECUTION"
+ },
+ {
+ "end": 49,
+ "start": 46,
+ "type": "RESPONSE_HANDLING"
+ }
+ ]
+ },
+ {
+ "clientMethod": {
+ "method": {
+ "service": {
+ "shortName": "IndexService"
+ },
+ "shortName": "DeleteIndex"
+ }
+ },
+ "file": "aiplatform_generated_aiplatform_v1_index_service_delete_index_sync.py",
+ "regionTag": "aiplatform_generated_aiplatform_v1_IndexService_DeleteIndex_sync",
+ "segments": [
+ {
+ "end": 48,
+ "start": 27,
+ "type": "FULL"
+ },
+ {
+ "end": 48,
+ "start": 27,
+ "type": "SHORT"
+ },
+ {
+ "end": 33,
+ "start": 31,
+ "type": "CLIENT_INITIALIZATION"
+ },
+ {
+ "end": 38,
+ "start": 34,
+ "type": "REQUEST_INITIALIZATION"
+ },
+ {
+ "end": 45,
+ "start": 39,
+ "type": "REQUEST_EXECUTION"
+ },
+ {
+ "end": 49,
+ "start": 46,
+ "type": "RESPONSE_HANDLING"
+ }
+ ]
+ },
+ {
+ "clientMethod": {
+ "async": true,
+ "method": {
+ "service": {
+ "shortName": "IndexService"
+ },
+ "shortName": "GetIndex"
+ }
+ },
+ "file": "aiplatform_generated_aiplatform_v1_index_service_get_index_async.py",
+ "regionTag": "aiplatform_generated_aiplatform_v1_IndexService_GetIndex_async",
+ "segments": [
+ {
+ "end": 44,
+ "start": 27,
+ "type": "FULL"
+ },
+ {
+ "end": 44,
+ "start": 27,
+ "type": "SHORT"
+ },
+ {
+ "end": 33,
+ "start": 31,
+ "type": "CLIENT_INITIALIZATION"
+ },
+ {
+ "end": 38,
+ "start": 34,
+ "type": "REQUEST_INITIALIZATION"
+ },
+ {
+ "end": 41,
+ "start": 39,
+ "type": "REQUEST_EXECUTION"
+ },
+ {
+ "end": 45,
+ "start": 42,
+ "type": "RESPONSE_HANDLING"
+ }
+ ]
+ },
+ {
+ "clientMethod": {
+ "method": {
+ "service": {
+ "shortName": "IndexService"
+ },
+ "shortName": "GetIndex"
+ }
+ },
+ "file": "aiplatform_generated_aiplatform_v1_index_service_get_index_sync.py",
+ "regionTag": "aiplatform_generated_aiplatform_v1_IndexService_GetIndex_sync",
+ "segments": [
+ {
+ "end": 44,
+ "start": 27,
+ "type": "FULL"
+ },
+ {
+ "end": 44,
+ "start": 27,
+ "type": "SHORT"
+ },
+ {
+ "end": 33,
+ "start": 31,
+ "type": "CLIENT_INITIALIZATION"
+ },
+ {
+ "end": 38,
+ "start": 34,
+ "type": "REQUEST_INITIALIZATION"
+ },
+ {
+ "end": 41,
+ "start": 39,
+ "type": "REQUEST_EXECUTION"
+ },
+ {
+ "end": 45,
+ "start": 42,
+ "type": "RESPONSE_HANDLING"
+ }
+ ]
+ },
+ {
+ "clientMethod": {
+ "async": true,
+ "method": {
+ "service": {
+ "shortName": "IndexService"
+ },
+ "shortName": "ListIndexes"
+ }
+ },
+ "file": "aiplatform_generated_aiplatform_v1_index_service_list_indexes_async.py",
+ "regionTag": "aiplatform_generated_aiplatform_v1_IndexService_ListIndexes_async",
+ "segments": [
+ {
+ "end": 45,
+ "start": 27,
+ "type": "FULL"
+ },
+ {
+ "end": 45,
+ "start": 27,
+ "type": "SHORT"
+ },
+ {
+ "end": 33,
+ "start": 31,
+ "type": "CLIENT_INITIALIZATION"
+ },
+ {
+ "end": 38,
+ "start": 34,
+ "type": "REQUEST_INITIALIZATION"
+ },
+ {
+ "end": 41,
+ "start": 39,
+ "type": "REQUEST_EXECUTION"
+ },
+ {
+ "end": 46,
+ "start": 42,
+ "type": "RESPONSE_HANDLING"
+ }
+ ]
+ },
+ {
+ "clientMethod": {
+ "method": {
+ "service": {
+ "shortName": "IndexService"
+ },
+ "shortName": "ListIndexes"
+ }
+ },
+ "file": "aiplatform_generated_aiplatform_v1_index_service_list_indexes_sync.py",
+ "regionTag": "aiplatform_generated_aiplatform_v1_IndexService_ListIndexes_sync",
+ "segments": [
+ {
+ "end": 45,
+ "start": 27,
+ "type": "FULL"
+ },
+ {
+ "end": 45,
+ "start": 27,
+ "type": "SHORT"
+ },
+ {
+ "end": 33,
+ "start": 31,
+ "type": "CLIENT_INITIALIZATION"
+ },
+ {
+ "end": 38,
+ "start": 34,
+ "type": "REQUEST_INITIALIZATION"
+ },
+ {
+ "end": 41,
+ "start": 39,
+ "type": "REQUEST_EXECUTION"
+ },
+ {
+ "end": 46,
+ "start": 42,
+ "type": "RESPONSE_HANDLING"
+ }
+ ]
+ },
+ {
+ "clientMethod": {
+ "async": true,
+ "method": {
+ "service": {
+ "shortName": "IndexService"
+ },
+ "shortName": "UpdateIndex"
+ }
+ },
+ "file": "aiplatform_generated_aiplatform_v1_index_service_update_index_async.py",
+ "regionTag": "aiplatform_generated_aiplatform_v1_IndexService_UpdateIndex_async",
+ "segments": [
+ {
+ "end": 51,
+ "start": 27,
+ "type": "FULL"
+ },
+ {
+ "end": 51,
+ "start": 27,
+ "type": "SHORT"
+ },
+ {
+ "end": 33,
+ "start": 31,
+ "type": "CLIENT_INITIALIZATION"
+ },
+ {
+ "end": 41,
+ "start": 34,
+ "type": "REQUEST_INITIALIZATION"
+ },
+ {
+ "end": 48,
+ "start": 42,
+ "type": "REQUEST_EXECUTION"
+ },
+ {
+ "end": 52,
+ "start": 49,
+ "type": "RESPONSE_HANDLING"
+ }
+ ]
+ },
+ {
+ "clientMethod": {
+ "method": {
+ "service": {
+ "shortName": "IndexService"
+ },
+ "shortName": "UpdateIndex"
+ }
+ },
+ "file": "aiplatform_generated_aiplatform_v1_index_service_update_index_sync.py",
+ "regionTag": "aiplatform_generated_aiplatform_v1_IndexService_UpdateIndex_sync",
+ "segments": [
+ {
+ "end": 51,
+ "start": 27,
+ "type": "FULL"
+ },
+ {
+ "end": 51,
+ "start": 27,
+ "type": "SHORT"
+ },
+ {
+ "end": 33,
+ "start": 31,
+ "type": "CLIENT_INITIALIZATION"
+ },
+ {
+ "end": 41,
+ "start": 34,
+ "type": "REQUEST_INITIALIZATION"
+ },
+ {
+ "end": 48,
+ "start": 42,
+ "type": "REQUEST_EXECUTION"
+ },
+ {
+ "end": 52,
+ "start": 49,
+ "type": "RESPONSE_HANDLING"
+ }
+ ]
+ },
+ {
+ "clientMethod": {
+ "async": true,
+ "method": {
+ "service": {
+ "shortName": "JobService"
+ },
+ "shortName": "CancelBatchPredictionJob"
+ }
+ },
+ "file": "aiplatform_generated_aiplatform_v1_job_service_cancel_batch_prediction_job_async.py",
+ "regionTag": "aiplatform_generated_aiplatform_v1_JobService_CancelBatchPredictionJob_async",
+ "segments": [
+ {
+ "end": 42,
+ "start": 27,
+ "type": "FULL"
+ },
+ {
+ "end": 42,
+ "start": 27,
+ "type": "SHORT"
+ },
+ {
+ "end": 33,
+ "start": 31,
+ "type": "CLIENT_INITIALIZATION"
+ },
+ {
+ "end": 38,
+ "start": 34,
+ "type": "REQUEST_INITIALIZATION"
+ },
+ {
+ "start": 39,
+ "type": "REQUEST_EXECUTION"
+ },
+ {
+ "end": 43,
+ "type": "RESPONSE_HANDLING"
+ }
+ ]
+ },
+ {
+ "clientMethod": {
+ "method": {
+ "service": {
+ "shortName": "JobService"
+ },
+ "shortName": "CancelBatchPredictionJob"
+ }
+ },
+ "file": "aiplatform_generated_aiplatform_v1_job_service_cancel_batch_prediction_job_sync.py",
+ "regionTag": "aiplatform_generated_aiplatform_v1_JobService_CancelBatchPredictionJob_sync",
+ "segments": [
+ {
+ "end": 42,
+ "start": 27,
+ "type": "FULL"
+ },
+ {
+ "end": 42,
+ "start": 27,
+ "type": "SHORT"
+ },
+ {
+ "end": 33,
+ "start": 31,
+ "type": "CLIENT_INITIALIZATION"
+ },
+ {
+ "end": 38,
+ "start": 34,
+ "type": "REQUEST_INITIALIZATION"
+ },
+ {
+ "start": 39,
+ "type": "REQUEST_EXECUTION"
+ },
+ {
+ "end": 43,
+ "type": "RESPONSE_HANDLING"
+ }
+ ]
+ },
+ {
+ "clientMethod": {
+ "async": true,
+ "method": {
+ "service": {
+ "shortName": "JobService"
+ },
+ "shortName": "CancelCustomJob"
+ }
+ },
+ "file": "aiplatform_generated_aiplatform_v1_job_service_cancel_custom_job_async.py",
+ "regionTag": "aiplatform_generated_aiplatform_v1_JobService_CancelCustomJob_async",
+ "segments": [
+ {
+ "end": 42,
+ "start": 27,
+ "type": "FULL"
+ },
+ {
+ "end": 42,
+ "start": 27,
+ "type": "SHORT"
+ },
+ {
+ "end": 33,
+ "start": 31,
+ "type": "CLIENT_INITIALIZATION"
+ },
+ {
+ "end": 38,
+ "start": 34,
+ "type": "REQUEST_INITIALIZATION"
+ },
+ {
+ "start": 39,
+ "type": "REQUEST_EXECUTION"
+ },
+ {
+ "end": 43,
+ "type": "RESPONSE_HANDLING"
+ }
+ ]
+ },
+ {
+ "clientMethod": {
+ "method": {
+ "service": {
+ "shortName": "JobService"
+ },
+ "shortName": "CancelCustomJob"
+ }
+ },
+ "file": "aiplatform_generated_aiplatform_v1_job_service_cancel_custom_job_sync.py",
+ "regionTag": "aiplatform_generated_aiplatform_v1_JobService_CancelCustomJob_sync",
+ "segments": [
+ {
+ "end": 42,
+ "start": 27,
+ "type": "FULL"
+ },
+ {
+ "end": 42,
+ "start": 27,
+ "type": "SHORT"
+ },
+ {
+ "end": 33,
+ "start": 31,
+ "type": "CLIENT_INITIALIZATION"
+ },
+ {
+ "end": 38,
+ "start": 34,
+ "type": "REQUEST_INITIALIZATION"
+ },
+ {
+ "start": 39,
+ "type": "REQUEST_EXECUTION"
+ },
+ {
+ "end": 43,
+ "type": "RESPONSE_HANDLING"
+ }
+ ]
+ },
+ {
+ "clientMethod": {
+ "async": true,
+ "method": {
+ "service": {
+ "shortName": "JobService"
+ },
+ "shortName": "CancelDataLabelingJob"
+ }
+ },
+ "file": "aiplatform_generated_aiplatform_v1_job_service_cancel_data_labeling_job_async.py",
+ "regionTag": "aiplatform_generated_aiplatform_v1_JobService_CancelDataLabelingJob_async",
+ "segments": [
+ {
+ "end": 42,
+ "start": 27,
+ "type": "FULL"
+ },
+ {
+ "end": 42,
+ "start": 27,
+ "type": "SHORT"
+ },
+ {
+ "end": 33,
+ "start": 31,
+ "type": "CLIENT_INITIALIZATION"
+ },
+ {
+ "end": 38,
+ "start": 34,
+ "type": "REQUEST_INITIALIZATION"
+ },
+ {
+ "start": 39,
+ "type": "REQUEST_EXECUTION"
+ },
+ {
+ "end": 43,
+ "type": "RESPONSE_HANDLING"
+ }
+ ]
+ },
+ {
+ "clientMethod": {
+ "method": {
+ "service": {
+ "shortName": "JobService"
+ },
+ "shortName": "CancelDataLabelingJob"
+ }
+ },
+ "file": "aiplatform_generated_aiplatform_v1_job_service_cancel_data_labeling_job_sync.py",
+ "regionTag": "aiplatform_generated_aiplatform_v1_JobService_CancelDataLabelingJob_sync",
+ "segments": [
+ {
+ "end": 42,
+ "start": 27,
+ "type": "FULL"
+ },
+ {
+ "end": 42,
+ "start": 27,
+ "type": "SHORT"
+ },
+ {
+ "end": 33,
+ "start": 31,
+ "type": "CLIENT_INITIALIZATION"
+ },
+ {
+ "end": 38,
+ "start": 34,
+ "type": "REQUEST_INITIALIZATION"
+ },
+ {
+ "start": 39,
+ "type": "REQUEST_EXECUTION"
+ },
+ {
+ "end": 43,
+ "type": "RESPONSE_HANDLING"
+ }
+ ]
+ },
+ {
+ "clientMethod": {
+ "async": true,
+ "method": {
+ "service": {
+ "shortName": "JobService"
+ },
+ "shortName": "CancelHyperparameterTuningJob"
+ }
+ },
+ "file": "aiplatform_generated_aiplatform_v1_job_service_cancel_hyperparameter_tuning_job_async.py",
+ "regionTag": "aiplatform_generated_aiplatform_v1_JobService_CancelHyperparameterTuningJob_async",
+ "segments": [
+ {
+ "end": 42,
+ "start": 27,
+ "type": "FULL"
+ },
+ {
+ "end": 42,
+ "start": 27,
+ "type": "SHORT"
+ },
+ {
+ "end": 33,
+ "start": 31,
+ "type": "CLIENT_INITIALIZATION"
+ },
+ {
+ "end": 38,
+ "start": 34,
+ "type": "REQUEST_INITIALIZATION"
+ },
+ {
+ "start": 39,
+ "type": "REQUEST_EXECUTION"
+ },
+ {
+ "end": 43,
+ "type": "RESPONSE_HANDLING"
+ }
+ ]
+ },
+ {
+ "clientMethod": {
+ "method": {
+ "service": {
+ "shortName": "JobService"
+ },
+ "shortName": "CancelHyperparameterTuningJob"
+ }
+ },
+ "file": "aiplatform_generated_aiplatform_v1_job_service_cancel_hyperparameter_tuning_job_sync.py",
+ "regionTag": "aiplatform_generated_aiplatform_v1_JobService_CancelHyperparameterTuningJob_sync",
+ "segments": [
+ {
+ "end": 42,
+ "start": 27,
+ "type": "FULL"
+ },
+ {
+ "end": 42,
+ "start": 27,
+ "type": "SHORT"
+ },
+ {
+ "end": 33,
+ "start": 31,
+ "type": "CLIENT_INITIALIZATION"
+ },
+ {
+ "end": 38,
+ "start": 34,
+ "type": "REQUEST_INITIALIZATION"
+ },
+ {
+ "start": 39,
+ "type": "REQUEST_EXECUTION"
+ },
+ {
+ "end": 43,
+ "type": "RESPONSE_HANDLING"
+ }
+ ]
+ },
+ {
+ "clientMethod": {
+ "async": true,
+ "method": {
+ "service": {
+ "shortName": "JobService"
+ },
+ "shortName": "CreateBatchPredictionJob"
+ }
+ },
+ "file": "aiplatform_generated_aiplatform_v1_job_service_create_batch_prediction_job_async.py",
+ "regionTag": "aiplatform_generated_aiplatform_v1_JobService_CreateBatchPredictionJob_async",
+ "segments": [
+ {
+ "end": 52,
+ "start": 27,
+ "type": "FULL"
+ },
+ {
+ "end": 52,
+ "start": 27,
+ "type": "SHORT"
+ },
+ {
+ "end": 33,
+ "start": 31,
+ "type": "CLIENT_INITIALIZATION"
+ },
+ {
+ "end": 46,
+ "start": 34,
+ "type": "REQUEST_INITIALIZATION"
+ },
+ {
+ "end": 49,
+ "start": 47,
+ "type": "REQUEST_EXECUTION"
+ },
+ {
+ "end": 53,
+ "start": 50,
+ "type": "RESPONSE_HANDLING"
+ }
+ ]
+ },
+ {
+ "clientMethod": {
+ "method": {
+ "service": {
+ "shortName": "JobService"
+ },
+ "shortName": "CreateBatchPredictionJob"
+ }
+ },
+ "file": "aiplatform_generated_aiplatform_v1_job_service_create_batch_prediction_job_sync.py",
+ "regionTag": "aiplatform_generated_aiplatform_v1_JobService_CreateBatchPredictionJob_sync",
+ "segments": [
+ {
+ "end": 52,
+ "start": 27,
+ "type": "FULL"
+ },
+ {
+ "end": 52,
+ "start": 27,
+ "type": "SHORT"
+ },
+ {
+ "end": 33,
+ "start": 31,
+ "type": "CLIENT_INITIALIZATION"
+ },
+ {
+ "end": 46,
+ "start": 34,
+ "type": "REQUEST_INITIALIZATION"
+ },
+ {
+ "end": 49,
+ "start": 47,
+ "type": "REQUEST_EXECUTION"
+ },
+ {
+ "end": 53,
+ "start": 50,
+ "type": "RESPONSE_HANDLING"
+ }
+ ]
+ },
+ {
+ "clientMethod": {
+ "async": true,
+ "method": {
+ "service": {
+ "shortName": "JobService"
+ },
+ "shortName": "CreateCustomJob"
+ }
+ },
+ "file": "aiplatform_generated_aiplatform_v1_job_service_create_custom_job_async.py",
+ "regionTag": "aiplatform_generated_aiplatform_v1_JobService_CreateCustomJob_async",
+ "segments": [
+ {
+ "end": 49,
+ "start": 27,
+ "type": "FULL"
+ },
+ {
+ "end": 49,
+ "start": 27,
+ "type": "SHORT"
+ },
+ {
+ "end": 33,
+ "start": 31,
+ "type": "CLIENT_INITIALIZATION"
+ },
+ {
+ "end": 43,
+ "start": 34,
+ "type": "REQUEST_INITIALIZATION"
+ },
+ {
+ "end": 46,
+ "start": 44,
+ "type": "REQUEST_EXECUTION"
+ },
+ {
+ "end": 50,
+ "start": 47,
+ "type": "RESPONSE_HANDLING"
+ }
+ ]
+ },
+ {
+ "clientMethod": {
+ "method": {
+ "service": {
+ "shortName": "JobService"
+ },
+ "shortName": "CreateCustomJob"
+ }
+ },
+ "file": "aiplatform_generated_aiplatform_v1_job_service_create_custom_job_sync.py",
+ "regionTag": "aiplatform_generated_aiplatform_v1_JobService_CreateCustomJob_sync",
+ "segments": [
+ {
+ "end": 49,
+ "start": 27,
+ "type": "FULL"
+ },
+ {
+ "end": 49,
+ "start": 27,
+ "type": "SHORT"
+ },
+ {
+ "end": 33,
+ "start": 31,
+ "type": "CLIENT_INITIALIZATION"
+ },
+ {
+ "end": 43,
+ "start": 34,
+ "type": "REQUEST_INITIALIZATION"
+ },
+ {
+ "end": 46,
+ "start": 44,
+ "type": "REQUEST_EXECUTION"
+ },
+ {
+ "end": 50,
+ "start": 47,
+ "type": "RESPONSE_HANDLING"
+ }
+ ]
+ },
+ {
+ "clientMethod": {
+ "async": true,
+ "method": {
+ "service": {
+ "shortName": "JobService"
+ },
+ "shortName": "CreateDataLabelingJob"
+ }
+ },
+ "file": "aiplatform_generated_aiplatform_v1_job_service_create_data_labeling_job_async.py",
+ "regionTag": "aiplatform_generated_aiplatform_v1_JobService_CreateDataLabelingJob_async",
+ "segments": [
+ {
+ "end": 53,
+ "start": 27,
+ "type": "FULL"
+ },
+ {
+ "end": 53,
+ "start": 27,
+ "type": "SHORT"
+ },
+ {
+ "end": 33,
+ "start": 31,
+ "type": "CLIENT_INITIALIZATION"
+ },
+ {
+ "end": 47,
+ "start": 34,
+ "type": "REQUEST_INITIALIZATION"
+ },
+ {
+ "end": 50,
+ "start": 48,
+ "type": "REQUEST_EXECUTION"
+ },
+ {
+ "end": 54,
+ "start": 51,
+ "type": "RESPONSE_HANDLING"
+ }
+ ]
+ },
+ {
+ "clientMethod": {
+ "method": {
+ "service": {
+ "shortName": "JobService"
+ },
+ "shortName": "CreateDataLabelingJob"
+ }
+ },
+ "file": "aiplatform_generated_aiplatform_v1_job_service_create_data_labeling_job_sync.py",
+ "regionTag": "aiplatform_generated_aiplatform_v1_JobService_CreateDataLabelingJob_sync",
+ "segments": [
+ {
+ "end": 53,
+ "start": 27,
+ "type": "FULL"
+ },
+ {
+ "end": 53,
+ "start": 27,
+ "type": "SHORT"
+ },
+ {
+ "end": 33,
+ "start": 31,
+ "type": "CLIENT_INITIALIZATION"
+ },
+ {
+ "end": 47,
+ "start": 34,
+ "type": "REQUEST_INITIALIZATION"
+ },
+ {
+ "end": 50,
+ "start": 48,
+ "type": "REQUEST_EXECUTION"
+ },
+ {
+ "end": 54,
+ "start": 51,
+ "type": "RESPONSE_HANDLING"
+ }
+ ]
+ },
+ {
+ "clientMethod": {
+ "async": true,
+ "method": {
+ "service": {
+ "shortName": "JobService"
+ },
+ "shortName": "CreateHyperparameterTuningJob"
+ }
+ },
+ "file": "aiplatform_generated_aiplatform_v1_job_service_create_hyperparameter_tuning_job_async.py",
+ "regionTag": "aiplatform_generated_aiplatform_v1_JobService_CreateHyperparameterTuningJob_async",
+ "segments": [
+ {
+ "end": 56,
+ "start": 27,
+ "type": "FULL"
+ },
+ {
+ "end": 56,
+ "start": 27,
+ "type": "SHORT"
+ },
+ {
+ "end": 33,
+ "start": 31,
+ "type": "CLIENT_INITIALIZATION"
+ },
+ {
+ "end": 50,
+ "start": 34,
+ "type": "REQUEST_INITIALIZATION"
+ },
+ {
+ "end": 53,
+ "start": 51,
+ "type": "REQUEST_EXECUTION"
+ },
+ {
+ "end": 57,
+ "start": 54,
+ "type": "RESPONSE_HANDLING"
+ }
+ ]
+ },
+ {
+ "clientMethod": {
+ "method": {
+ "service": {
+ "shortName": "JobService"
+ },
+ "shortName": "CreateHyperparameterTuningJob"
+ }
+ },
+ "file": "aiplatform_generated_aiplatform_v1_job_service_create_hyperparameter_tuning_job_sync.py",
+ "regionTag": "aiplatform_generated_aiplatform_v1_JobService_CreateHyperparameterTuningJob_sync",
+ "segments": [
+ {
+ "end": 56,
+ "start": 27,
+ "type": "FULL"
+ },
+ {
+ "end": 56,
+ "start": 27,
+ "type": "SHORT"
+ },
+ {
+ "end": 33,
+ "start": 31,
+ "type": "CLIENT_INITIALIZATION"
+ },
+ {
+ "end": 50,
+ "start": 34,
+ "type": "REQUEST_INITIALIZATION"
+ },
+ {
+ "end": 53,
+ "start": 51,
+ "type": "REQUEST_EXECUTION"
+ },
+ {
+ "end": 57,
+ "start": 54,
+ "type": "RESPONSE_HANDLING"
+ }
+ ]
+ },
+ {
+ "clientMethod": {
+ "async": true,
+ "method": {
+ "service": {
+ "shortName": "JobService"
+ },
+ "shortName": "CreateModelDeploymentMonitoringJob"
+ }
+ },
+ "file": "aiplatform_generated_aiplatform_v1_job_service_create_model_deployment_monitoring_job_async.py",
+ "regionTag": "aiplatform_generated_aiplatform_v1_JobService_CreateModelDeploymentMonitoringJob_async",
+ "segments": [
+ {
+ "end": 49,
+ "start": 27,
+ "type": "FULL"
+ },
+ {
+ "end": 49,
+ "start": 27,
+ "type": "SHORT"
+ },
+ {
+ "end": 33,
+ "start": 31,
+ "type": "CLIENT_INITIALIZATION"
+ },
+ {
+ "end": 43,
+ "start": 34,
+ "type": "REQUEST_INITIALIZATION"
+ },
+ {
+ "end": 46,
+ "start": 44,
+ "type": "REQUEST_EXECUTION"
+ },
+ {
+ "end": 50,
+ "start": 47,
+ "type": "RESPONSE_HANDLING"
+ }
+ ]
+ },
+ {
+ "clientMethod": {
+ "method": {
+ "service": {
+ "shortName": "JobService"
+ },
+ "shortName": "CreateModelDeploymentMonitoringJob"
+ }
+ },
+ "file": "aiplatform_generated_aiplatform_v1_job_service_create_model_deployment_monitoring_job_sync.py",
+ "regionTag": "aiplatform_generated_aiplatform_v1_JobService_CreateModelDeploymentMonitoringJob_sync",
+ "segments": [
+ {
+ "end": 49,
+ "start": 27,
+ "type": "FULL"
+ },
+ {
+ "end": 49,
+ "start": 27,
+ "type": "SHORT"
+ },
+ {
+ "end": 33,
+ "start": 31,
+ "type": "CLIENT_INITIALIZATION"
+ },
+ {
+ "end": 43,
+ "start": 34,
+ "type": "REQUEST_INITIALIZATION"
+ },
+ {
+ "end": 46,
+ "start": 44,
+ "type": "REQUEST_EXECUTION"
+ },
+ {
+ "end": 50,
+ "start": 47,
+ "type": "RESPONSE_HANDLING"
+ }
+ ]
+ },
+ {
+ "clientMethod": {
+ "async": true,
+ "method": {
+ "service": {
+ "shortName": "JobService"
+ },
+ "shortName": "DeleteBatchPredictionJob"
+ }
+ },
+ "file": "aiplatform_generated_aiplatform_v1_job_service_delete_batch_prediction_job_async.py",
+ "regionTag": "aiplatform_generated_aiplatform_v1_JobService_DeleteBatchPredictionJob_async",
+ "segments": [
+ {
+ "end": 48,
+ "start": 27,
+ "type": "FULL"
+ },
+ {
+ "end": 48,
+ "start": 27,
+ "type": "SHORT"
+ },
+ {
+ "end": 33,
+ "start": 31,
+ "type": "CLIENT_INITIALIZATION"
+ },
+ {
+ "end": 38,
+ "start": 34,
+ "type": "REQUEST_INITIALIZATION"
+ },
+ {
+ "end": 45,
+ "start": 39,
+ "type": "REQUEST_EXECUTION"
+ },
+ {
+ "end": 49,
+ "start": 46,
+ "type": "RESPONSE_HANDLING"
+ }
+ ]
+ },
+ {
+ "clientMethod": {
+ "method": {
+ "service": {
+ "shortName": "JobService"
+ },
+ "shortName": "DeleteBatchPredictionJob"
+ }
+ },
+ "file": "aiplatform_generated_aiplatform_v1_job_service_delete_batch_prediction_job_sync.py",
+ "regionTag": "aiplatform_generated_aiplatform_v1_JobService_DeleteBatchPredictionJob_sync",
+ "segments": [
+ {
+ "end": 48,
+ "start": 27,
+ "type": "FULL"
+ },
+ {
+ "end": 48,
+ "start": 27,
+ "type": "SHORT"
+ },
+ {
+ "end": 33,
+ "start": 31,
+ "type": "CLIENT_INITIALIZATION"
+ },
+ {
+ "end": 38,
+ "start": 34,
+ "type": "REQUEST_INITIALIZATION"
+ },
+ {
+ "end": 45,
+ "start": 39,
+ "type": "REQUEST_EXECUTION"
+ },
+ {
+ "end": 49,
+ "start": 46,
+ "type": "RESPONSE_HANDLING"
+ }
+ ]
+ },
+ {
+ "clientMethod": {
+ "async": true,
+ "method": {
+ "service": {
+ "shortName": "JobService"
+ },
+ "shortName": "DeleteCustomJob"
+ }
+ },
+ "file": "aiplatform_generated_aiplatform_v1_job_service_delete_custom_job_async.py",
+ "regionTag": "aiplatform_generated_aiplatform_v1_JobService_DeleteCustomJob_async",
+ "segments": [
+ {
+ "end": 48,
+ "start": 27,
+ "type": "FULL"
+ },
+ {
+ "end": 48,
+ "start": 27,
+ "type": "SHORT"
+ },
+ {
+ "end": 33,
+ "start": 31,
+ "type": "CLIENT_INITIALIZATION"
+ },
+ {
+ "end": 38,
+ "start": 34,
+ "type": "REQUEST_INITIALIZATION"
+ },
+ {
+ "end": 45,
+ "start": 39,
+ "type": "REQUEST_EXECUTION"
+ },
+ {
+ "end": 49,
+ "start": 46,
+ "type": "RESPONSE_HANDLING"
+ }
+ ]
+ },
+ {
+ "clientMethod": {
+ "method": {
+ "service": {
+ "shortName": "JobService"
+ },
+ "shortName": "DeleteCustomJob"
+ }
+ },
+ "file": "aiplatform_generated_aiplatform_v1_job_service_delete_custom_job_sync.py",
+ "regionTag": "aiplatform_generated_aiplatform_v1_JobService_DeleteCustomJob_sync",
+ "segments": [
+ {
+ "end": 48,
+ "start": 27,
+ "type": "FULL"
+ },
+ {
+ "end": 48,
+ "start": 27,
+ "type": "SHORT"
+ },
+ {
+ "end": 33,
+ "start": 31,
+ "type": "CLIENT_INITIALIZATION"
+ },
+ {
+ "end": 38,
+ "start": 34,
+ "type": "REQUEST_INITIALIZATION"
+ },
+ {
+ "end": 45,
+ "start": 39,
+ "type": "REQUEST_EXECUTION"
+ },
+ {
+ "end": 49,
+ "start": 46,
+ "type": "RESPONSE_HANDLING"
+ }
+ ]
+ },
+ {
+ "clientMethod": {
+ "async": true,
+ "method": {
+ "service": {
+ "shortName": "JobService"
+ },
+ "shortName": "DeleteDataLabelingJob"
+ }
+ },
+ "file": "aiplatform_generated_aiplatform_v1_job_service_delete_data_labeling_job_async.py",
+ "regionTag": "aiplatform_generated_aiplatform_v1_JobService_DeleteDataLabelingJob_async",
+ "segments": [
+ {
+ "end": 48,
+ "start": 27,
+ "type": "FULL"
+ },
+ {
+ "end": 48,
+ "start": 27,
+ "type": "SHORT"
+ },
+ {
+ "end": 33,
+ "start": 31,
+ "type": "CLIENT_INITIALIZATION"
+ },
+ {
+ "end": 38,
+ "start": 34,
+ "type": "REQUEST_INITIALIZATION"
+ },
+ {
+ "end": 45,
+ "start": 39,
+ "type": "REQUEST_EXECUTION"
+ },
+ {
+ "end": 49,
+ "start": 46,
+ "type": "RESPONSE_HANDLING"
+ }
+ ]
+ },
+ {
+ "clientMethod": {
+ "method": {
+ "service": {
+ "shortName": "JobService"
+ },
+ "shortName": "DeleteDataLabelingJob"
+ }
+ },
+ "file": "aiplatform_generated_aiplatform_v1_job_service_delete_data_labeling_job_sync.py",
+ "regionTag": "aiplatform_generated_aiplatform_v1_JobService_DeleteDataLabelingJob_sync",
+ "segments": [
+ {
+ "end": 48,
+ "start": 27,
+ "type": "FULL"
+ },
+ {
+ "end": 48,
+ "start": 27,
+ "type": "SHORT"
+ },
+ {
+ "end": 33,
+ "start": 31,
+ "type": "CLIENT_INITIALIZATION"
+ },
+ {
+ "end": 38,
+ "start": 34,
+ "type": "REQUEST_INITIALIZATION"
+ },
+ {
+ "end": 45,
+ "start": 39,
+ "type": "REQUEST_EXECUTION"
+ },
+ {
+ "end": 49,
+ "start": 46,
+ "type": "RESPONSE_HANDLING"
+ }
+ ]
+ },
+ {
+ "clientMethod": {
+ "async": true,
+ "method": {
+ "service": {
+ "shortName": "JobService"
+ },
+ "shortName": "DeleteHyperparameterTuningJob"
+ }
+ },
+ "file": "aiplatform_generated_aiplatform_v1_job_service_delete_hyperparameter_tuning_job_async.py",
+ "regionTag": "aiplatform_generated_aiplatform_v1_JobService_DeleteHyperparameterTuningJob_async",
+ "segments": [
+ {
+ "end": 48,
+ "start": 27,
+ "type": "FULL"
+ },
+ {
+ "end": 48,
+ "start": 27,
+ "type": "SHORT"
+ },
+ {
+ "end": 33,
+ "start": 31,
+ "type": "CLIENT_INITIALIZATION"
+ },
+ {
+ "end": 38,
+ "start": 34,
+ "type": "REQUEST_INITIALIZATION"
+ },
+ {
+ "end": 45,
+ "start": 39,
+ "type": "REQUEST_EXECUTION"
+ },
+ {
+ "end": 49,
+ "start": 46,
+ "type": "RESPONSE_HANDLING"
+ }
+ ]
+ },
+ {
+ "clientMethod": {
+ "method": {
+ "service": {
+ "shortName": "JobService"
+ },
+ "shortName": "DeleteHyperparameterTuningJob"
+ }
+ },
+ "file": "aiplatform_generated_aiplatform_v1_job_service_delete_hyperparameter_tuning_job_sync.py",
+ "regionTag": "aiplatform_generated_aiplatform_v1_JobService_DeleteHyperparameterTuningJob_sync",
+ "segments": [
+ {
+ "end": 48,
+ "start": 27,
+ "type": "FULL"
+ },
+ {
+ "end": 48,
+ "start": 27,
+ "type": "SHORT"
+ },
+ {
+ "end": 33,
+ "start": 31,
+ "type": "CLIENT_INITIALIZATION"
+ },
+ {
+ "end": 38,
+ "start": 34,
+ "type": "REQUEST_INITIALIZATION"
+ },
+ {
+ "end": 45,
+ "start": 39,
+ "type": "REQUEST_EXECUTION"
+ },
+ {
+ "end": 49,
+ "start": 46,
+ "type": "RESPONSE_HANDLING"
+ }
+ ]
+ },
+ {
+ "clientMethod": {
+ "async": true,
+ "method": {
+ "service": {
+ "shortName": "JobService"
+ },
+ "shortName": "DeleteModelDeploymentMonitoringJob"
+ }
+ },
+ "file": "aiplatform_generated_aiplatform_v1_job_service_delete_model_deployment_monitoring_job_async.py",
+ "regionTag": "aiplatform_generated_aiplatform_v1_JobService_DeleteModelDeploymentMonitoringJob_async",
+ "segments": [
+ {
+ "end": 48,
+ "start": 27,
+ "type": "FULL"
+ },
+ {
+ "end": 48,
+ "start": 27,
+ "type": "SHORT"
+ },
+ {
+ "end": 33,
+ "start": 31,
+ "type": "CLIENT_INITIALIZATION"
+ },
+ {
+ "end": 38,
+ "start": 34,
+ "type": "REQUEST_INITIALIZATION"
+ },
+ {
+ "end": 45,
+ "start": 39,
+ "type": "REQUEST_EXECUTION"
+ },
+ {
+ "end": 49,
+ "start": 46,
+ "type": "RESPONSE_HANDLING"
+ }
+ ]
+ },
+ {
+ "clientMethod": {
+ "method": {
+ "service": {
+ "shortName": "JobService"
+ },
+ "shortName": "DeleteModelDeploymentMonitoringJob"
+ }
+ },
+ "file": "aiplatform_generated_aiplatform_v1_job_service_delete_model_deployment_monitoring_job_sync.py",
+ "regionTag": "aiplatform_generated_aiplatform_v1_JobService_DeleteModelDeploymentMonitoringJob_sync",
+ "segments": [
+ {
+ "end": 48,
+ "start": 27,
+ "type": "FULL"
+ },
+ {
+ "end": 48,
+ "start": 27,
+ "type": "SHORT"
+ },
+ {
+ "end": 33,
+ "start": 31,
+ "type": "CLIENT_INITIALIZATION"
+ },
+ {
+ "end": 38,
+ "start": 34,
+ "type": "REQUEST_INITIALIZATION"
+ },
+ {
+ "end": 45,
+ "start": 39,
+ "type": "REQUEST_EXECUTION"
+ },
+ {
+ "end": 49,
+ "start": 46,
+ "type": "RESPONSE_HANDLING"
+ }
+ ]
+ },
+ {
+ "clientMethod": {
+ "async": true,
+ "method": {
+ "service": {
+ "shortName": "JobService"
+ },
+ "shortName": "GetBatchPredictionJob"
+ }
+ },
+ "file": "aiplatform_generated_aiplatform_v1_job_service_get_batch_prediction_job_async.py",
+ "regionTag": "aiplatform_generated_aiplatform_v1_JobService_GetBatchPredictionJob_async",
+ "segments": [
+ {
+ "end": 44,
+ "start": 27,
+ "type": "FULL"
+ },
+ {
+ "end": 44,
+ "start": 27,
+ "type": "SHORT"
+ },
+ {
+ "end": 33,
+ "start": 31,
+ "type": "CLIENT_INITIALIZATION"
+ },
+ {
+ "end": 38,
+ "start": 34,
+ "type": "REQUEST_INITIALIZATION"
+ },
+ {
+ "end": 41,
+ "start": 39,
+ "type": "REQUEST_EXECUTION"
+ },
+ {
+ "end": 45,
+ "start": 42,
+ "type": "RESPONSE_HANDLING"
+ }
+ ]
+ },
+ {
+ "clientMethod": {
+ "method": {
+ "service": {
+ "shortName": "JobService"
+ },
+ "shortName": "GetBatchPredictionJob"
+ }
+ },
+ "file": "aiplatform_generated_aiplatform_v1_job_service_get_batch_prediction_job_sync.py",
+ "regionTag": "aiplatform_generated_aiplatform_v1_JobService_GetBatchPredictionJob_sync",
+ "segments": [
+ {
+ "end": 44,
+ "start": 27,
+ "type": "FULL"
+ },
+ {
+ "end": 44,
+ "start": 27,
+ "type": "SHORT"
+ },
+ {
+ "end": 33,
+ "start": 31,
+ "type": "CLIENT_INITIALIZATION"
+ },
+ {
+ "end": 38,
+ "start": 34,
+ "type": "REQUEST_INITIALIZATION"
+ },
+ {
+ "end": 41,
+ "start": 39,
+ "type": "REQUEST_EXECUTION"
+ },
+ {
+ "end": 45,
+ "start": 42,
+ "type": "RESPONSE_HANDLING"
+ }
+ ]
+ },
+ {
+ "clientMethod": {
+ "async": true,
+ "method": {
+ "service": {
+ "shortName": "JobService"
+ },
+ "shortName": "GetCustomJob"
+ }
+ },
+ "file": "aiplatform_generated_aiplatform_v1_job_service_get_custom_job_async.py",
+ "regionTag": "aiplatform_generated_aiplatform_v1_JobService_GetCustomJob_async",
+ "segments": [
+ {
+ "end": 44,
+ "start": 27,
+ "type": "FULL"
+ },
+ {
+ "end": 44,
+ "start": 27,
+ "type": "SHORT"
+ },
+ {
+ "end": 33,
+ "start": 31,
+ "type": "CLIENT_INITIALIZATION"
+ },
+ {
+ "end": 38,
+ "start": 34,
+ "type": "REQUEST_INITIALIZATION"
+ },
+ {
+ "end": 41,
+ "start": 39,
+ "type": "REQUEST_EXECUTION"
+ },
+ {
+ "end": 45,
+ "start": 42,
+ "type": "RESPONSE_HANDLING"
+ }
+ ]
+ },
+ {
+ "clientMethod": {
+ "method": {
+ "service": {
+ "shortName": "JobService"
+ },
+ "shortName": "GetCustomJob"
+ }
+ },
+ "file": "aiplatform_generated_aiplatform_v1_job_service_get_custom_job_sync.py",
+ "regionTag": "aiplatform_generated_aiplatform_v1_JobService_GetCustomJob_sync",
+ "segments": [
+ {
+ "end": 44,
+ "start": 27,
+ "type": "FULL"
+ },
+ {
+ "end": 44,
+ "start": 27,
+ "type": "SHORT"
+ },
+ {
+ "end": 33,
+ "start": 31,
+ "type": "CLIENT_INITIALIZATION"
+ },
+ {
+ "end": 38,
+ "start": 34,
+ "type": "REQUEST_INITIALIZATION"
+ },
+ {
+ "end": 41,
+ "start": 39,
+ "type": "REQUEST_EXECUTION"
+ },
+ {
+ "end": 45,
+ "start": 42,
+ "type": "RESPONSE_HANDLING"
+ }
+ ]
+ },
+ {
+ "clientMethod": {
+ "async": true,
+ "method": {
+ "service": {
+ "shortName": "JobService"
+ },
+ "shortName": "GetDataLabelingJob"
+ }
+ },
+ "file": "aiplatform_generated_aiplatform_v1_job_service_get_data_labeling_job_async.py",
+ "regionTag": "aiplatform_generated_aiplatform_v1_JobService_GetDataLabelingJob_async",
+ "segments": [
+ {
+ "end": 44,
+ "start": 27,
+ "type": "FULL"
+ },
+ {
+ "end": 44,
+ "start": 27,
+ "type": "SHORT"
+ },
+ {
+ "end": 33,
+ "start": 31,
+ "type": "CLIENT_INITIALIZATION"
+ },
+ {
+ "end": 38,
+ "start": 34,
+ "type": "REQUEST_INITIALIZATION"
+ },
+ {
+ "end": 41,
+ "start": 39,
+ "type": "REQUEST_EXECUTION"
+ },
+ {
+ "end": 45,
+ "start": 42,
+ "type": "RESPONSE_HANDLING"
+ }
+ ]
+ },
+ {
+ "clientMethod": {
+ "method": {
+ "service": {
+ "shortName": "JobService"
+ },
+ "shortName": "GetDataLabelingJob"
+ }
+ },
+ "file": "aiplatform_generated_aiplatform_v1_job_service_get_data_labeling_job_sync.py",
+ "regionTag": "aiplatform_generated_aiplatform_v1_JobService_GetDataLabelingJob_sync",
+ "segments": [
+ {
+ "end": 44,
+ "start": 27,
+ "type": "FULL"
+ },
+ {
+ "end": 44,
+ "start": 27,
+ "type": "SHORT"
+ },
+ {
+ "end": 33,
+ "start": 31,
+ "type": "CLIENT_INITIALIZATION"
+ },
+ {
+ "end": 38,
+ "start": 34,
+ "type": "REQUEST_INITIALIZATION"
+ },
+ {
+ "end": 41,
+ "start": 39,
+ "type": "REQUEST_EXECUTION"
+ },
+ {
+ "end": 45,
+ "start": 42,
+ "type": "RESPONSE_HANDLING"
+ }
+ ]
+ },
+ {
+ "clientMethod": {
+ "async": true,
+ "method": {
+ "service": {
+ "shortName": "JobService"
+ },
+ "shortName": "GetHyperparameterTuningJob"
+ }
+ },
+ "file": "aiplatform_generated_aiplatform_v1_job_service_get_hyperparameter_tuning_job_async.py",
+ "regionTag": "aiplatform_generated_aiplatform_v1_JobService_GetHyperparameterTuningJob_async",
+ "segments": [
+ {
+ "end": 44,
+ "start": 27,
+ "type": "FULL"
+ },
+ {
+ "end": 44,
+ "start": 27,
+ "type": "SHORT"
+ },
+ {
+ "end": 33,
+ "start": 31,
+ "type": "CLIENT_INITIALIZATION"
+ },
+ {
+ "end": 38,
+ "start": 34,
+ "type": "REQUEST_INITIALIZATION"
+ },
+ {
+ "end": 41,
+ "start": 39,
+ "type": "REQUEST_EXECUTION"
+ },
+ {
+ "end": 45,
+ "start": 42,
+ "type": "RESPONSE_HANDLING"
+ }
+ ]
+ },
+ {
+ "clientMethod": {
+ "method": {
+ "service": {
+ "shortName": "JobService"
+ },
+ "shortName": "GetHyperparameterTuningJob"
+ }
+ },
+ "file": "aiplatform_generated_aiplatform_v1_job_service_get_hyperparameter_tuning_job_sync.py",
+ "regionTag": "aiplatform_generated_aiplatform_v1_JobService_GetHyperparameterTuningJob_sync",
+ "segments": [
+ {
+ "end": 44,
+ "start": 27,
+ "type": "FULL"
+ },
+ {
+ "end": 44,
+ "start": 27,
+ "type": "SHORT"
+ },
+ {
+ "end": 33,
+ "start": 31,
+ "type": "CLIENT_INITIALIZATION"
+ },
+ {
+ "end": 38,
+ "start": 34,
+ "type": "REQUEST_INITIALIZATION"
+ },
+ {
+ "end": 41,
+ "start": 39,
+ "type": "REQUEST_EXECUTION"
+ },
+ {
+ "end": 45,
+ "start": 42,
+ "type": "RESPONSE_HANDLING"
+ }
+ ]
+ },
+ {
+ "clientMethod": {
+ "async": true,
+ "method": {
+ "service": {
+ "shortName": "JobService"
+ },
+ "shortName": "GetModelDeploymentMonitoringJob"
+ }
+ },
+ "file": "aiplatform_generated_aiplatform_v1_job_service_get_model_deployment_monitoring_job_async.py",
+ "regionTag": "aiplatform_generated_aiplatform_v1_JobService_GetModelDeploymentMonitoringJob_async",
+ "segments": [
+ {
+ "end": 44,
+ "start": 27,
+ "type": "FULL"
+ },
+ {
+ "end": 44,
+ "start": 27,
+ "type": "SHORT"
+ },
+ {
+ "end": 33,
+ "start": 31,
+ "type": "CLIENT_INITIALIZATION"
+ },
+ {
+ "end": 38,
+ "start": 34,
+ "type": "REQUEST_INITIALIZATION"
+ },
+ {
+ "end": 41,
+ "start": 39,
+ "type": "REQUEST_EXECUTION"
+ },
+ {
+ "end": 45,
+ "start": 42,
+ "type": "RESPONSE_HANDLING"
+ }
+ ]
+ },
+ {
+ "clientMethod": {
+ "method": {
+ "service": {
+ "shortName": "JobService"
+ },
+ "shortName": "GetModelDeploymentMonitoringJob"
+ }
+ },
+ "file": "aiplatform_generated_aiplatform_v1_job_service_get_model_deployment_monitoring_job_sync.py",
+ "regionTag": "aiplatform_generated_aiplatform_v1_JobService_GetModelDeploymentMonitoringJob_sync",
+ "segments": [
+ {
+ "end": 44,
+ "start": 27,
+ "type": "FULL"
+ },
+ {
+ "end": 44,
+ "start": 27,
+ "type": "SHORT"
+ },
+ {
+ "end": 33,
+ "start": 31,
+ "type": "CLIENT_INITIALIZATION"
+ },
+ {
+ "end": 38,
+ "start": 34,
+ "type": "REQUEST_INITIALIZATION"
+ },
+ {
+ "end": 41,
+ "start": 39,
+ "type": "REQUEST_EXECUTION"
+ },
+ {
+ "end": 45,
+ "start": 42,
+ "type": "RESPONSE_HANDLING"
+ }
+ ]
+ },
+ {
+ "clientMethod": {
+ "async": true,
+ "method": {
+ "service": {
+ "shortName": "JobService"
+ },
+ "shortName": "ListBatchPredictionJobs"
+ }
+ },
+ "file": "aiplatform_generated_aiplatform_v1_job_service_list_batch_prediction_jobs_async.py",
+ "regionTag": "aiplatform_generated_aiplatform_v1_JobService_ListBatchPredictionJobs_async",
+ "segments": [
+ {
+ "end": 45,
+ "start": 27,
+ "type": "FULL"
+ },
+ {
+ "end": 45,
+ "start": 27,
+ "type": "SHORT"
+ },
+ {
+ "end": 33,
+ "start": 31,
+ "type": "CLIENT_INITIALIZATION"
+ },
+ {
+ "end": 38,
+ "start": 34,
+ "type": "REQUEST_INITIALIZATION"
+ },
+ {
+ "end": 41,
+ "start": 39,
+ "type": "REQUEST_EXECUTION"
+ },
+ {
+ "end": 46,
+ "start": 42,
+ "type": "RESPONSE_HANDLING"
+ }
+ ]
+ },
+ {
+ "clientMethod": {
+ "method": {
+ "service": {
+ "shortName": "JobService"
+ },
+ "shortName": "ListBatchPredictionJobs"
+ }
+ },
+ "file": "aiplatform_generated_aiplatform_v1_job_service_list_batch_prediction_jobs_sync.py",
+ "regionTag": "aiplatform_generated_aiplatform_v1_JobService_ListBatchPredictionJobs_sync",
+ "segments": [
+ {
+ "end": 45,
+ "start": 27,
+ "type": "FULL"
+ },
+ {
+ "end": 45,
+ "start": 27,
+ "type": "SHORT"
+ },
+ {
+ "end": 33,
+ "start": 31,
+ "type": "CLIENT_INITIALIZATION"
+ },
+ {
+ "end": 38,
+ "start": 34,
+ "type": "REQUEST_INITIALIZATION"
+ },
+ {
+ "end": 41,
+ "start": 39,
+ "type": "REQUEST_EXECUTION"
+ },
+ {
+ "end": 46,
+ "start": 42,
+ "type": "RESPONSE_HANDLING"
+ }
+ ]
+ },
+ {
+ "clientMethod": {
+ "async": true,
+ "method": {
+ "service": {
+ "shortName": "JobService"
+ },
+ "shortName": "ListCustomJobs"
+ }
+ },
+ "file": "aiplatform_generated_aiplatform_v1_job_service_list_custom_jobs_async.py",
+ "regionTag": "aiplatform_generated_aiplatform_v1_JobService_ListCustomJobs_async",
+ "segments": [
+ {
+ "end": 45,
+ "start": 27,
+ "type": "FULL"
+ },
+ {
+ "end": 45,
+ "start": 27,
+ "type": "SHORT"
+ },
+ {
+ "end": 33,
+ "start": 31,
+ "type": "CLIENT_INITIALIZATION"
+ },
+ {
+ "end": 38,
+ "start": 34,
+ "type": "REQUEST_INITIALIZATION"
+ },
+ {
+ "end": 41,
+ "start": 39,
+ "type": "REQUEST_EXECUTION"
+ },
+ {
+ "end": 46,
+ "start": 42,
+ "type": "RESPONSE_HANDLING"
+ }
+ ]
+ },
+ {
+ "clientMethod": {
+ "method": {
+ "service": {
+ "shortName": "JobService"
+ },
+ "shortName": "ListCustomJobs"
+ }
+ },
+ "file": "aiplatform_generated_aiplatform_v1_job_service_list_custom_jobs_sync.py",
+ "regionTag": "aiplatform_generated_aiplatform_v1_JobService_ListCustomJobs_sync",
+ "segments": [
+ {
+ "end": 45,
+ "start": 27,
+ "type": "FULL"
+ },
+ {
+ "end": 45,
+ "start": 27,
+ "type": "SHORT"
+ },
+ {
+ "end": 33,
+ "start": 31,
+ "type": "CLIENT_INITIALIZATION"
+ },
+ {
+ "end": 38,
+ "start": 34,
+ "type": "REQUEST_INITIALIZATION"
+ },
+ {
+ "end": 41,
+ "start": 39,
+ "type": "REQUEST_EXECUTION"
+ },
+ {
+ "end": 46,
+ "start": 42,
+ "type": "RESPONSE_HANDLING"
+ }
+ ]
+ },
+ {
+ "clientMethod": {
+ "async": true,
+ "method": {
+ "service": {
+ "shortName": "JobService"
+ },
+ "shortName": "ListDataLabelingJobs"
+ }
+ },
+ "file": "aiplatform_generated_aiplatform_v1_job_service_list_data_labeling_jobs_async.py",
+ "regionTag": "aiplatform_generated_aiplatform_v1_JobService_ListDataLabelingJobs_async",
+ "segments": [
+ {
+ "end": 45,
+ "start": 27,
+ "type": "FULL"
+ },
+ {
+ "end": 45,
+ "start": 27,
+ "type": "SHORT"
+ },
+ {
+ "end": 33,
+ "start": 31,
+ "type": "CLIENT_INITIALIZATION"
+ },
+ {
+ "end": 38,
+ "start": 34,
+ "type": "REQUEST_INITIALIZATION"
+ },
+ {
+ "end": 41,
+ "start": 39,
+ "type": "REQUEST_EXECUTION"
+ },
+ {
+ "end": 46,
+ "start": 42,
+ "type": "RESPONSE_HANDLING"
+ }
+ ]
+ },
+ {
+ "clientMethod": {
+ "method": {
+ "service": {
+ "shortName": "JobService"
+ },
+ "shortName": "ListDataLabelingJobs"
+ }
+ },
+ "file": "aiplatform_generated_aiplatform_v1_job_service_list_data_labeling_jobs_sync.py",
+ "regionTag": "aiplatform_generated_aiplatform_v1_JobService_ListDataLabelingJobs_sync",
+ "segments": [
+ {
+ "end": 45,
+ "start": 27,
+ "type": "FULL"
+ },
+ {
+ "end": 45,
+ "start": 27,
+ "type": "SHORT"
+ },
+ {
+ "end": 33,
+ "start": 31,
+ "type": "CLIENT_INITIALIZATION"
+ },
+ {
+ "end": 38,
+ "start": 34,
+ "type": "REQUEST_INITIALIZATION"
+ },
+ {
+ "end": 41,
+ "start": 39,
+ "type": "REQUEST_EXECUTION"
+ },
+ {
+ "end": 46,
+ "start": 42,
+ "type": "RESPONSE_HANDLING"
+ }
+ ]
+ },
+ {
+ "clientMethod": {
+ "async": true,
+ "method": {
+ "service": {
+ "shortName": "JobService"
+ },
+ "shortName": "ListHyperparameterTuningJobs"
+ }
+ },
+ "file": "aiplatform_generated_aiplatform_v1_job_service_list_hyperparameter_tuning_jobs_async.py",
+ "regionTag": "aiplatform_generated_aiplatform_v1_JobService_ListHyperparameterTuningJobs_async",
+ "segments": [
+ {
+ "end": 45,
+ "start": 27,
+ "type": "FULL"
+ },
+ {
+ "end": 45,
+ "start": 27,
+ "type": "SHORT"
+ },
+ {
+ "end": 33,
+ "start": 31,
+ "type": "CLIENT_INITIALIZATION"
+ },
+ {
+ "end": 38,
+ "start": 34,
+ "type": "REQUEST_INITIALIZATION"
+ },
+ {
+ "end": 41,
+ "start": 39,
+ "type": "REQUEST_EXECUTION"
+ },
+ {
+ "end": 46,
+ "start": 42,
+ "type": "RESPONSE_HANDLING"
+ }
+ ]
+ },
+ {
+ "clientMethod": {
+ "method": {
+ "service": {
+ "shortName": "JobService"
+ },
+ "shortName": "ListHyperparameterTuningJobs"
+ }
+ },
+ "file": "aiplatform_generated_aiplatform_v1_job_service_list_hyperparameter_tuning_jobs_sync.py",
+ "regionTag": "aiplatform_generated_aiplatform_v1_JobService_ListHyperparameterTuningJobs_sync",
+ "segments": [
+ {
+ "end": 45,
+ "start": 27,
+ "type": "FULL"
+ },
+ {
+ "end": 45,
+ "start": 27,
+ "type": "SHORT"
+ },
+ {
+ "end": 33,
+ "start": 31,
+ "type": "CLIENT_INITIALIZATION"
+ },
+ {
+ "end": 38,
+ "start": 34,
+ "type": "REQUEST_INITIALIZATION"
+ },
+ {
+ "end": 41,
+ "start": 39,
+ "type": "REQUEST_EXECUTION"
+ },
+ {
+ "end": 46,
+ "start": 42,
+ "type": "RESPONSE_HANDLING"
+ }
+ ]
+ },
+ {
+ "clientMethod": {
+ "async": true,
+ "method": {
+ "service": {
+ "shortName": "JobService"
+ },
+ "shortName": "ListModelDeploymentMonitoringJobs"
+ }
+ },
+ "file": "aiplatform_generated_aiplatform_v1_job_service_list_model_deployment_monitoring_jobs_async.py",
+ "regionTag": "aiplatform_generated_aiplatform_v1_JobService_ListModelDeploymentMonitoringJobs_async",
+ "segments": [
+ {
+ "end": 45,
+ "start": 27,
+ "type": "FULL"
+ },
+ {
+ "end": 45,
+ "start": 27,
+ "type": "SHORT"
+ },
+ {
+ "end": 33,
+ "start": 31,
+ "type": "CLIENT_INITIALIZATION"
+ },
+ {
+ "end": 38,
+ "start": 34,
+ "type": "REQUEST_INITIALIZATION"
+ },
+ {
+ "end": 41,
+ "start": 39,
+ "type": "REQUEST_EXECUTION"
+ },
+ {
+ "end": 46,
+ "start": 42,
+ "type": "RESPONSE_HANDLING"
+ }
+ ]
+ },
+ {
+ "clientMethod": {
+ "method": {
+ "service": {
+ "shortName": "JobService"
+ },
+ "shortName": "ListModelDeploymentMonitoringJobs"
+ }
+ },
+ "file": "aiplatform_generated_aiplatform_v1_job_service_list_model_deployment_monitoring_jobs_sync.py",
+ "regionTag": "aiplatform_generated_aiplatform_v1_JobService_ListModelDeploymentMonitoringJobs_sync",
+ "segments": [
+ {
+ "end": 45,
+ "start": 27,
+ "type": "FULL"
+ },
+ {
+ "end": 45,
+ "start": 27,
+ "type": "SHORT"
+ },
+ {
+ "end": 33,
+ "start": 31,
+ "type": "CLIENT_INITIALIZATION"
+ },
+ {
+ "end": 38,
+ "start": 34,
+ "type": "REQUEST_INITIALIZATION"
+ },
+ {
+ "end": 41,
+ "start": 39,
+ "type": "REQUEST_EXECUTION"
+ },
+ {
+ "end": 46,
+ "start": 42,
+ "type": "RESPONSE_HANDLING"
+ }
+ ]
+ },
+ {
+ "clientMethod": {
+ "async": true,
+ "method": {
+ "service": {
+ "shortName": "JobService"
+ },
+ "shortName": "PauseModelDeploymentMonitoringJob"
+ }
+ },
+ "file": "aiplatform_generated_aiplatform_v1_job_service_pause_model_deployment_monitoring_job_async.py",
+ "regionTag": "aiplatform_generated_aiplatform_v1_JobService_PauseModelDeploymentMonitoringJob_async",
+ "segments": [
+ {
+ "end": 42,
+ "start": 27,
+ "type": "FULL"
+ },
+ {
+ "end": 42,
+ "start": 27,
+ "type": "SHORT"
+ },
+ {
+ "end": 33,
+ "start": 31,
+ "type": "CLIENT_INITIALIZATION"
+ },
+ {
+ "end": 38,
+ "start": 34,
+ "type": "REQUEST_INITIALIZATION"
+ },
+ {
+ "start": 39,
+ "type": "REQUEST_EXECUTION"
+ },
+ {
+ "end": 43,
+ "type": "RESPONSE_HANDLING"
+ }
+ ]
+ },
+ {
+ "clientMethod": {
+ "method": {
+ "service": {
+ "shortName": "JobService"
+ },
+ "shortName": "PauseModelDeploymentMonitoringJob"
+ }
+ },
+ "file": "aiplatform_generated_aiplatform_v1_job_service_pause_model_deployment_monitoring_job_sync.py",
+ "regionTag": "aiplatform_generated_aiplatform_v1_JobService_PauseModelDeploymentMonitoringJob_sync",
+ "segments": [
+ {
+ "end": 42,
+ "start": 27,
+ "type": "FULL"
+ },
+ {
+ "end": 42,
+ "start": 27,
+ "type": "SHORT"
+ },
+ {
+ "end": 33,
+ "start": 31,
+ "type": "CLIENT_INITIALIZATION"
+ },
+ {
+ "end": 38,
+ "start": 34,
+ "type": "REQUEST_INITIALIZATION"
+ },
+ {
+ "start": 39,
+ "type": "REQUEST_EXECUTION"
+ },
+ {
+ "end": 43,
+ "type": "RESPONSE_HANDLING"
+ }
+ ]
+ },
+ {
+ "clientMethod": {
+ "async": true,
+ "method": {
+ "service": {
+ "shortName": "JobService"
+ },
+ "shortName": "ResumeModelDeploymentMonitoringJob"
+ }
+ },
+ "file": "aiplatform_generated_aiplatform_v1_job_service_resume_model_deployment_monitoring_job_async.py",
+ "regionTag": "aiplatform_generated_aiplatform_v1_JobService_ResumeModelDeploymentMonitoringJob_async",
+ "segments": [
+ {
+ "end": 42,
+ "start": 27,
+ "type": "FULL"
+ },
+ {
+ "end": 42,
+ "start": 27,
+ "type": "SHORT"
+ },
+ {
+ "end": 33,
+ "start": 31,
+ "type": "CLIENT_INITIALIZATION"
+ },
+ {
+ "end": 38,
+ "start": 34,
+ "type": "REQUEST_INITIALIZATION"
+ },
+ {
+ "start": 39,
+ "type": "REQUEST_EXECUTION"
+ },
+ {
+ "end": 43,
+ "type": "RESPONSE_HANDLING"
+ }
+ ]
+ },
+ {
+ "clientMethod": {
+ "method": {
+ "service": {
+ "shortName": "JobService"
+ },
+ "shortName": "ResumeModelDeploymentMonitoringJob"
+ }
+ },
+ "file": "aiplatform_generated_aiplatform_v1_job_service_resume_model_deployment_monitoring_job_sync.py",
+ "regionTag": "aiplatform_generated_aiplatform_v1_JobService_ResumeModelDeploymentMonitoringJob_sync",
+ "segments": [
+ {
+ "end": 42,
+ "start": 27,
+ "type": "FULL"
+ },
+ {
+ "end": 42,
+ "start": 27,
+ "type": "SHORT"
+ },
+ {
+ "end": 33,
+ "start": 31,
+ "type": "CLIENT_INITIALIZATION"
+ },
+ {
+ "end": 38,
+ "start": 34,
+ "type": "REQUEST_INITIALIZATION"
+ },
+ {
+ "start": 39,
+ "type": "REQUEST_EXECUTION"
+ },
+ {
+ "end": 43,
+ "type": "RESPONSE_HANDLING"
+ }
+ ]
+ },
+ {
+ "clientMethod": {
+ "async": true,
+ "method": {
+ "service": {
+ "shortName": "JobService"
+ },
+ "shortName": "SearchModelDeploymentMonitoringStatsAnomalies"
+ }
+ },
+ "file": "aiplatform_generated_aiplatform_v1_job_service_search_model_deployment_monitoring_stats_anomalies_async.py",
+ "regionTag": "aiplatform_generated_aiplatform_v1_JobService_SearchModelDeploymentMonitoringStatsAnomalies_async",
+ "segments": [
+ {
+ "end": 46,
+ "start": 27,
+ "type": "FULL"
+ },
+ {
+ "end": 46,
+ "start": 27,
+ "type": "SHORT"
+ },
+ {
+ "end": 33,
+ "start": 31,
+ "type": "CLIENT_INITIALIZATION"
+ },
+ {
+ "end": 39,
+ "start": 34,
+ "type": "REQUEST_INITIALIZATION"
+ },
+ {
+ "end": 42,
+ "start": 40,
+ "type": "REQUEST_EXECUTION"
+ },
+ {
+ "end": 47,
+ "start": 43,
+ "type": "RESPONSE_HANDLING"
+ }
+ ]
+ },
+ {
+ "clientMethod": {
+ "method": {
+ "service": {
+ "shortName": "JobService"
+ },
+ "shortName": "SearchModelDeploymentMonitoringStatsAnomalies"
+ }
+ },
+ "file": "aiplatform_generated_aiplatform_v1_job_service_search_model_deployment_monitoring_stats_anomalies_sync.py",
+ "regionTag": "aiplatform_generated_aiplatform_v1_JobService_SearchModelDeploymentMonitoringStatsAnomalies_sync",
+ "segments": [
+ {
+ "end": 46,
+ "start": 27,
+ "type": "FULL"
+ },
+ {
+ "end": 46,
+ "start": 27,
+ "type": "SHORT"
+ },
+ {
+ "end": 33,
+ "start": 31,
+ "type": "CLIENT_INITIALIZATION"
+ },
+ {
+ "end": 39,
+ "start": 34,
+ "type": "REQUEST_INITIALIZATION"
+ },
+ {
+ "end": 42,
+ "start": 40,
+ "type": "REQUEST_EXECUTION"
+ },
+ {
+ "end": 47,
+ "start": 43,
+ "type": "RESPONSE_HANDLING"
+ }
+ ]
+ },
+ {
+ "clientMethod": {
+ "async": true,
+ "method": {
+ "service": {
+ "shortName": "JobService"
+ },
+ "shortName": "UpdateModelDeploymentMonitoringJob"
+ }
+ },
+ "file": "aiplatform_generated_aiplatform_v1_job_service_update_model_deployment_monitoring_job_async.py",
+ "regionTag": "aiplatform_generated_aiplatform_v1_JobService_UpdateModelDeploymentMonitoringJob_async",
+ "segments": [
+ {
+ "end": 52,
+ "start": 27,
+ "type": "FULL"
+ },
+ {
+ "end": 52,
+ "start": 27,
+ "type": "SHORT"
+ },
+ {
+ "end": 33,
+ "start": 31,
+ "type": "CLIENT_INITIALIZATION"
+ },
+ {
+ "end": 42,
+ "start": 34,
+ "type": "REQUEST_INITIALIZATION"
+ },
+ {
+ "end": 49,
+ "start": 43,
+ "type": "REQUEST_EXECUTION"
+ },
+ {
+ "end": 53,
+ "start": 50,
+ "type": "RESPONSE_HANDLING"
+ }
+ ]
+ },
+ {
+ "clientMethod": {
+ "method": {
+ "service": {
+ "shortName": "JobService"
+ },
+ "shortName": "UpdateModelDeploymentMonitoringJob"
+ }
+ },
+ "file": "aiplatform_generated_aiplatform_v1_job_service_update_model_deployment_monitoring_job_sync.py",
+ "regionTag": "aiplatform_generated_aiplatform_v1_JobService_UpdateModelDeploymentMonitoringJob_sync",
+ "segments": [
+ {
+ "end": 52,
+ "start": 27,
+ "type": "FULL"
+ },
+ {
+ "end": 52,
+ "start": 27,
+ "type": "SHORT"
+ },
+ {
+ "end": 33,
+ "start": 31,
+ "type": "CLIENT_INITIALIZATION"
+ },
+ {
+ "end": 42,
+ "start": 34,
+ "type": "REQUEST_INITIALIZATION"
+ },
+ {
+ "end": 49,
+ "start": 43,
+ "type": "REQUEST_EXECUTION"
+ },
+ {
+ "end": 53,
+ "start": 50,
+ "type": "RESPONSE_HANDLING"
+ }
+ ]
+ },
+ {
+ "clientMethod": {
+ "async": true,
+ "method": {
+ "service": {
+ "shortName": "MetadataService"
+ },
+ "shortName": "AddContextArtifactsAndExecutions"
+ }
+ },
+ "file": "aiplatform_generated_aiplatform_v1_metadata_service_add_context_artifacts_and_executions_async.py",
+ "regionTag": "aiplatform_generated_aiplatform_v1_MetadataService_AddContextArtifactsAndExecutions_async",
+ "segments": [
+ {
+ "end": 44,
+ "start": 27,
+ "type": "FULL"
+ },
+ {
+ "end": 44,
+ "start": 27,
+ "type": "SHORT"
+ },
+ {
+ "end": 33,
+ "start": 31,
+ "type": "CLIENT_INITIALIZATION"
+ },
+ {
+ "end": 38,
+ "start": 34,
+ "type": "REQUEST_INITIALIZATION"
+ },
+ {
+ "end": 41,
+ "start": 39,
+ "type": "REQUEST_EXECUTION"
+ },
+ {
+ "end": 45,
+ "start": 42,
+ "type": "RESPONSE_HANDLING"
+ }
+ ]
+ },
+ {
+ "clientMethod": {
+ "method": {
+ "service": {
+ "shortName": "MetadataService"
+ },
+ "shortName": "AddContextArtifactsAndExecutions"
+ }
+ },
+ "file": "aiplatform_generated_aiplatform_v1_metadata_service_add_context_artifacts_and_executions_sync.py",
+ "regionTag": "aiplatform_generated_aiplatform_v1_MetadataService_AddContextArtifactsAndExecutions_sync",
+ "segments": [
+ {
+ "end": 44,
+ "start": 27,
+ "type": "FULL"
+ },
+ {
+ "end": 44,
+ "start": 27,
+ "type": "SHORT"
+ },
+ {
+ "end": 33,
+ "start": 31,
+ "type": "CLIENT_INITIALIZATION"
+ },
+ {
+ "end": 38,
+ "start": 34,
+ "type": "REQUEST_INITIALIZATION"
+ },
+ {
+ "end": 41,
+ "start": 39,
+ "type": "REQUEST_EXECUTION"
+ },
+ {
+ "end": 45,
+ "start": 42,
+ "type": "RESPONSE_HANDLING"
+ }
+ ]
+ },
+ {
+ "clientMethod": {
+ "async": true,
+ "method": {
+ "service": {
+ "shortName": "MetadataService"
+ },
+ "shortName": "AddContextChildren"
+ }
+ },
+ "file": "aiplatform_generated_aiplatform_v1_metadata_service_add_context_children_async.py",
+ "regionTag": "aiplatform_generated_aiplatform_v1_MetadataService_AddContextChildren_async",
+ "segments": [
+ {
+ "end": 44,
+ "start": 27,
+ "type": "FULL"
+ },
+ {
+ "end": 44,
+ "start": 27,
+ "type": "SHORT"
+ },
+ {
+ "end": 33,
+ "start": 31,
+ "type": "CLIENT_INITIALIZATION"
+ },
+ {
+ "end": 38,
+ "start": 34,
+ "type": "REQUEST_INITIALIZATION"
+ },
+ {
+ "end": 41,
+ "start": 39,
+ "type": "REQUEST_EXECUTION"
+ },
+ {
+ "end": 45,
+ "start": 42,
+ "type": "RESPONSE_HANDLING"
+ }
+ ]
+ },
+ {
+ "clientMethod": {
+ "method": {
+ "service": {
+ "shortName": "MetadataService"
+ },
+ "shortName": "AddContextChildren"
+ }
+ },
+ "file": "aiplatform_generated_aiplatform_v1_metadata_service_add_context_children_sync.py",
+ "regionTag": "aiplatform_generated_aiplatform_v1_MetadataService_AddContextChildren_sync",
+ "segments": [
+ {
+ "end": 44,
+ "start": 27,
+ "type": "FULL"
+ },
+ {
+ "end": 44,
+ "start": 27,
+ "type": "SHORT"
+ },
+ {
+ "end": 33,
+ "start": 31,
+ "type": "CLIENT_INITIALIZATION"
+ },
+ {
+ "end": 38,
+ "start": 34,
+ "type": "REQUEST_INITIALIZATION"
+ },
+ {
+ "end": 41,
+ "start": 39,
+ "type": "REQUEST_EXECUTION"
+ },
+ {
+ "end": 45,
+ "start": 42,
+ "type": "RESPONSE_HANDLING"
+ }
+ ]
+ },
+ {
+ "clientMethod": {
+ "async": true,
+ "method": {
+ "service": {
+ "shortName": "MetadataService"
+ },
+ "shortName": "AddExecutionEvents"
+ }
+ },
+ "file": "aiplatform_generated_aiplatform_v1_metadata_service_add_execution_events_async.py",
+ "regionTag": "aiplatform_generated_aiplatform_v1_MetadataService_AddExecutionEvents_async",
+ "segments": [
+ {
+ "end": 44,
+ "start": 27,
+ "type": "FULL"
+ },
+ {
+ "end": 44,
+ "start": 27,
+ "type": "SHORT"
+ },
+ {
+ "end": 33,
+ "start": 31,
+ "type": "CLIENT_INITIALIZATION"
+ },
+ {
+ "end": 38,
+ "start": 34,
+ "type": "REQUEST_INITIALIZATION"
+ },
+ {
+ "end": 41,
+ "start": 39,
+ "type": "REQUEST_EXECUTION"
+ },
+ {
+ "end": 45,
+ "start": 42,
+ "type": "RESPONSE_HANDLING"
+ }
+ ]
+ },
+ {
+ "clientMethod": {
+ "method": {
+ "service": {
+ "shortName": "MetadataService"
+ },
+ "shortName": "AddExecutionEvents"
+ }
+ },
+ "file": "aiplatform_generated_aiplatform_v1_metadata_service_add_execution_events_sync.py",
+ "regionTag": "aiplatform_generated_aiplatform_v1_MetadataService_AddExecutionEvents_sync",
+ "segments": [
+ {
+ "end": 44,
+ "start": 27,
+ "type": "FULL"
+ },
+ {
+ "end": 44,
+ "start": 27,
+ "type": "SHORT"
+ },
+ {
+ "end": 33,
+ "start": 31,
+ "type": "CLIENT_INITIALIZATION"
+ },
+ {
+ "end": 38,
+ "start": 34,
+ "type": "REQUEST_INITIALIZATION"
+ },
+ {
+ "end": 41,
+ "start": 39,
+ "type": "REQUEST_EXECUTION"
+ },
+ {
+ "end": 45,
+ "start": 42,
+ "type": "RESPONSE_HANDLING"
+ }
+ ]
+ },
+ {
+ "clientMethod": {
+ "async": true,
+ "method": {
+ "service": {
+ "shortName": "MetadataService"
+ },
+ "shortName": "CreateArtifact"
+ }
+ },
+ "file": "aiplatform_generated_aiplatform_v1_metadata_service_create_artifact_async.py",
+ "regionTag": "aiplatform_generated_aiplatform_v1_MetadataService_CreateArtifact_async",
+ "segments": [
+ {
+ "end": 44,
+ "start": 27,
+ "type": "FULL"
+ },
+ {
+ "end": 44,
+ "start": 27,
+ "type": "SHORT"
+ },
+ {
+ "end": 33,
+ "start": 31,
+ "type": "CLIENT_INITIALIZATION"
+ },
+ {
+ "end": 38,
+ "start": 34,
+ "type": "REQUEST_INITIALIZATION"
+ },
+ {
+ "end": 41,
+ "start": 39,
+ "type": "REQUEST_EXECUTION"
+ },
+ {
+ "end": 45,
+ "start": 42,
+ "type": "RESPONSE_HANDLING"
+ }
+ ]
+ },
+ {
+ "clientMethod": {
+ "method": {
+ "service": {
+ "shortName": "MetadataService"
+ },
+ "shortName": "CreateArtifact"
+ }
+ },
+ "file": "aiplatform_generated_aiplatform_v1_metadata_service_create_artifact_sync.py",
+ "regionTag": "aiplatform_generated_aiplatform_v1_MetadataService_CreateArtifact_sync",
+ "segments": [
+ {
+ "end": 44,
+ "start": 27,
+ "type": "FULL"
+ },
+ {
+ "end": 44,
+ "start": 27,
+ "type": "SHORT"
+ },
+ {
+ "end": 33,
+ "start": 31,
+ "type": "CLIENT_INITIALIZATION"
+ },
+ {
+ "end": 38,
+ "start": 34,
+ "type": "REQUEST_INITIALIZATION"
+ },
+ {
+ "end": 41,
+ "start": 39,
+ "type": "REQUEST_EXECUTION"
+ },
+ {
+ "end": 45,
+ "start": 42,
+ "type": "RESPONSE_HANDLING"
+ }
+ ]
+ },
+ {
+ "clientMethod": {
+ "async": true,
+ "method": {
+ "service": {
+ "shortName": "MetadataService"
+ },
+ "shortName": "CreateContext"
+ }
+ },
+ "file": "aiplatform_generated_aiplatform_v1_metadata_service_create_context_async.py",
+ "regionTag": "aiplatform_generated_aiplatform_v1_MetadataService_CreateContext_async",
+ "segments": [
+ {
+ "end": 44,
+ "start": 27,
+ "type": "FULL"
+ },
+ {
+ "end": 44,
+ "start": 27,
+ "type": "SHORT"
+ },
+ {
+ "end": 33,
+ "start": 31,
+ "type": "CLIENT_INITIALIZATION"
+ },
+ {
+ "end": 38,
+ "start": 34,
+ "type": "REQUEST_INITIALIZATION"
+ },
+ {
+ "end": 41,
+ "start": 39,
+ "type": "REQUEST_EXECUTION"
+ },
+ {
+ "end": 45,
+ "start": 42,
+ "type": "RESPONSE_HANDLING"
+ }
+ ]
+ },
+ {
+ "clientMethod": {
+ "method": {
+ "service": {
+ "shortName": "MetadataService"
+ },
+ "shortName": "CreateContext"
+ }
+ },
+ "file": "aiplatform_generated_aiplatform_v1_metadata_service_create_context_sync.py",
+ "regionTag": "aiplatform_generated_aiplatform_v1_MetadataService_CreateContext_sync",
+ "segments": [
+ {
+ "end": 44,
+ "start": 27,
+ "type": "FULL"
+ },
+ {
+ "end": 44,
+ "start": 27,
+ "type": "SHORT"
+ },
+ {
+ "end": 33,
+ "start": 31,
+ "type": "CLIENT_INITIALIZATION"
+ },
+ {
+ "end": 38,
+ "start": 34,
+ "type": "REQUEST_INITIALIZATION"
+ },
+ {
+ "end": 41,
+ "start": 39,
+ "type": "REQUEST_EXECUTION"
+ },
+ {
+ "end": 45,
+ "start": 42,
+ "type": "RESPONSE_HANDLING"
+ }
+ ]
+ },
+ {
+ "clientMethod": {
+ "async": true,
+ "method": {
+ "service": {
+ "shortName": "MetadataService"
+ },
+ "shortName": "CreateExecution"
+ }
+ },
+ "file": "aiplatform_generated_aiplatform_v1_metadata_service_create_execution_async.py",
+ "regionTag": "aiplatform_generated_aiplatform_v1_MetadataService_CreateExecution_async",
+ "segments": [
+ {
+ "end": 44,
+ "start": 27,
+ "type": "FULL"
+ },
+ {
+ "end": 44,
+ "start": 27,
+ "type": "SHORT"
+ },
+ {
+ "end": 33,
+ "start": 31,
+ "type": "CLIENT_INITIALIZATION"
+ },
+ {
+ "end": 38,
+ "start": 34,
+ "type": "REQUEST_INITIALIZATION"
+ },
+ {
+ "end": 41,
+ "start": 39,
+ "type": "REQUEST_EXECUTION"
+ },
+ {
+ "end": 45,
+ "start": 42,
+ "type": "RESPONSE_HANDLING"
+ }
+ ]
+ },
+ {
+ "clientMethod": {
+ "method": {
+ "service": {
+ "shortName": "MetadataService"
+ },
+ "shortName": "CreateExecution"
+ }
+ },
+ "file": "aiplatform_generated_aiplatform_v1_metadata_service_create_execution_sync.py",
+ "regionTag": "aiplatform_generated_aiplatform_v1_MetadataService_CreateExecution_sync",
+ "segments": [
+ {
+ "end": 44,
+ "start": 27,
+ "type": "FULL"
+ },
+ {
+ "end": 44,
+ "start": 27,
+ "type": "SHORT"
+ },
+ {
+ "end": 33,
+ "start": 31,
+ "type": "CLIENT_INITIALIZATION"
+ },
+ {
+ "end": 38,
+ "start": 34,
+ "type": "REQUEST_INITIALIZATION"
+ },
+ {
+ "end": 41,
+ "start": 39,
+ "type": "REQUEST_EXECUTION"
+ },
+ {
+ "end": 45,
+ "start": 42,
+ "type": "RESPONSE_HANDLING"
+ }
+ ]
+ },
+ {
+ "clientMethod": {
+ "async": true,
+ "method": {
+ "service": {
+ "shortName": "MetadataService"
+ },
+ "shortName": "CreateMetadataSchema"
+ }
+ },
+ "file": "aiplatform_generated_aiplatform_v1_metadata_service_create_metadata_schema_async.py",
+ "regionTag": "aiplatform_generated_aiplatform_v1_MetadataService_CreateMetadataSchema_async",
+ "segments": [
+ {
+ "end": 48,
+ "start": 27,
+ "type": "FULL"
+ },
+ {
+ "end": 48,
+ "start": 27,
+ "type": "SHORT"
+ },
+ {
+ "end": 33,
+ "start": 31,
+ "type": "CLIENT_INITIALIZATION"
+ },
+ {
+ "end": 42,
+ "start": 34,
+ "type": "REQUEST_INITIALIZATION"
+ },
+ {
+ "end": 45,
+ "start": 43,
+ "type": "REQUEST_EXECUTION"
+ },
+ {
+ "end": 49,
+ "start": 46,
+ "type": "RESPONSE_HANDLING"
+ }
+ ]
+ },
+ {
+ "clientMethod": {
+ "method": {
+ "service": {
+ "shortName": "MetadataService"
+ },
+ "shortName": "CreateMetadataSchema"
+ }
+ },
+ "file": "aiplatform_generated_aiplatform_v1_metadata_service_create_metadata_schema_sync.py",
+ "regionTag": "aiplatform_generated_aiplatform_v1_MetadataService_CreateMetadataSchema_sync",
+ "segments": [
+ {
+ "end": 48,
+ "start": 27,
+ "type": "FULL"
+ },
+ {
+ "end": 48,
+ "start": 27,
+ "type": "SHORT"
+ },
+ {
+ "end": 33,
+ "start": 31,
+ "type": "CLIENT_INITIALIZATION"
+ },
+ {
+ "end": 42,
+ "start": 34,
+ "type": "REQUEST_INITIALIZATION"
+ },
+ {
+ "end": 45,
+ "start": 43,
+ "type": "REQUEST_EXECUTION"
+ },
+ {
+ "end": 49,
+ "start": 46,
+ "type": "RESPONSE_HANDLING"
+ }
+ ]
+ },
+ {
+ "clientMethod": {
+ "async": true,
+ "method": {
+ "service": {
+ "shortName": "MetadataService"
+ },
+ "shortName": "CreateMetadataStore"
+ }
+ },
+ "file": "aiplatform_generated_aiplatform_v1_metadata_service_create_metadata_store_async.py",
+ "regionTag": "aiplatform_generated_aiplatform_v1_MetadataService_CreateMetadataStore_async",
+ "segments": [
+ {
+ "end": 48,
+ "start": 27,
+ "type": "FULL"
+ },
+ {
+ "end": 48,
+ "start": 27,
+ "type": "SHORT"
+ },
+ {
+ "end": 33,
+ "start": 31,
+ "type": "CLIENT_INITIALIZATION"
+ },
+ {
+ "end": 38,
+ "start": 34,
+ "type": "REQUEST_INITIALIZATION"
+ },
+ {
+ "end": 45,
+ "start": 39,
+ "type": "REQUEST_EXECUTION"
+ },
+ {
+ "end": 49,
+ "start": 46,
+ "type": "RESPONSE_HANDLING"
+ }
+ ]
+ },
+ {
+ "clientMethod": {
+ "method": {
+ "service": {
+ "shortName": "MetadataService"
+ },
+ "shortName": "CreateMetadataStore"
+ }
+ },
+ "file": "aiplatform_generated_aiplatform_v1_metadata_service_create_metadata_store_sync.py",
+ "regionTag": "aiplatform_generated_aiplatform_v1_MetadataService_CreateMetadataStore_sync",
+ "segments": [
+ {
+ "end": 48,
+ "start": 27,
+ "type": "FULL"
+ },
+ {
+ "end": 48,
+ "start": 27,
+ "type": "SHORT"
+ },
+ {
+ "end": 33,
+ "start": 31,
+ "type": "CLIENT_INITIALIZATION"
+ },
+ {
+ "end": 38,
+ "start": 34,
+ "type": "REQUEST_INITIALIZATION"
+ },
+ {
+ "end": 45,
+ "start": 39,
+ "type": "REQUEST_EXECUTION"
+ },
+ {
+ "end": 49,
+ "start": 46,
+ "type": "RESPONSE_HANDLING"
+ }
+ ]
+ },
+ {
+ "clientMethod": {
+ "async": true,
+ "method": {
+ "service": {
+ "shortName": "MetadataService"
+ },
+ "shortName": "DeleteArtifact"
+ }
+ },
+ "file": "aiplatform_generated_aiplatform_v1_metadata_service_delete_artifact_async.py",
+ "regionTag": "aiplatform_generated_aiplatform_v1_MetadataService_DeleteArtifact_async",
+ "segments": [
+ {
+ "end": 48,
+ "start": 27,
+ "type": "FULL"
+ },
+ {
+ "end": 48,
+ "start": 27,
+ "type": "SHORT"
+ },
+ {
+ "end": 33,
+ "start": 31,
+ "type": "CLIENT_INITIALIZATION"
+ },
+ {
+ "end": 38,
+ "start": 34,
+ "type": "REQUEST_INITIALIZATION"
+ },
+ {
+ "end": 45,
+ "start": 39,
+ "type": "REQUEST_EXECUTION"
+ },
+ {
+ "end": 49,
+ "start": 46,
+ "type": "RESPONSE_HANDLING"
+ }
+ ]
+ },
+ {
+ "clientMethod": {
+ "method": {
+ "service": {
+ "shortName": "MetadataService"
+ },
+ "shortName": "DeleteArtifact"
+ }
+ },
+ "file": "aiplatform_generated_aiplatform_v1_metadata_service_delete_artifact_sync.py",
+ "regionTag": "aiplatform_generated_aiplatform_v1_MetadataService_DeleteArtifact_sync",
+ "segments": [
+ {
+ "end": 48,
+ "start": 27,
+ "type": "FULL"
+ },
+ {
+ "end": 48,
+ "start": 27,
+ "type": "SHORT"
+ },
+ {
+ "end": 33,
+ "start": 31,
+ "type": "CLIENT_INITIALIZATION"
+ },
+ {
+ "end": 38,
+ "start": 34,
+ "type": "REQUEST_INITIALIZATION"
+ },
+ {
+ "end": 45,
+ "start": 39,
+ "type": "REQUEST_EXECUTION"
+ },
+ {
+ "end": 49,
+ "start": 46,
+ "type": "RESPONSE_HANDLING"
+ }
+ ]
+ },
+ {
+ "clientMethod": {
+ "async": true,
+ "method": {
+ "service": {
+ "shortName": "MetadataService"
+ },
+ "shortName": "DeleteContext"
+ }
+ },
+ "file": "aiplatform_generated_aiplatform_v1_metadata_service_delete_context_async.py",
+ "regionTag": "aiplatform_generated_aiplatform_v1_MetadataService_DeleteContext_async",
+ "segments": [
+ {
+ "end": 48,
+ "start": 27,
+ "type": "FULL"
+ },
+ {
+ "end": 48,
+ "start": 27,
+ "type": "SHORT"
+ },
+ {
+ "end": 33,
+ "start": 31,
+ "type": "CLIENT_INITIALIZATION"
+ },
+ {
+ "end": 38,
+ "start": 34,
+ "type": "REQUEST_INITIALIZATION"
+ },
+ {
+ "end": 45,
+ "start": 39,
+ "type": "REQUEST_EXECUTION"
+ },
+ {
+ "end": 49,
+ "start": 46,
+ "type": "RESPONSE_HANDLING"
+ }
+ ]
+ },
+ {
+ "clientMethod": {
+ "method": {
+ "service": {
+ "shortName": "MetadataService"
+ },
+ "shortName": "DeleteContext"
+ }
+ },
+ "file": "aiplatform_generated_aiplatform_v1_metadata_service_delete_context_sync.py",
+ "regionTag": "aiplatform_generated_aiplatform_v1_MetadataService_DeleteContext_sync",
+ "segments": [
+ {
+ "end": 48,
+ "start": 27,
+ "type": "FULL"
+ },
+ {
+ "end": 48,
+ "start": 27,
+ "type": "SHORT"
+ },
+ {
+ "end": 33,
+ "start": 31,
+ "type": "CLIENT_INITIALIZATION"
+ },
+ {
+ "end": 38,
+ "start": 34,
+ "type": "REQUEST_INITIALIZATION"
+ },
+ {
+ "end": 45,
+ "start": 39,
+ "type": "REQUEST_EXECUTION"
+ },
+ {
+ "end": 49,
+ "start": 46,
+ "type": "RESPONSE_HANDLING"
+ }
+ ]
+ },
+ {
+ "clientMethod": {
+ "async": true,
+ "method": {
+ "service": {
+ "shortName": "MetadataService"
+ },
+ "shortName": "DeleteExecution"
+ }
+ },
+ "file": "aiplatform_generated_aiplatform_v1_metadata_service_delete_execution_async.py",
+ "regionTag": "aiplatform_generated_aiplatform_v1_MetadataService_DeleteExecution_async",
+ "segments": [
+ {
+ "end": 48,
+ "start": 27,
+ "type": "FULL"
+ },
+ {
+ "end": 48,
+ "start": 27,
+ "type": "SHORT"
+ },
+ {
+ "end": 33,
+ "start": 31,
+ "type": "CLIENT_INITIALIZATION"
+ },
+ {
+ "end": 38,
+ "start": 34,
+ "type": "REQUEST_INITIALIZATION"
+ },
+ {
+ "end": 45,
+ "start": 39,
+ "type": "REQUEST_EXECUTION"
+ },
+ {
+ "end": 49,
+ "start": 46,
+ "type": "RESPONSE_HANDLING"
+ }
+ ]
+ },
+ {
+ "clientMethod": {
+ "method": {
+ "service": {
+ "shortName": "MetadataService"
+ },
+ "shortName": "DeleteExecution"
+ }
+ },
+ "file": "aiplatform_generated_aiplatform_v1_metadata_service_delete_execution_sync.py",
+ "regionTag": "aiplatform_generated_aiplatform_v1_MetadataService_DeleteExecution_sync",
+ "segments": [
+ {
+ "end": 48,
+ "start": 27,
+ "type": "FULL"
+ },
+ {
+ "end": 48,
+ "start": 27,
+ "type": "SHORT"
+ },
+ {
+ "end": 33,
+ "start": 31,
+ "type": "CLIENT_INITIALIZATION"
+ },
+ {
+ "end": 38,
+ "start": 34,
+ "type": "REQUEST_INITIALIZATION"
+ },
+ {
+ "end": 45,
+ "start": 39,
+ "type": "REQUEST_EXECUTION"
+ },
+ {
+ "end": 49,
+ "start": 46,
+ "type": "RESPONSE_HANDLING"
+ }
+ ]
+ },
+ {
+ "clientMethod": {
+ "async": true,
+ "method": {
+ "service": {
+ "shortName": "MetadataService"
+ },
+ "shortName": "DeleteMetadataStore"
+ }
+ },
+ "file": "aiplatform_generated_aiplatform_v1_metadata_service_delete_metadata_store_async.py",
+ "regionTag": "aiplatform_generated_aiplatform_v1_MetadataService_DeleteMetadataStore_async",
+ "segments": [
+ {
+ "end": 48,
+ "start": 27,
+ "type": "FULL"
+ },
+ {
+ "end": 48,
+ "start": 27,
+ "type": "SHORT"
+ },
+ {
+ "end": 33,
+ "start": 31,
+ "type": "CLIENT_INITIALIZATION"
+ },
+ {
+ "end": 38,
+ "start": 34,
+ "type": "REQUEST_INITIALIZATION"
+ },
+ {
+ "end": 45,
+ "start": 39,
+ "type": "REQUEST_EXECUTION"
+ },
+ {
+ "end": 49,
+ "start": 46,
+ "type": "RESPONSE_HANDLING"
+ }
+ ]
+ },
+ {
+ "clientMethod": {
+ "method": {
+ "service": {
+ "shortName": "MetadataService"
+ },
+ "shortName": "DeleteMetadataStore"
+ }
+ },
+ "file": "aiplatform_generated_aiplatform_v1_metadata_service_delete_metadata_store_sync.py",
+ "regionTag": "aiplatform_generated_aiplatform_v1_MetadataService_DeleteMetadataStore_sync",
+ "segments": [
+ {
+ "end": 48,
+ "start": 27,
+ "type": "FULL"
+ },
+ {
+ "end": 48,
+ "start": 27,
+ "type": "SHORT"
+ },
+ {
+ "end": 33,
+ "start": 31,
+ "type": "CLIENT_INITIALIZATION"
+ },
+ {
+ "end": 38,
+ "start": 34,
+ "type": "REQUEST_INITIALIZATION"
+ },
+ {
+ "end": 45,
+ "start": 39,
+ "type": "REQUEST_EXECUTION"
+ },
+ {
+ "end": 49,
+ "start": 46,
+ "type": "RESPONSE_HANDLING"
+ }
+ ]
+ },
+ {
+ "clientMethod": {
+ "async": true,
+ "method": {
+ "service": {
+ "shortName": "MetadataService"
+ },
+ "shortName": "GetArtifact"
+ }
+ },
+ "file": "aiplatform_generated_aiplatform_v1_metadata_service_get_artifact_async.py",
+ "regionTag": "aiplatform_generated_aiplatform_v1_MetadataService_GetArtifact_async",
+ "segments": [
+ {
+ "end": 44,
+ "start": 27,
+ "type": "FULL"
+ },
+ {
+ "end": 44,
+ "start": 27,
+ "type": "SHORT"
+ },
+ {
+ "end": 33,
+ "start": 31,
+ "type": "CLIENT_INITIALIZATION"
+ },
+ {
+ "end": 38,
+ "start": 34,
+ "type": "REQUEST_INITIALIZATION"
+ },
+ {
+ "end": 41,
+ "start": 39,
+ "type": "REQUEST_EXECUTION"
+ },
+ {
+ "end": 45,
+ "start": 42,
+ "type": "RESPONSE_HANDLING"
+ }
+ ]
+ },
+ {
+ "clientMethod": {
+ "method": {
+ "service": {
+ "shortName": "MetadataService"
+ },
+ "shortName": "GetArtifact"
+ }
+ },
+ "file": "aiplatform_generated_aiplatform_v1_metadata_service_get_artifact_sync.py",
+ "regionTag": "aiplatform_generated_aiplatform_v1_MetadataService_GetArtifact_sync",
+ "segments": [
+ {
+ "end": 44,
+ "start": 27,
+ "type": "FULL"
+ },
+ {
+ "end": 44,
+ "start": 27,
+ "type": "SHORT"
+ },
+ {
+ "end": 33,
+ "start": 31,
+ "type": "CLIENT_INITIALIZATION"
+ },
+ {
+ "end": 38,
+ "start": 34,
+ "type": "REQUEST_INITIALIZATION"
+ },
+ {
+ "end": 41,
+ "start": 39,
+ "type": "REQUEST_EXECUTION"
+ },
+ {
+ "end": 45,
+ "start": 42,
+ "type": "RESPONSE_HANDLING"
+ }
+ ]
+ },
+ {
+ "clientMethod": {
+ "async": true,
+ "method": {
+ "service": {
+ "shortName": "MetadataService"
+ },
+ "shortName": "GetContext"
+ }
+ },
+ "file": "aiplatform_generated_aiplatform_v1_metadata_service_get_context_async.py",
+ "regionTag": "aiplatform_generated_aiplatform_v1_MetadataService_GetContext_async",
+ "segments": [
+ {
+ "end": 44,
+ "start": 27,
+ "type": "FULL"
+ },
+ {
+ "end": 44,
+ "start": 27,
+ "type": "SHORT"
+ },
+ {
+ "end": 33,
+ "start": 31,
+ "type": "CLIENT_INITIALIZATION"
+ },
+ {
+ "end": 38,
+ "start": 34,
+ "type": "REQUEST_INITIALIZATION"
+ },
+ {
+ "end": 41,
+ "start": 39,
+ "type": "REQUEST_EXECUTION"
+ },
+ {
+ "end": 45,
+ "start": 42,
+ "type": "RESPONSE_HANDLING"
+ }
+ ]
+ },
+ {
+ "clientMethod": {
+ "method": {
+ "service": {
+ "shortName": "MetadataService"
+ },
+ "shortName": "GetContext"
+ }
+ },
+ "file": "aiplatform_generated_aiplatform_v1_metadata_service_get_context_sync.py",
+ "regionTag": "aiplatform_generated_aiplatform_v1_MetadataService_GetContext_sync",
+ "segments": [
+ {
+ "end": 44,
+ "start": 27,
+ "type": "FULL"
+ },
+ {
+ "end": 44,
+ "start": 27,
+ "type": "SHORT"
+ },
+ {
+ "end": 33,
+ "start": 31,
+ "type": "CLIENT_INITIALIZATION"
+ },
+ {
+ "end": 38,
+ "start": 34,
+ "type": "REQUEST_INITIALIZATION"
+ },
+ {
+ "end": 41,
+ "start": 39,
+ "type": "REQUEST_EXECUTION"
+ },
+ {
+ "end": 45,
+ "start": 42,
+ "type": "RESPONSE_HANDLING"
+ }
+ ]
+ },
+ {
+ "clientMethod": {
+ "async": true,
+ "method": {
+ "service": {
+ "shortName": "MetadataService"
+ },
+ "shortName": "GetExecution"
+ }
+ },
+ "file": "aiplatform_generated_aiplatform_v1_metadata_service_get_execution_async.py",
+ "regionTag": "aiplatform_generated_aiplatform_v1_MetadataService_GetExecution_async",
+ "segments": [
+ {
+ "end": 44,
+ "start": 27,
+ "type": "FULL"
+ },
+ {
+ "end": 44,
+ "start": 27,
+ "type": "SHORT"
+ },
+ {
+ "end": 33,
+ "start": 31,
+ "type": "CLIENT_INITIALIZATION"
+ },
+ {
+ "end": 38,
+ "start": 34,
+ "type": "REQUEST_INITIALIZATION"
+ },
+ {
+ "end": 41,
+ "start": 39,
+ "type": "REQUEST_EXECUTION"
+ },
+ {
+ "end": 45,
+ "start": 42,
+ "type": "RESPONSE_HANDLING"
+ }
+ ]
+ },
+ {
+ "clientMethod": {
+ "method": {
+ "service": {
+ "shortName": "MetadataService"
+ },
+ "shortName": "GetExecution"
+ }
+ },
+ "file": "aiplatform_generated_aiplatform_v1_metadata_service_get_execution_sync.py",
+ "regionTag": "aiplatform_generated_aiplatform_v1_MetadataService_GetExecution_sync",
+ "segments": [
+ {
+ "end": 44,
+ "start": 27,
+ "type": "FULL"
+ },
+ {
+ "end": 44,
+ "start": 27,
+ "type": "SHORT"
+ },
+ {
+ "end": 33,
+ "start": 31,
+ "type": "CLIENT_INITIALIZATION"
+ },
+ {
+ "end": 38,
+ "start": 34,
+ "type": "REQUEST_INITIALIZATION"
+ },
+ {
+ "end": 41,
+ "start": 39,
+ "type": "REQUEST_EXECUTION"
+ },
+ {
+ "end": 45,
+ "start": 42,
+ "type": "RESPONSE_HANDLING"
+ }
+ ]
+ },
+ {
+ "clientMethod": {
+ "async": true,
+ "method": {
+ "service": {
+ "shortName": "MetadataService"
+ },
+ "shortName": "GetMetadataSchema"
+ }
+ },
+ "file": "aiplatform_generated_aiplatform_v1_metadata_service_get_metadata_schema_async.py",
+ "regionTag": "aiplatform_generated_aiplatform_v1_MetadataService_GetMetadataSchema_async",
+ "segments": [
+ {
+ "end": 44,
+ "start": 27,
+ "type": "FULL"
+ },
+ {
+ "end": 44,
+ "start": 27,
+ "type": "SHORT"
+ },
+ {
+ "end": 33,
+ "start": 31,
+ "type": "CLIENT_INITIALIZATION"
+ },
+ {
+ "end": 38,
+ "start": 34,
+ "type": "REQUEST_INITIALIZATION"
+ },
+ {
+ "end": 41,
+ "start": 39,
+ "type": "REQUEST_EXECUTION"
+ },
+ {
+ "end": 45,
+ "start": 42,
+ "type": "RESPONSE_HANDLING"
+ }
+ ]
+ },
+ {
+ "clientMethod": {
+ "method": {
+ "service": {
+ "shortName": "MetadataService"
+ },
+ "shortName": "GetMetadataSchema"
+ }
+ },
+ "file": "aiplatform_generated_aiplatform_v1_metadata_service_get_metadata_schema_sync.py",
+ "regionTag": "aiplatform_generated_aiplatform_v1_MetadataService_GetMetadataSchema_sync",
+ "segments": [
+ {
+ "end": 44,
+ "start": 27,
+ "type": "FULL"
+ },
+ {
+ "end": 44,
+ "start": 27,
+ "type": "SHORT"
+ },
+ {
+ "end": 33,
+ "start": 31,
+ "type": "CLIENT_INITIALIZATION"
+ },
+ {
+ "end": 38,
+ "start": 34,
+ "type": "REQUEST_INITIALIZATION"
+ },
+ {
+ "end": 41,
+ "start": 39,
+ "type": "REQUEST_EXECUTION"
+ },
+ {
+ "end": 45,
+ "start": 42,
+ "type": "RESPONSE_HANDLING"
+ }
+ ]
+ },
+ {
+ "clientMethod": {
+ "async": true,
+ "method": {
+ "service": {
+ "shortName": "MetadataService"
+ },
+ "shortName": "GetMetadataStore"
+ }
+ },
+ "file": "aiplatform_generated_aiplatform_v1_metadata_service_get_metadata_store_async.py",
+ "regionTag": "aiplatform_generated_aiplatform_v1_MetadataService_GetMetadataStore_async",
+ "segments": [
+ {
+ "end": 44,
+ "start": 27,
+ "type": "FULL"
+ },
+ {
+ "end": 44,
+ "start": 27,
+ "type": "SHORT"
+ },
+ {
+ "end": 33,
+ "start": 31,
+ "type": "CLIENT_INITIALIZATION"
+ },
+ {
+ "end": 38,
+ "start": 34,
+ "type": "REQUEST_INITIALIZATION"
+ },
+ {
+ "end": 41,
+ "start": 39,
+ "type": "REQUEST_EXECUTION"
+ },
+ {
+ "end": 45,
+ "start": 42,
+ "type": "RESPONSE_HANDLING"
+ }
+ ]
+ },
+ {
+ "clientMethod": {
+ "method": {
+ "service": {
+ "shortName": "MetadataService"
+ },
+ "shortName": "GetMetadataStore"
+ }
+ },
+ "file": "aiplatform_generated_aiplatform_v1_metadata_service_get_metadata_store_sync.py",
+ "regionTag": "aiplatform_generated_aiplatform_v1_MetadataService_GetMetadataStore_sync",
+ "segments": [
+ {
+ "end": 44,
+ "start": 27,
+ "type": "FULL"
+ },
+ {
+ "end": 44,
+ "start": 27,
+ "type": "SHORT"
+ },
+ {
+ "end": 33,
+ "start": 31,
+ "type": "CLIENT_INITIALIZATION"
+ },
+ {
+ "end": 38,
+ "start": 34,
+ "type": "REQUEST_INITIALIZATION"
+ },
+ {
+ "end": 41,
+ "start": 39,
+ "type": "REQUEST_EXECUTION"
+ },
+ {
+ "end": 45,
+ "start": 42,
+ "type": "RESPONSE_HANDLING"
+ }
+ ]
+ },
+ {
+ "clientMethod": {
+ "async": true,
+ "method": {
+ "service": {
+ "shortName": "MetadataService"
+ },
+ "shortName": "ListArtifacts"
+ }
+ },
+ "file": "aiplatform_generated_aiplatform_v1_metadata_service_list_artifacts_async.py",
+ "regionTag": "aiplatform_generated_aiplatform_v1_MetadataService_ListArtifacts_async",
+ "segments": [
+ {
+ "end": 45,
+ "start": 27,
+ "type": "FULL"
+ },
+ {
+ "end": 45,
+ "start": 27,
+ "type": "SHORT"
+ },
+ {
+ "end": 33,
+ "start": 31,
+ "type": "CLIENT_INITIALIZATION"
+ },
+ {
+ "end": 38,
+ "start": 34,
+ "type": "REQUEST_INITIALIZATION"
+ },
+ {
+ "end": 41,
+ "start": 39,
+ "type": "REQUEST_EXECUTION"
+ },
+ {
+ "end": 46,
+ "start": 42,
+ "type": "RESPONSE_HANDLING"
+ }
+ ]
+ },
+ {
+ "clientMethod": {
+ "method": {
+ "service": {
+ "shortName": "MetadataService"
+ },
+ "shortName": "ListArtifacts"
+ }
+ },
+ "file": "aiplatform_generated_aiplatform_v1_metadata_service_list_artifacts_sync.py",
+ "regionTag": "aiplatform_generated_aiplatform_v1_MetadataService_ListArtifacts_sync",
+ "segments": [
+ {
+ "end": 45,
+ "start": 27,
+ "type": "FULL"
+ },
+ {
+ "end": 45,
+ "start": 27,
+ "type": "SHORT"
+ },
+ {
+ "end": 33,
+ "start": 31,
+ "type": "CLIENT_INITIALIZATION"
+ },
+ {
+ "end": 38,
+ "start": 34,
+ "type": "REQUEST_INITIALIZATION"
+ },
+ {
+ "end": 41,
+ "start": 39,
+ "type": "REQUEST_EXECUTION"
+ },
+ {
+ "end": 46,
+ "start": 42,
+ "type": "RESPONSE_HANDLING"
+ }
+ ]
+ },
+ {
+ "clientMethod": {
+ "async": true,
+ "method": {
+ "service": {
+ "shortName": "MetadataService"
+ },
+ "shortName": "ListContexts"
+ }
+ },
+ "file": "aiplatform_generated_aiplatform_v1_metadata_service_list_contexts_async.py",
+ "regionTag": "aiplatform_generated_aiplatform_v1_MetadataService_ListContexts_async",
+ "segments": [
+ {
+ "end": 45,
+ "start": 27,
+ "type": "FULL"
+ },
+ {
+ "end": 45,
+ "start": 27,
+ "type": "SHORT"
+ },
+ {
+ "end": 33,
+ "start": 31,
+ "type": "CLIENT_INITIALIZATION"
+ },
+ {
+ "end": 38,
+ "start": 34,
+ "type": "REQUEST_INITIALIZATION"
+ },
+ {
+ "end": 41,
+ "start": 39,
+ "type": "REQUEST_EXECUTION"
+ },
+ {
+ "end": 46,
+ "start": 42,
+ "type": "RESPONSE_HANDLING"
+ }
+ ]
+ },
+ {
+ "clientMethod": {
+ "method": {
+ "service": {
+ "shortName": "MetadataService"
+ },
+ "shortName": "ListContexts"
+ }
+ },
+ "file": "aiplatform_generated_aiplatform_v1_metadata_service_list_contexts_sync.py",
+ "regionTag": "aiplatform_generated_aiplatform_v1_MetadataService_ListContexts_sync",
+ "segments": [
+ {
+ "end": 45,
+ "start": 27,
+ "type": "FULL"
+ },
+ {
+ "end": 45,
+ "start": 27,
+ "type": "SHORT"
+ },
+ {
+ "end": 33,
+ "start": 31,
+ "type": "CLIENT_INITIALIZATION"
+ },
+ {
+ "end": 38,
+ "start": 34,
+ "type": "REQUEST_INITIALIZATION"
+ },
+ {
+ "end": 41,
+ "start": 39,
+ "type": "REQUEST_EXECUTION"
+ },
+ {
+ "end": 46,
+ "start": 42,
+ "type": "RESPONSE_HANDLING"
+ }
+ ]
+ },
+ {
+ "clientMethod": {
+ "async": true,
+ "method": {
+ "service": {
+ "shortName": "MetadataService"
+ },
+ "shortName": "ListExecutions"
+ }
+ },
+ "file": "aiplatform_generated_aiplatform_v1_metadata_service_list_executions_async.py",
+ "regionTag": "aiplatform_generated_aiplatform_v1_MetadataService_ListExecutions_async",
+ "segments": [
+ {
+ "end": 45,
+ "start": 27,
+ "type": "FULL"
+ },
+ {
+ "end": 45,
+ "start": 27,
+ "type": "SHORT"
+ },
+ {
+ "end": 33,
+ "start": 31,
+ "type": "CLIENT_INITIALIZATION"
+ },
+ {
+ "end": 38,
+ "start": 34,
+ "type": "REQUEST_INITIALIZATION"
+ },
+ {
+ "end": 41,
+ "start": 39,
+ "type": "REQUEST_EXECUTION"
+ },
+ {
+ "end": 46,
+ "start": 42,
+ "type": "RESPONSE_HANDLING"
+ }
+ ]
+ },
+ {
+ "clientMethod": {
+ "method": {
+ "service": {
+ "shortName": "MetadataService"
+ },
+ "shortName": "ListExecutions"
+ }
+ },
+ "file": "aiplatform_generated_aiplatform_v1_metadata_service_list_executions_sync.py",
+ "regionTag": "aiplatform_generated_aiplatform_v1_MetadataService_ListExecutions_sync",
+ "segments": [
+ {
+ "end": 45,
+ "start": 27,
+ "type": "FULL"
+ },
+ {
+ "end": 45,
+ "start": 27,
+ "type": "SHORT"
+ },
+ {
+ "end": 33,
+ "start": 31,
+ "type": "CLIENT_INITIALIZATION"
+ },
+ {
+ "end": 38,
+ "start": 34,
+ "type": "REQUEST_INITIALIZATION"
+ },
+ {
+ "end": 41,
+ "start": 39,
+ "type": "REQUEST_EXECUTION"
+ },
+ {
+ "end": 46,
+ "start": 42,
+ "type": "RESPONSE_HANDLING"
+ }
+ ]
+ },
+ {
+ "clientMethod": {
+ "async": true,
+ "method": {
+ "service": {
+ "shortName": "MetadataService"
+ },
+ "shortName": "ListMetadataSchemas"
+ }
+ },
+ "file": "aiplatform_generated_aiplatform_v1_metadata_service_list_metadata_schemas_async.py",
+ "regionTag": "aiplatform_generated_aiplatform_v1_MetadataService_ListMetadataSchemas_async",
+ "segments": [
+ {
+ "end": 45,
+ "start": 27,
+ "type": "FULL"
+ },
+ {
+ "end": 45,
+ "start": 27,
+ "type": "SHORT"
+ },
+ {
+ "end": 33,
+ "start": 31,
+ "type": "CLIENT_INITIALIZATION"
+ },
+ {
+ "end": 38,
+ "start": 34,
+ "type": "REQUEST_INITIALIZATION"
+ },
+ {
+ "end": 41,
+ "start": 39,
+ "type": "REQUEST_EXECUTION"
+ },
+ {
+ "end": 46,
+ "start": 42,
+ "type": "RESPONSE_HANDLING"
+ }
+ ]
+ },
+ {
+ "clientMethod": {
+ "method": {
+ "service": {
+ "shortName": "MetadataService"
+ },
+ "shortName": "ListMetadataSchemas"
+ }
+ },
+ "file": "aiplatform_generated_aiplatform_v1_metadata_service_list_metadata_schemas_sync.py",
+ "regionTag": "aiplatform_generated_aiplatform_v1_MetadataService_ListMetadataSchemas_sync",
+ "segments": [
+ {
+ "end": 45,
+ "start": 27,
+ "type": "FULL"
+ },
+ {
+ "end": 45,
+ "start": 27,
+ "type": "SHORT"
+ },
+ {
+ "end": 33,
+ "start": 31,
+ "type": "CLIENT_INITIALIZATION"
+ },
+ {
+ "end": 38,
+ "start": 34,
+ "type": "REQUEST_INITIALIZATION"
+ },
+ {
+ "end": 41,
+ "start": 39,
+ "type": "REQUEST_EXECUTION"
+ },
+ {
+ "end": 46,
+ "start": 42,
+ "type": "RESPONSE_HANDLING"
+ }
+ ]
+ },
+ {
+ "clientMethod": {
+ "async": true,
+ "method": {
+ "service": {
+ "shortName": "MetadataService"
+ },
+ "shortName": "ListMetadataStores"
+ }
+ },
+ "file": "aiplatform_generated_aiplatform_v1_metadata_service_list_metadata_stores_async.py",
+ "regionTag": "aiplatform_generated_aiplatform_v1_MetadataService_ListMetadataStores_async",
+ "segments": [
+ {
+ "end": 45,
+ "start": 27,
+ "type": "FULL"
+ },
+ {
+ "end": 45,
+ "start": 27,
+ "type": "SHORT"
+ },
+ {
+ "end": 33,
+ "start": 31,
+ "type": "CLIENT_INITIALIZATION"
+ },
+ {
+ "end": 38,
+ "start": 34,
+ "type": "REQUEST_INITIALIZATION"
+ },
+ {
+ "end": 41,
+ "start": 39,
+ "type": "REQUEST_EXECUTION"
+ },
+ {
+ "end": 46,
+ "start": 42,
+ "type": "RESPONSE_HANDLING"
+ }
+ ]
+ },
+ {
+ "clientMethod": {
+ "method": {
+ "service": {
+ "shortName": "MetadataService"
+ },
+ "shortName": "ListMetadataStores"
+ }
+ },
+ "file": "aiplatform_generated_aiplatform_v1_metadata_service_list_metadata_stores_sync.py",
+ "regionTag": "aiplatform_generated_aiplatform_v1_MetadataService_ListMetadataStores_sync",
+ "segments": [
+ {
+ "end": 45,
+ "start": 27,
+ "type": "FULL"
+ },
+ {
+ "end": 45,
+ "start": 27,
+ "type": "SHORT"
+ },
+ {
+ "end": 33,
+ "start": 31,
+ "type": "CLIENT_INITIALIZATION"
+ },
+ {
+ "end": 38,
+ "start": 34,
+ "type": "REQUEST_INITIALIZATION"
+ },
+ {
+ "end": 41,
+ "start": 39,
+ "type": "REQUEST_EXECUTION"
+ },
+ {
+ "end": 46,
+ "start": 42,
+ "type": "RESPONSE_HANDLING"
+ }
+ ]
+ },
+ {
+ "clientMethod": {
+ "async": true,
+ "method": {
+ "service": {
+ "shortName": "MetadataService"
+ },
+ "shortName": "PurgeArtifacts"
+ }
+ },
+ "file": "aiplatform_generated_aiplatform_v1_metadata_service_purge_artifacts_async.py",
+ "regionTag": "aiplatform_generated_aiplatform_v1_MetadataService_PurgeArtifacts_async",
+ "segments": [
+ {
+ "end": 49,
+ "start": 27,
+ "type": "FULL"
+ },
+ {
+ "end": 49,
+ "start": 27,
+ "type": "SHORT"
+ },
+ {
+ "end": 33,
+ "start": 31,
+ "type": "CLIENT_INITIALIZATION"
+ },
+ {
+ "end": 39,
+ "start": 34,
+ "type": "REQUEST_INITIALIZATION"
+ },
+ {
+ "end": 46,
+ "start": 40,
+ "type": "REQUEST_EXECUTION"
+ },
+ {
+ "end": 50,
+ "start": 47,
+ "type": "RESPONSE_HANDLING"
+ }
+ ]
+ },
+ {
+ "clientMethod": {
+ "method": {
+ "service": {
+ "shortName": "MetadataService"
+ },
+ "shortName": "PurgeArtifacts"
+ }
+ },
+ "file": "aiplatform_generated_aiplatform_v1_metadata_service_purge_artifacts_sync.py",
+ "regionTag": "aiplatform_generated_aiplatform_v1_MetadataService_PurgeArtifacts_sync",
+ "segments": [
+ {
+ "end": 49,
+ "start": 27,
+ "type": "FULL"
+ },
+ {
+ "end": 49,
+ "start": 27,
+ "type": "SHORT"
+ },
+ {
+ "end": 33,
+ "start": 31,
+ "type": "CLIENT_INITIALIZATION"
+ },
+ {
+ "end": 39,
+ "start": 34,
+ "type": "REQUEST_INITIALIZATION"
+ },
+ {
+ "end": 46,
+ "start": 40,
+ "type": "REQUEST_EXECUTION"
+ },
+ {
+ "end": 50,
+ "start": 47,
+ "type": "RESPONSE_HANDLING"
+ }
+ ]
+ },
+ {
+ "clientMethod": {
+ "async": true,
+ "method": {
+ "service": {
+ "shortName": "MetadataService"
+ },
+ "shortName": "PurgeContexts"
+ }
+ },
+ "file": "aiplatform_generated_aiplatform_v1_metadata_service_purge_contexts_async.py",
+ "regionTag": "aiplatform_generated_aiplatform_v1_MetadataService_PurgeContexts_async",
+ "segments": [
+ {
+ "end": 49,
+ "start": 27,
+ "type": "FULL"
+ },
+ {
+ "end": 49,
+ "start": 27,
+ "type": "SHORT"
+ },
+ {
+ "end": 33,
+ "start": 31,
+ "type": "CLIENT_INITIALIZATION"
+ },
+ {
+ "end": 39,
+ "start": 34,
+ "type": "REQUEST_INITIALIZATION"
+ },
+ {
+ "end": 46,
+ "start": 40,
+ "type": "REQUEST_EXECUTION"
+ },
+ {
+ "end": 50,
+ "start": 47,
+ "type": "RESPONSE_HANDLING"
+ }
+ ]
+ },
+ {
+ "clientMethod": {
+ "method": {
+ "service": {
+ "shortName": "MetadataService"
+ },
+ "shortName": "PurgeContexts"
+ }
+ },
+ "file": "aiplatform_generated_aiplatform_v1_metadata_service_purge_contexts_sync.py",
+ "regionTag": "aiplatform_generated_aiplatform_v1_MetadataService_PurgeContexts_sync",
+ "segments": [
+ {
+ "end": 49,
+ "start": 27,
+ "type": "FULL"
+ },
+ {
+ "end": 49,
+ "start": 27,
+ "type": "SHORT"
+ },
+ {
+ "end": 33,
+ "start": 31,
+ "type": "CLIENT_INITIALIZATION"
+ },
+ {
+ "end": 39,
+ "start": 34,
+ "type": "REQUEST_INITIALIZATION"
+ },
+ {
+ "end": 46,
+ "start": 40,
+ "type": "REQUEST_EXECUTION"
+ },
+ {
+ "end": 50,
+ "start": 47,
+ "type": "RESPONSE_HANDLING"
+ }
+ ]
+ },
+ {
+ "clientMethod": {
+ "async": true,
+ "method": {
+ "service": {
+ "shortName": "MetadataService"
+ },
+ "shortName": "PurgeExecutions"
+ }
+ },
+ "file": "aiplatform_generated_aiplatform_v1_metadata_service_purge_executions_async.py",
+ "regionTag": "aiplatform_generated_aiplatform_v1_MetadataService_PurgeExecutions_async",
+ "segments": [
+ {
+ "end": 49,
+ "start": 27,
+ "type": "FULL"
+ },
+ {
+ "end": 49,
+ "start": 27,
+ "type": "SHORT"
+ },
+ {
+ "end": 33,
+ "start": 31,
+ "type": "CLIENT_INITIALIZATION"
+ },
+ {
+ "end": 39,
+ "start": 34,
+ "type": "REQUEST_INITIALIZATION"
+ },
+ {
+ "end": 46,
+ "start": 40,
+ "type": "REQUEST_EXECUTION"
+ },
+ {
+ "end": 50,
+ "start": 47,
+ "type": "RESPONSE_HANDLING"
+ }
+ ]
+ },
+ {
+ "clientMethod": {
+ "method": {
+ "service": {
+ "shortName": "MetadataService"
+ },
+ "shortName": "PurgeExecutions"
+ }
+ },
+ "file": "aiplatform_generated_aiplatform_v1_metadata_service_purge_executions_sync.py",
+ "regionTag": "aiplatform_generated_aiplatform_v1_MetadataService_PurgeExecutions_sync",
+ "segments": [
+ {
+ "end": 49,
+ "start": 27,
+ "type": "FULL"
+ },
+ {
+ "end": 49,
+ "start": 27,
+ "type": "SHORT"
+ },
+ {
+ "end": 33,
+ "start": 31,
+ "type": "CLIENT_INITIALIZATION"
+ },
+ {
+ "end": 39,
+ "start": 34,
+ "type": "REQUEST_INITIALIZATION"
+ },
+ {
+ "end": 46,
+ "start": 40,
+ "type": "REQUEST_EXECUTION"
+ },
+ {
+ "end": 50,
+ "start": 47,
+ "type": "RESPONSE_HANDLING"
+ }
+ ]
+ },
+ {
+ "clientMethod": {
+ "async": true,
+ "method": {
+ "service": {
+ "shortName": "MetadataService"
+ },
+ "shortName": "QueryArtifactLineageSubgraph"
+ }
+ },
+ "file": "aiplatform_generated_aiplatform_v1_metadata_service_query_artifact_lineage_subgraph_async.py",
+ "regionTag": "aiplatform_generated_aiplatform_v1_MetadataService_QueryArtifactLineageSubgraph_async",
+ "segments": [
+ {
+ "end": 44,
+ "start": 27,
+ "type": "FULL"
+ },
+ {
+ "end": 44,
+ "start": 27,
+ "type": "SHORT"
+ },
+ {
+ "end": 33,
+ "start": 31,
+ "type": "CLIENT_INITIALIZATION"
+ },
+ {
+ "end": 38,
+ "start": 34,
+ "type": "REQUEST_INITIALIZATION"
+ },
+ {
+ "end": 41,
+ "start": 39,
+ "type": "REQUEST_EXECUTION"
+ },
+ {
+ "end": 45,
+ "start": 42,
+ "type": "RESPONSE_HANDLING"
+ }
+ ]
+ },
+ {
+ "clientMethod": {
+ "method": {
+ "service": {
+ "shortName": "MetadataService"
+ },
+ "shortName": "QueryArtifactLineageSubgraph"
+ }
+ },
+ "file": "aiplatform_generated_aiplatform_v1_metadata_service_query_artifact_lineage_subgraph_sync.py",
+ "regionTag": "aiplatform_generated_aiplatform_v1_MetadataService_QueryArtifactLineageSubgraph_sync",
+ "segments": [
+ {
+ "end": 44,
+ "start": 27,
+ "type": "FULL"
+ },
+ {
+ "end": 44,
+ "start": 27,
+ "type": "SHORT"
+ },
+ {
+ "end": 33,
+ "start": 31,
+ "type": "CLIENT_INITIALIZATION"
+ },
+ {
+ "end": 38,
+ "start": 34,
+ "type": "REQUEST_INITIALIZATION"
+ },
+ {
+ "end": 41,
+ "start": 39,
+ "type": "REQUEST_EXECUTION"
+ },
+ {
+ "end": 45,
+ "start": 42,
+ "type": "RESPONSE_HANDLING"
+ }
+ ]
+ },
+ {
+ "clientMethod": {
+ "async": true,
+ "method": {
+ "service": {
+ "shortName": "MetadataService"
+ },
+ "shortName": "QueryContextLineageSubgraph"
+ }
+ },
+ "file": "aiplatform_generated_aiplatform_v1_metadata_service_query_context_lineage_subgraph_async.py",
+ "regionTag": "aiplatform_generated_aiplatform_v1_MetadataService_QueryContextLineageSubgraph_async",
+ "segments": [
+ {
+ "end": 44,
+ "start": 27,
+ "type": "FULL"
+ },
+ {
+ "end": 44,
+ "start": 27,
+ "type": "SHORT"
+ },
+ {
+ "end": 33,
+ "start": 31,
+ "type": "CLIENT_INITIALIZATION"
+ },
+ {
+ "end": 38,
+ "start": 34,
+ "type": "REQUEST_INITIALIZATION"
+ },
+ {
+ "end": 41,
+ "start": 39,
+ "type": "REQUEST_EXECUTION"
+ },
+ {
+ "end": 45,
+ "start": 42,
+ "type": "RESPONSE_HANDLING"
+ }
+ ]
+ },
+ {
+ "clientMethod": {
+ "method": {
+ "service": {
+ "shortName": "MetadataService"
+ },
+ "shortName": "QueryContextLineageSubgraph"
+ }
+ },
+ "file": "aiplatform_generated_aiplatform_v1_metadata_service_query_context_lineage_subgraph_sync.py",
+ "regionTag": "aiplatform_generated_aiplatform_v1_MetadataService_QueryContextLineageSubgraph_sync",
+ "segments": [
+ {
+ "end": 44,
+ "start": 27,
+ "type": "FULL"
+ },
+ {
+ "end": 44,
+ "start": 27,
+ "type": "SHORT"
+ },
+ {
+ "end": 33,
+ "start": 31,
+ "type": "CLIENT_INITIALIZATION"
+ },
+ {
+ "end": 38,
+ "start": 34,
+ "type": "REQUEST_INITIALIZATION"
+ },
+ {
+ "end": 41,
+ "start": 39,
+ "type": "REQUEST_EXECUTION"
+ },
+ {
+ "end": 45,
+ "start": 42,
+ "type": "RESPONSE_HANDLING"
+ }
+ ]
+ },
+ {
+ "clientMethod": {
+ "async": true,
+ "method": {
+ "service": {
+ "shortName": "MetadataService"
+ },
+ "shortName": "QueryExecutionInputsAndOutputs"
+ }
+ },
+ "file": "aiplatform_generated_aiplatform_v1_metadata_service_query_execution_inputs_and_outputs_async.py",
+ "regionTag": "aiplatform_generated_aiplatform_v1_MetadataService_QueryExecutionInputsAndOutputs_async",
+ "segments": [
+ {
+ "end": 44,
+ "start": 27,
+ "type": "FULL"
+ },
+ {
+ "end": 44,
+ "start": 27,
+ "type": "SHORT"
+ },
+ {
+ "end": 33,
+ "start": 31,
+ "type": "CLIENT_INITIALIZATION"
+ },
+ {
+ "end": 38,
+ "start": 34,
+ "type": "REQUEST_INITIALIZATION"
+ },
+ {
+ "end": 41,
+ "start": 39,
+ "type": "REQUEST_EXECUTION"
+ },
+ {
+ "end": 45,
+ "start": 42,
+ "type": "RESPONSE_HANDLING"
+ }
+ ]
+ },
+ {
+ "clientMethod": {
+ "method": {
+ "service": {
+ "shortName": "MetadataService"
+ },
+ "shortName": "QueryExecutionInputsAndOutputs"
+ }
+ },
+ "file": "aiplatform_generated_aiplatform_v1_metadata_service_query_execution_inputs_and_outputs_sync.py",
+ "regionTag": "aiplatform_generated_aiplatform_v1_MetadataService_QueryExecutionInputsAndOutputs_sync",
+ "segments": [
+ {
+ "end": 44,
+ "start": 27,
+ "type": "FULL"
+ },
+ {
+ "end": 44,
+ "start": 27,
+ "type": "SHORT"
+ },
+ {
+ "end": 33,
+ "start": 31,
+ "type": "CLIENT_INITIALIZATION"
+ },
+ {
+ "end": 38,
+ "start": 34,
+ "type": "REQUEST_INITIALIZATION"
+ },
+ {
+ "end": 41,
+ "start": 39,
+ "type": "REQUEST_EXECUTION"
+ },
+ {
+ "end": 45,
+ "start": 42,
+ "type": "RESPONSE_HANDLING"
+ }
+ ]
+ },
+ {
+ "clientMethod": {
+ "async": true,
+ "method": {
+ "service": {
+ "shortName": "MetadataService"
+ },
+ "shortName": "UpdateArtifact"
+ }
+ },
+ "file": "aiplatform_generated_aiplatform_v1_metadata_service_update_artifact_async.py",
+ "regionTag": "aiplatform_generated_aiplatform_v1_MetadataService_UpdateArtifact_async",
+ "segments": [
+ {
+ "end": 43,
+ "start": 27,
+ "type": "FULL"
+ },
+ {
+ "end": 43,
+ "start": 27,
+ "type": "SHORT"
+ },
+ {
+ "end": 33,
+ "start": 31,
+ "type": "CLIENT_INITIALIZATION"
+ },
+ {
+ "end": 37,
+ "start": 34,
+ "type": "REQUEST_INITIALIZATION"
+ },
+ {
+ "end": 40,
+ "start": 38,
+ "type": "REQUEST_EXECUTION"
+ },
+ {
+ "end": 44,
+ "start": 41,
+ "type": "RESPONSE_HANDLING"
+ }
+ ]
+ },
+ {
+ "clientMethod": {
+ "method": {
+ "service": {
+ "shortName": "MetadataService"
+ },
+ "shortName": "UpdateArtifact"
+ }
+ },
+ "file": "aiplatform_generated_aiplatform_v1_metadata_service_update_artifact_sync.py",
+ "regionTag": "aiplatform_generated_aiplatform_v1_MetadataService_UpdateArtifact_sync",
+ "segments": [
+ {
+ "end": 43,
+ "start": 27,
+ "type": "FULL"
+ },
+ {
+ "end": 43,
+ "start": 27,
+ "type": "SHORT"
+ },
+ {
+ "end": 33,
+ "start": 31,
+ "type": "CLIENT_INITIALIZATION"
+ },
+ {
+ "end": 37,
+ "start": 34,
+ "type": "REQUEST_INITIALIZATION"
+ },
+ {
+ "end": 40,
+ "start": 38,
+ "type": "REQUEST_EXECUTION"
+ },
+ {
+ "end": 44,
+ "start": 41,
+ "type": "RESPONSE_HANDLING"
+ }
+ ]
+ },
+ {
+ "clientMethod": {
+ "async": true,
+ "method": {
+ "service": {
+ "shortName": "MetadataService"
+ },
+ "shortName": "UpdateContext"
+ }
+ },
+ "file": "aiplatform_generated_aiplatform_v1_metadata_service_update_context_async.py",
+ "regionTag": "aiplatform_generated_aiplatform_v1_MetadataService_UpdateContext_async",
+ "segments": [
+ {
+ "end": 43,
+ "start": 27,
+ "type": "FULL"
+ },
+ {
+ "end": 43,
+ "start": 27,
+ "type": "SHORT"
+ },
+ {
+ "end": 33,
+ "start": 31,
+ "type": "CLIENT_INITIALIZATION"
+ },
+ {
+ "end": 37,
+ "start": 34,
+ "type": "REQUEST_INITIALIZATION"
+ },
+ {
+ "end": 40,
+ "start": 38,
+ "type": "REQUEST_EXECUTION"
+ },
+ {
+ "end": 44,
+ "start": 41,
+ "type": "RESPONSE_HANDLING"
+ }
+ ]
+ },
+ {
+ "clientMethod": {
+ "method": {
+ "service": {
+ "shortName": "MetadataService"
+ },
+ "shortName": "UpdateContext"
+ }
+ },
+ "file": "aiplatform_generated_aiplatform_v1_metadata_service_update_context_sync.py",
+ "regionTag": "aiplatform_generated_aiplatform_v1_MetadataService_UpdateContext_sync",
+ "segments": [
+ {
+ "end": 43,
+ "start": 27,
+ "type": "FULL"
+ },
+ {
+ "end": 43,
+ "start": 27,
+ "type": "SHORT"
+ },
+ {
+ "end": 33,
+ "start": 31,
+ "type": "CLIENT_INITIALIZATION"
+ },
+ {
+ "end": 37,
+ "start": 34,
+ "type": "REQUEST_INITIALIZATION"
+ },
+ {
+ "end": 40,
+ "start": 38,
+ "type": "REQUEST_EXECUTION"
+ },
+ {
+ "end": 44,
+ "start": 41,
+ "type": "RESPONSE_HANDLING"
+ }
+ ]
+ },
+ {
+ "clientMethod": {
+ "async": true,
+ "method": {
+ "service": {
+ "shortName": "MetadataService"
+ },
+ "shortName": "UpdateExecution"
+ }
+ },
+ "file": "aiplatform_generated_aiplatform_v1_metadata_service_update_execution_async.py",
+ "regionTag": "aiplatform_generated_aiplatform_v1_MetadataService_UpdateExecution_async",
+ "segments": [
+ {
+ "end": 43,
+ "start": 27,
+ "type": "FULL"
+ },
+ {
+ "end": 43,
+ "start": 27,
+ "type": "SHORT"
+ },
+ {
+ "end": 33,
+ "start": 31,
+ "type": "CLIENT_INITIALIZATION"
+ },
+ {
+ "end": 37,
+ "start": 34,
+ "type": "REQUEST_INITIALIZATION"
+ },
+ {
+ "end": 40,
+ "start": 38,
+ "type": "REQUEST_EXECUTION"
+ },
+ {
+ "end": 44,
+ "start": 41,
+ "type": "RESPONSE_HANDLING"
+ }
+ ]
+ },
+ {
+ "clientMethod": {
+ "method": {
+ "service": {
+ "shortName": "MetadataService"
+ },
+ "shortName": "UpdateExecution"
+ }
+ },
+ "file": "aiplatform_generated_aiplatform_v1_metadata_service_update_execution_sync.py",
+ "regionTag": "aiplatform_generated_aiplatform_v1_MetadataService_UpdateExecution_sync",
+ "segments": [
+ {
+ "end": 43,
+ "start": 27,
+ "type": "FULL"
+ },
+ {
+ "end": 43,
+ "start": 27,
+ "type": "SHORT"
+ },
+ {
+ "end": 33,
+ "start": 31,
+ "type": "CLIENT_INITIALIZATION"
+ },
+ {
+ "end": 37,
+ "start": 34,
+ "type": "REQUEST_INITIALIZATION"
+ },
+ {
+ "end": 40,
+ "start": 38,
+ "type": "REQUEST_EXECUTION"
+ },
+ {
+ "end": 44,
+ "start": 41,
+ "type": "RESPONSE_HANDLING"
+ }
+ ]
+ },
+ {
+ "clientMethod": {
+ "async": true,
+ "method": {
+ "service": {
+ "shortName": "MigrationService"
+ },
+ "shortName": "BatchMigrateResources"
+ }
+ },
+ "file": "aiplatform_generated_aiplatform_v1_migration_service_batch_migrate_resources_async.py",
+ "regionTag": "aiplatform_generated_aiplatform_v1_MigrationService_BatchMigrateResources_async",
+ "segments": [
+ {
+ "end": 54,
+ "start": 27,
+ "type": "FULL"
+ },
+ {
+ "end": 54,
+ "start": 27,
+ "type": "SHORT"
+ },
+ {
+ "end": 33,
+ "start": 31,
+ "type": "CLIENT_INITIALIZATION"
+ },
+ {
+ "end": 44,
+ "start": 34,
+ "type": "REQUEST_INITIALIZATION"
+ },
+ {
+ "end": 51,
+ "start": 45,
+ "type": "REQUEST_EXECUTION"
+ },
+ {
+ "end": 55,
+ "start": 52,
+ "type": "RESPONSE_HANDLING"
+ }
+ ]
+ },
+ {
+ "clientMethod": {
+ "method": {
+ "service": {
+ "shortName": "MigrationService"
+ },
+ "shortName": "BatchMigrateResources"
+ }
+ },
+ "file": "aiplatform_generated_aiplatform_v1_migration_service_batch_migrate_resources_sync.py",
+ "regionTag": "aiplatform_generated_aiplatform_v1_MigrationService_BatchMigrateResources_sync",
+ "segments": [
+ {
+ "end": 54,
+ "start": 27,
+ "type": "FULL"
+ },
+ {
+ "end": 54,
+ "start": 27,
+ "type": "SHORT"
+ },
+ {
+ "end": 33,
+ "start": 31,
+ "type": "CLIENT_INITIALIZATION"
+ },
+ {
+ "end": 44,
+ "start": 34,
+ "type": "REQUEST_INITIALIZATION"
+ },
+ {
+ "end": 51,
+ "start": 45,
+ "type": "REQUEST_EXECUTION"
+ },
+ {
+ "end": 55,
+ "start": 52,
+ "type": "RESPONSE_HANDLING"
+ }
+ ]
+ },
+ {
+ "clientMethod": {
+ "async": true,
+ "method": {
+ "service": {
+ "shortName": "MigrationService"
+ },
+ "shortName": "SearchMigratableResources"
+ }
+ },
+ "file": "aiplatform_generated_aiplatform_v1_migration_service_search_migratable_resources_async.py",
+ "regionTag": "aiplatform_generated_aiplatform_v1_MigrationService_SearchMigratableResources_async",
+ "segments": [
+ {
+ "end": 45,
+ "start": 27,
+ "type": "FULL"
+ },
+ {
+ "end": 45,
+ "start": 27,
+ "type": "SHORT"
+ },
+ {
+ "end": 33,
+ "start": 31,
+ "type": "CLIENT_INITIALIZATION"
+ },
+ {
+ "end": 38,
+ "start": 34,
+ "type": "REQUEST_INITIALIZATION"
+ },
+ {
+ "end": 41,
+ "start": 39,
+ "type": "REQUEST_EXECUTION"
+ },
+ {
+ "end": 46,
+ "start": 42,
+ "type": "RESPONSE_HANDLING"
+ }
+ ]
+ },
+ {
+ "clientMethod": {
+ "method": {
+ "service": {
+ "shortName": "MigrationService"
+ },
+ "shortName": "SearchMigratableResources"
+ }
+ },
+ "file": "aiplatform_generated_aiplatform_v1_migration_service_search_migratable_resources_sync.py",
+ "regionTag": "aiplatform_generated_aiplatform_v1_MigrationService_SearchMigratableResources_sync",
+ "segments": [
+ {
+ "end": 45,
+ "start": 27,
+ "type": "FULL"
+ },
+ {
+ "end": 45,
+ "start": 27,
+ "type": "SHORT"
+ },
+ {
+ "end": 33,
+ "start": 31,
+ "type": "CLIENT_INITIALIZATION"
+ },
+ {
+ "end": 38,
+ "start": 34,
+ "type": "REQUEST_INITIALIZATION"
+ },
+ {
+ "end": 41,
+ "start": 39,
+ "type": "REQUEST_EXECUTION"
+ },
+ {
+ "end": 46,
+ "start": 42,
+ "type": "RESPONSE_HANDLING"
+ }
+ ]
+ },
+ {
+ "clientMethod": {
+ "async": true,
+ "method": {
+ "service": {
+ "shortName": "ModelService"
+ },
+ "shortName": "DeleteModel"
+ }
+ },
+ "file": "aiplatform_generated_aiplatform_v1_model_service_delete_model_async.py",
+ "regionTag": "aiplatform_generated_aiplatform_v1_ModelService_DeleteModel_async",
+ "segments": [
+ {
+ "end": 48,
+ "start": 27,
+ "type": "FULL"
+ },
+ {
+ "end": 48,
+ "start": 27,
+ "type": "SHORT"
+ },
+ {
+ "end": 33,
+ "start": 31,
+ "type": "CLIENT_INITIALIZATION"
+ },
+ {
+ "end": 38,
+ "start": 34,
+ "type": "REQUEST_INITIALIZATION"
+ },
+ {
+ "end": 45,
+ "start": 39,
+ "type": "REQUEST_EXECUTION"
+ },
+ {
+ "end": 49,
+ "start": 46,
+ "type": "RESPONSE_HANDLING"
+ }
+ ]
+ },
+ {
+ "clientMethod": {
+ "method": {
+ "service": {
+ "shortName": "ModelService"
+ },
+ "shortName": "DeleteModel"
+ }
+ },
+ "file": "aiplatform_generated_aiplatform_v1_model_service_delete_model_sync.py",
+ "regionTag": "aiplatform_generated_aiplatform_v1_ModelService_DeleteModel_sync",
+ "segments": [
+ {
+ "end": 48,
+ "start": 27,
+ "type": "FULL"
+ },
+ {
+ "end": 48,
+ "start": 27,
+ "type": "SHORT"
+ },
+ {
+ "end": 33,
+ "start": 31,
+ "type": "CLIENT_INITIALIZATION"
+ },
+ {
+ "end": 38,
+ "start": 34,
+ "type": "REQUEST_INITIALIZATION"
+ },
+ {
+ "end": 45,
+ "start": 39,
+ "type": "REQUEST_EXECUTION"
+ },
+ {
+ "end": 49,
+ "start": 46,
+ "type": "RESPONSE_HANDLING"
+ }
+ ]
+ },
+ {
+ "clientMethod": {
+ "async": true,
+ "method": {
+ "service": {
+ "shortName": "ModelService"
+ },
+ "shortName": "ExportModel"
+ }
+ },
+ "file": "aiplatform_generated_aiplatform_v1_model_service_export_model_async.py",
+ "regionTag": "aiplatform_generated_aiplatform_v1_ModelService_ExportModel_async",
+ "segments": [
+ {
+ "end": 48,
+ "start": 27,
+ "type": "FULL"
+ },
+ {
+ "end": 48,
+ "start": 27,
+ "type": "SHORT"
+ },
+ {
+ "end": 33,
+ "start": 31,
+ "type": "CLIENT_INITIALIZATION"
+ },
+ {
+ "end": 38,
+ "start": 34,
+ "type": "REQUEST_INITIALIZATION"
+ },
+ {
+ "end": 45,
+ "start": 39,
+ "type": "REQUEST_EXECUTION"
+ },
+ {
+ "end": 49,
+ "start": 46,
+ "type": "RESPONSE_HANDLING"
+ }
+ ]
+ },
+ {
+ "clientMethod": {
+ "method": {
+ "service": {
+ "shortName": "ModelService"
+ },
+ "shortName": "ExportModel"
+ }
+ },
+ "file": "aiplatform_generated_aiplatform_v1_model_service_export_model_sync.py",
+ "regionTag": "aiplatform_generated_aiplatform_v1_ModelService_ExportModel_sync",
+ "segments": [
+ {
+ "end": 48,
+ "start": 27,
+ "type": "FULL"
+ },
+ {
+ "end": 48,
+ "start": 27,
+ "type": "SHORT"
+ },
+ {
+ "end": 33,
+ "start": 31,
+ "type": "CLIENT_INITIALIZATION"
+ },
+ {
+ "end": 38,
+ "start": 34,
+ "type": "REQUEST_INITIALIZATION"
+ },
+ {
+ "end": 45,
+ "start": 39,
+ "type": "REQUEST_EXECUTION"
+ },
+ {
+ "end": 49,
+ "start": 46,
+ "type": "RESPONSE_HANDLING"
+ }
+ ]
+ },
+ {
+ "clientMethod": {
+ "async": true,
+ "method": {
+ "service": {
+ "shortName": "ModelService"
+ },
+ "shortName": "GetModelEvaluationSlice"
+ }
+ },
+ "file": "aiplatform_generated_aiplatform_v1_model_service_get_model_evaluation_slice_async.py",
+ "regionTag": "aiplatform_generated_aiplatform_v1_ModelService_GetModelEvaluationSlice_async",
+ "segments": [
+ {
+ "end": 44,
+ "start": 27,
+ "type": "FULL"
+ },
+ {
+ "end": 44,
+ "start": 27,
+ "type": "SHORT"
+ },
+ {
+ "end": 33,
+ "start": 31,
+ "type": "CLIENT_INITIALIZATION"
+ },
+ {
+ "end": 38,
+ "start": 34,
+ "type": "REQUEST_INITIALIZATION"
+ },
+ {
+ "end": 41,
+ "start": 39,
+ "type": "REQUEST_EXECUTION"
+ },
+ {
+ "end": 45,
+ "start": 42,
+ "type": "RESPONSE_HANDLING"
+ }
+ ]
+ },
+ {
+ "clientMethod": {
+ "method": {
+ "service": {
+ "shortName": "ModelService"
+ },
+ "shortName": "GetModelEvaluationSlice"
+ }
+ },
+ "file": "aiplatform_generated_aiplatform_v1_model_service_get_model_evaluation_slice_sync.py",
+ "regionTag": "aiplatform_generated_aiplatform_v1_ModelService_GetModelEvaluationSlice_sync",
+ "segments": [
+ {
+ "end": 44,
+ "start": 27,
+ "type": "FULL"
+ },
+ {
+ "end": 44,
+ "start": 27,
+ "type": "SHORT"
+ },
+ {
+ "end": 33,
+ "start": 31,
+ "type": "CLIENT_INITIALIZATION"
+ },
+ {
+ "end": 38,
+ "start": 34,
+ "type": "REQUEST_INITIALIZATION"
+ },
+ {
+ "end": 41,
+ "start": 39,
+ "type": "REQUEST_EXECUTION"
+ },
+ {
+ "end": 45,
+ "start": 42,
+ "type": "RESPONSE_HANDLING"
+ }
+ ]
+ },
+ {
+ "clientMethod": {
+ "async": true,
+ "method": {
+ "service": {
+ "shortName": "ModelService"
+ },
+ "shortName": "GetModelEvaluation"
+ }
+ },
+ "file": "aiplatform_generated_aiplatform_v1_model_service_get_model_evaluation_async.py",
+ "regionTag": "aiplatform_generated_aiplatform_v1_ModelService_GetModelEvaluation_async",
+ "segments": [
+ {
+ "end": 44,
+ "start": 27,
+ "type": "FULL"
+ },
+ {
+ "end": 44,
+ "start": 27,
+ "type": "SHORT"
+ },
+ {
+ "end": 33,
+ "start": 31,
+ "type": "CLIENT_INITIALIZATION"
+ },
+ {
+ "end": 38,
+ "start": 34,
+ "type": "REQUEST_INITIALIZATION"
+ },
+ {
+ "end": 41,
+ "start": 39,
+ "type": "REQUEST_EXECUTION"
+ },
+ {
+ "end": 45,
+ "start": 42,
+ "type": "RESPONSE_HANDLING"
+ }
+ ]
+ },
+ {
+ "clientMethod": {
+ "method": {
+ "service": {
+ "shortName": "ModelService"
+ },
+ "shortName": "GetModelEvaluation"
+ }
+ },
+ "file": "aiplatform_generated_aiplatform_v1_model_service_get_model_evaluation_sync.py",
+ "regionTag": "aiplatform_generated_aiplatform_v1_ModelService_GetModelEvaluation_sync",
+ "segments": [
+ {
+ "end": 44,
+ "start": 27,
+ "type": "FULL"
+ },
+ {
+ "end": 44,
+ "start": 27,
+ "type": "SHORT"
+ },
+ {
+ "end": 33,
+ "start": 31,
+ "type": "CLIENT_INITIALIZATION"
+ },
+ {
+ "end": 38,
+ "start": 34,
+ "type": "REQUEST_INITIALIZATION"
+ },
+ {
+ "end": 41,
+ "start": 39,
+ "type": "REQUEST_EXECUTION"
+ },
+ {
+ "end": 45,
+ "start": 42,
+ "type": "RESPONSE_HANDLING"
+ }
+ ]
+ },
+ {
+ "clientMethod": {
+ "async": true,
+ "method": {
+ "service": {
+ "shortName": "ModelService"
+ },
+ "shortName": "GetModel"
+ }
+ },
+ "file": "aiplatform_generated_aiplatform_v1_model_service_get_model_async.py",
+ "regionTag": "aiplatform_generated_aiplatform_v1_ModelService_GetModel_async",
+ "segments": [
+ {
+ "end": 44,
+ "start": 27,
+ "type": "FULL"
+ },
+ {
+ "end": 44,
+ "start": 27,
+ "type": "SHORT"
+ },
+ {
+ "end": 33,
+ "start": 31,
+ "type": "CLIENT_INITIALIZATION"
+ },
+ {
+ "end": 38,
+ "start": 34,
+ "type": "REQUEST_INITIALIZATION"
+ },
+ {
+ "end": 41,
+ "start": 39,
+ "type": "REQUEST_EXECUTION"
+ },
+ {
+ "end": 45,
+ "start": 42,
+ "type": "RESPONSE_HANDLING"
+ }
+ ]
+ },
+ {
+ "clientMethod": {
+ "method": {
+ "service": {
+ "shortName": "ModelService"
+ },
+ "shortName": "GetModel"
+ }
+ },
+ "file": "aiplatform_generated_aiplatform_v1_model_service_get_model_sync.py",
+ "regionTag": "aiplatform_generated_aiplatform_v1_ModelService_GetModel_sync",
+ "segments": [
+ {
+ "end": 44,
+ "start": 27,
+ "type": "FULL"
+ },
+ {
+ "end": 44,
+ "start": 27,
+ "type": "SHORT"
+ },
+ {
+ "end": 33,
+ "start": 31,
+ "type": "CLIENT_INITIALIZATION"
+ },
+ {
+ "end": 38,
+ "start": 34,
+ "type": "REQUEST_INITIALIZATION"
+ },
+ {
+ "end": 41,
+ "start": 39,
+ "type": "REQUEST_EXECUTION"
+ },
+ {
+ "end": 45,
+ "start": 42,
+ "type": "RESPONSE_HANDLING"
+ }
+ ]
+ },
+ {
+ "clientMethod": {
+ "async": true,
+ "method": {
+ "service": {
+ "shortName": "ModelService"
+ },
+ "shortName": "ListModelEvaluationSlices"
+ }
+ },
+ "file": "aiplatform_generated_aiplatform_v1_model_service_list_model_evaluation_slices_async.py",
+ "regionTag": "aiplatform_generated_aiplatform_v1_ModelService_ListModelEvaluationSlices_async",
+ "segments": [
+ {
+ "end": 45,
+ "start": 27,
+ "type": "FULL"
+ },
+ {
+ "end": 45,
+ "start": 27,
+ "type": "SHORT"
+ },
+ {
+ "end": 33,
+ "start": 31,
+ "type": "CLIENT_INITIALIZATION"
+ },
+ {
+ "end": 38,
+ "start": 34,
+ "type": "REQUEST_INITIALIZATION"
+ },
+ {
+ "end": 41,
+ "start": 39,
+ "type": "REQUEST_EXECUTION"
+ },
+ {
+ "end": 46,
+ "start": 42,
+ "type": "RESPONSE_HANDLING"
+ }
+ ]
+ },
+ {
+ "clientMethod": {
+ "method": {
+ "service": {
+ "shortName": "ModelService"
+ },
+ "shortName": "ListModelEvaluationSlices"
+ }
+ },
+ "file": "aiplatform_generated_aiplatform_v1_model_service_list_model_evaluation_slices_sync.py",
+ "regionTag": "aiplatform_generated_aiplatform_v1_ModelService_ListModelEvaluationSlices_sync",
+ "segments": [
+ {
+ "end": 45,
+ "start": 27,
+ "type": "FULL"
+ },
+ {
+ "end": 45,
+ "start": 27,
+ "type": "SHORT"
+ },
+ {
+ "end": 33,
+ "start": 31,
+ "type": "CLIENT_INITIALIZATION"
+ },
+ {
+ "end": 38,
+ "start": 34,
+ "type": "REQUEST_INITIALIZATION"
+ },
+ {
+ "end": 41,
+ "start": 39,
+ "type": "REQUEST_EXECUTION"
+ },
+ {
+ "end": 46,
+ "start": 42,
+ "type": "RESPONSE_HANDLING"
+ }
+ ]
+ },
+ {
+ "clientMethod": {
+ "async": true,
+ "method": {
+ "service": {
+ "shortName": "ModelService"
+ },
+ "shortName": "ListModelEvaluations"
+ }
+ },
+ "file": "aiplatform_generated_aiplatform_v1_model_service_list_model_evaluations_async.py",
+ "regionTag": "aiplatform_generated_aiplatform_v1_ModelService_ListModelEvaluations_async",
+ "segments": [
+ {
+ "end": 45,
+ "start": 27,
+ "type": "FULL"
+ },
+ {
+ "end": 45,
+ "start": 27,
+ "type": "SHORT"
+ },
+ {
+ "end": 33,
+ "start": 31,
+ "type": "CLIENT_INITIALIZATION"
+ },
+ {
+ "end": 38,
+ "start": 34,
+ "type": "REQUEST_INITIALIZATION"
+ },
+ {
+ "end": 41,
+ "start": 39,
+ "type": "REQUEST_EXECUTION"
+ },
+ {
+ "end": 46,
+ "start": 42,
+ "type": "RESPONSE_HANDLING"
+ }
+ ]
+ },
+ {
+ "clientMethod": {
+ "method": {
+ "service": {
+ "shortName": "ModelService"
+ },
+ "shortName": "ListModelEvaluations"
+ }
+ },
+ "file": "aiplatform_generated_aiplatform_v1_model_service_list_model_evaluations_sync.py",
+ "regionTag": "aiplatform_generated_aiplatform_v1_ModelService_ListModelEvaluations_sync",
+ "segments": [
+ {
+ "end": 45,
+ "start": 27,
+ "type": "FULL"
+ },
+ {
+ "end": 45,
+ "start": 27,
+ "type": "SHORT"
+ },
+ {
+ "end": 33,
+ "start": 31,
+ "type": "CLIENT_INITIALIZATION"
+ },
+ {
+ "end": 38,
+ "start": 34,
+ "type": "REQUEST_INITIALIZATION"
+ },
+ {
+ "end": 41,
+ "start": 39,
+ "type": "REQUEST_EXECUTION"
+ },
+ {
+ "end": 46,
+ "start": 42,
+ "type": "RESPONSE_HANDLING"
+ }
+ ]
+ },
+ {
+ "clientMethod": {
+ "async": true,
+ "method": {
+ "service": {
+ "shortName": "ModelService"
+ },
+ "shortName": "ListModels"
+ }
+ },
+ "file": "aiplatform_generated_aiplatform_v1_model_service_list_models_async.py",
+ "regionTag": "aiplatform_generated_aiplatform_v1_ModelService_ListModels_async",
+ "segments": [
+ {
+ "end": 45,
+ "start": 27,
+ "type": "FULL"
+ },
+ {
+ "end": 45,
+ "start": 27,
+ "type": "SHORT"
+ },
+ {
+ "end": 33,
+ "start": 31,
+ "type": "CLIENT_INITIALIZATION"
+ },
+ {
+ "end": 38,
+ "start": 34,
+ "type": "REQUEST_INITIALIZATION"
+ },
+ {
+ "end": 41,
+ "start": 39,
+ "type": "REQUEST_EXECUTION"
+ },
+ {
+ "end": 46,
+ "start": 42,
+ "type": "RESPONSE_HANDLING"
+ }
+ ]
+ },
+ {
+ "clientMethod": {
+ "method": {
+ "service": {
+ "shortName": "ModelService"
+ },
+ "shortName": "ListModels"
+ }
+ },
+ "file": "aiplatform_generated_aiplatform_v1_model_service_list_models_sync.py",
+ "regionTag": "aiplatform_generated_aiplatform_v1_ModelService_ListModels_sync",
+ "segments": [
+ {
+ "end": 45,
+ "start": 27,
+ "type": "FULL"
+ },
+ {
+ "end": 45,
+ "start": 27,
+ "type": "SHORT"
+ },
+ {
+ "end": 33,
+ "start": 31,
+ "type": "CLIENT_INITIALIZATION"
+ },
+ {
+ "end": 38,
+ "start": 34,
+ "type": "REQUEST_INITIALIZATION"
+ },
+ {
+ "end": 41,
+ "start": 39,
+ "type": "REQUEST_EXECUTION"
+ },
+ {
+ "end": 46,
+ "start": 42,
+ "type": "RESPONSE_HANDLING"
+ }
+ ]
+ },
+ {
+ "clientMethod": {
+ "async": true,
+ "method": {
+ "service": {
+ "shortName": "ModelService"
+ },
+ "shortName": "UpdateModel"
+ }
+ },
+ "file": "aiplatform_generated_aiplatform_v1_model_service_update_model_async.py",
+ "regionTag": "aiplatform_generated_aiplatform_v1_ModelService_UpdateModel_async",
+ "segments": [
+ {
+ "end": 47,
+ "start": 27,
+ "type": "FULL"
+ },
+ {
+ "end": 47,
+ "start": 27,
+ "type": "SHORT"
+ },
+ {
+ "end": 33,
+ "start": 31,
+ "type": "CLIENT_INITIALIZATION"
+ },
+ {
+ "end": 41,
+ "start": 34,
+ "type": "REQUEST_INITIALIZATION"
+ },
+ {
+ "end": 44,
+ "start": 42,
+ "type": "REQUEST_EXECUTION"
+ },
+ {
+ "end": 48,
+ "start": 45,
+ "type": "RESPONSE_HANDLING"
+ }
+ ]
+ },
+ {
+ "clientMethod": {
+ "method": {
+ "service": {
+ "shortName": "ModelService"
+ },
+ "shortName": "UpdateModel"
+ }
+ },
+ "file": "aiplatform_generated_aiplatform_v1_model_service_update_model_sync.py",
+ "regionTag": "aiplatform_generated_aiplatform_v1_ModelService_UpdateModel_sync",
+ "segments": [
+ {
+ "end": 47,
+ "start": 27,
+ "type": "FULL"
+ },
+ {
+ "end": 47,
+ "start": 27,
+ "type": "SHORT"
+ },
+ {
+ "end": 33,
+ "start": 31,
+ "type": "CLIENT_INITIALIZATION"
+ },
+ {
+ "end": 41,
+ "start": 34,
+ "type": "REQUEST_INITIALIZATION"
+ },
+ {
+ "end": 44,
+ "start": 42,
+ "type": "REQUEST_EXECUTION"
+ },
+ {
+ "end": 48,
+ "start": 45,
+ "type": "RESPONSE_HANDLING"
+ }
+ ]
+ },
+ {
+ "clientMethod": {
+ "async": true,
+ "method": {
+ "service": {
+ "shortName": "ModelService"
+ },
+ "shortName": "UploadModel"
+ }
+ },
+ "file": "aiplatform_generated_aiplatform_v1_model_service_upload_model_async.py",
+ "regionTag": "aiplatform_generated_aiplatform_v1_ModelService_UploadModel_async",
+ "segments": [
+ {
+ "end": 52,
+ "start": 27,
+ "type": "FULL"
+ },
+ {
+ "end": 52,
+ "start": 27,
+ "type": "SHORT"
+ },
+ {
+ "end": 33,
+ "start": 31,
+ "type": "CLIENT_INITIALIZATION"
+ },
+ {
+ "end": 42,
+ "start": 34,
+ "type": "REQUEST_INITIALIZATION"
+ },
+ {
+ "end": 49,
+ "start": 43,
+ "type": "REQUEST_EXECUTION"
+ },
+ {
+ "end": 53,
+ "start": 50,
+ "type": "RESPONSE_HANDLING"
+ }
+ ]
+ },
+ {
+ "clientMethod": {
+ "method": {
+ "service": {
+ "shortName": "ModelService"
+ },
+ "shortName": "UploadModel"
+ }
+ },
+ "file": "aiplatform_generated_aiplatform_v1_model_service_upload_model_sync.py",
+ "regionTag": "aiplatform_generated_aiplatform_v1_ModelService_UploadModel_sync",
+ "segments": [
+ {
+ "end": 52,
+ "start": 27,
+ "type": "FULL"
+ },
+ {
+ "end": 52,
+ "start": 27,
+ "type": "SHORT"
+ },
+ {
+ "end": 33,
+ "start": 31,
+ "type": "CLIENT_INITIALIZATION"
+ },
+ {
+ "end": 42,
+ "start": 34,
+ "type": "REQUEST_INITIALIZATION"
+ },
+ {
+ "end": 49,
+ "start": 43,
+ "type": "REQUEST_EXECUTION"
+ },
+ {
+ "end": 53,
+ "start": 50,
+ "type": "RESPONSE_HANDLING"
+ }
+ ]
+ },
+ {
+ "clientMethod": {
+ "async": true,
+ "method": {
+ "service": {
+ "shortName": "PipelineService"
+ },
+ "shortName": "CancelPipelineJob"
+ }
+ },
+ "file": "aiplatform_generated_aiplatform_v1_pipeline_service_cancel_pipeline_job_async.py",
+ "regionTag": "aiplatform_generated_aiplatform_v1_PipelineService_CancelPipelineJob_async",
+ "segments": [
+ {
+ "end": 42,
+ "start": 27,
+ "type": "FULL"
+ },
+ {
+ "end": 42,
+ "start": 27,
+ "type": "SHORT"
+ },
+ {
+ "end": 33,
+ "start": 31,
+ "type": "CLIENT_INITIALIZATION"
+ },
+ {
+ "end": 38,
+ "start": 34,
+ "type": "REQUEST_INITIALIZATION"
+ },
+ {
+ "start": 39,
+ "type": "REQUEST_EXECUTION"
+ },
+ {
+ "end": 43,
+ "type": "RESPONSE_HANDLING"
+ }
+ ]
+ },
+ {
+ "clientMethod": {
+ "method": {
+ "service": {
+ "shortName": "PipelineService"
+ },
+ "shortName": "CancelPipelineJob"
+ }
+ },
+ "file": "aiplatform_generated_aiplatform_v1_pipeline_service_cancel_pipeline_job_sync.py",
+ "regionTag": "aiplatform_generated_aiplatform_v1_PipelineService_CancelPipelineJob_sync",
+ "segments": [
+ {
+ "end": 42,
+ "start": 27,
+ "type": "FULL"
+ },
+ {
+ "end": 42,
+ "start": 27,
+ "type": "SHORT"
+ },
+ {
+ "end": 33,
+ "start": 31,
+ "type": "CLIENT_INITIALIZATION"
+ },
+ {
+ "end": 38,
+ "start": 34,
+ "type": "REQUEST_INITIALIZATION"
+ },
+ {
+ "start": 39,
+ "type": "REQUEST_EXECUTION"
+ },
+ {
+ "end": 43,
+ "type": "RESPONSE_HANDLING"
+ }
+ ]
+ },
+ {
+ "clientMethod": {
+ "async": true,
+ "method": {
+ "service": {
+ "shortName": "PipelineService"
+ },
+ "shortName": "CancelTrainingPipeline"
+ }
+ },
+ "file": "aiplatform_generated_aiplatform_v1_pipeline_service_cancel_training_pipeline_async.py",
+ "regionTag": "aiplatform_generated_aiplatform_v1_PipelineService_CancelTrainingPipeline_async",
+ "segments": [
+ {
+ "end": 42,
+ "start": 27,
+ "type": "FULL"
+ },
+ {
+ "end": 42,
+ "start": 27,
+ "type": "SHORT"
+ },
+ {
+ "end": 33,
+ "start": 31,
+ "type": "CLIENT_INITIALIZATION"
+ },
+ {
+ "end": 38,
+ "start": 34,
+ "type": "REQUEST_INITIALIZATION"
+ },
+ {
+ "start": 39,
+ "type": "REQUEST_EXECUTION"
+ },
+ {
+ "end": 43,
+ "type": "RESPONSE_HANDLING"
+ }
+ ]
+ },
+ {
+ "clientMethod": {
+ "method": {
+ "service": {
+ "shortName": "PipelineService"
+ },
+ "shortName": "CancelTrainingPipeline"
+ }
+ },
+ "file": "aiplatform_generated_aiplatform_v1_pipeline_service_cancel_training_pipeline_sync.py",
+ "regionTag": "aiplatform_generated_aiplatform_v1_PipelineService_CancelTrainingPipeline_sync",
+ "segments": [
+ {
+ "end": 42,
+ "start": 27,
+ "type": "FULL"
+ },
+ {
+ "end": 42,
+ "start": 27,
+ "type": "SHORT"
+ },
+ {
+ "end": 33,
+ "start": 31,
+ "type": "CLIENT_INITIALIZATION"
+ },
+ {
+ "end": 38,
+ "start": 34,
+ "type": "REQUEST_INITIALIZATION"
+ },
+ {
+ "start": 39,
+ "type": "REQUEST_EXECUTION"
+ },
+ {
+ "end": 43,
+ "type": "RESPONSE_HANDLING"
+ }
+ ]
+ },
+ {
+ "clientMethod": {
+ "async": true,
+ "method": {
+ "service": {
+ "shortName": "PipelineService"
+ },
+ "shortName": "CreatePipelineJob"
+ }
+ },
+ "file": "aiplatform_generated_aiplatform_v1_pipeline_service_create_pipeline_job_async.py",
+ "regionTag": "aiplatform_generated_aiplatform_v1_PipelineService_CreatePipelineJob_async",
+ "segments": [
+ {
+ "end": 44,
+ "start": 27,
+ "type": "FULL"
+ },
+ {
+ "end": 44,
+ "start": 27,
+ "type": "SHORT"
+ },
+ {
+ "end": 33,
+ "start": 31,
+ "type": "CLIENT_INITIALIZATION"
+ },
+ {
+ "end": 38,
+ "start": 34,
+ "type": "REQUEST_INITIALIZATION"
+ },
+ {
+ "end": 41,
+ "start": 39,
+ "type": "REQUEST_EXECUTION"
+ },
+ {
+ "end": 45,
+ "start": 42,
+ "type": "RESPONSE_HANDLING"
+ }
+ ]
+ },
+ {
+ "clientMethod": {
+ "method": {
+ "service": {
+ "shortName": "PipelineService"
+ },
+ "shortName": "CreatePipelineJob"
+ }
+ },
+ "file": "aiplatform_generated_aiplatform_v1_pipeline_service_create_pipeline_job_sync.py",
+ "regionTag": "aiplatform_generated_aiplatform_v1_PipelineService_CreatePipelineJob_sync",
+ "segments": [
+ {
+ "end": 44,
+ "start": 27,
+ "type": "FULL"
+ },
+ {
+ "end": 44,
+ "start": 27,
+ "type": "SHORT"
+ },
+ {
+ "end": 33,
+ "start": 31,
+ "type": "CLIENT_INITIALIZATION"
+ },
+ {
+ "end": 38,
+ "start": 34,
+ "type": "REQUEST_INITIALIZATION"
+ },
+ {
+ "end": 41,
+ "start": 39,
+ "type": "REQUEST_EXECUTION"
+ },
+ {
+ "end": 45,
+ "start": 42,
+ "type": "RESPONSE_HANDLING"
+ }
+ ]
+ },
+ {
+ "clientMethod": {
+ "async": true,
+ "method": {
+ "service": {
+ "shortName": "PipelineService"
+ },
+ "shortName": "CreateTrainingPipeline"
+ }
+ },
+ "file": "aiplatform_generated_aiplatform_v1_pipeline_service_create_training_pipeline_async.py",
+ "regionTag": "aiplatform_generated_aiplatform_v1_PipelineService_CreateTrainingPipeline_async",
+ "segments": [
+ {
+ "end": 50,
+ "start": 27,
+ "type": "FULL"
+ },
+ {
+ "end": 50,
+ "start": 27,
+ "type": "SHORT"
+ },
+ {
+ "end": 33,
+ "start": 31,
+ "type": "CLIENT_INITIALIZATION"
+ },
+ {
+ "end": 44,
+ "start": 34,
+ "type": "REQUEST_INITIALIZATION"
+ },
+ {
+ "end": 47,
+ "start": 45,
+ "type": "REQUEST_EXECUTION"
+ },
+ {
+ "end": 51,
+ "start": 48,
+ "type": "RESPONSE_HANDLING"
+ }
+ ]
+ },
+ {
+ "clientMethod": {
+ "method": {
+ "service": {
+ "shortName": "PipelineService"
+ },
+ "shortName": "CreateTrainingPipeline"
+ }
+ },
+ "file": "aiplatform_generated_aiplatform_v1_pipeline_service_create_training_pipeline_sync.py",
+ "regionTag": "aiplatform_generated_aiplatform_v1_PipelineService_CreateTrainingPipeline_sync",
+ "segments": [
+ {
+ "end": 50,
+ "start": 27,
+ "type": "FULL"
+ },
+ {
+ "end": 50,
+ "start": 27,
+ "type": "SHORT"
+ },
+ {
+ "end": 33,
+ "start": 31,
+ "type": "CLIENT_INITIALIZATION"
+ },
+ {
+ "end": 44,
+ "start": 34,
+ "type": "REQUEST_INITIALIZATION"
+ },
+ {
+ "end": 47,
+ "start": 45,
+ "type": "REQUEST_EXECUTION"
+ },
+ {
+ "end": 51,
+ "start": 48,
+ "type": "RESPONSE_HANDLING"
+ }
+ ]
+ },
+ {
+ "clientMethod": {
+ "async": true,
+ "method": {
+ "service": {
+ "shortName": "PipelineService"
+ },
+ "shortName": "DeletePipelineJob"
+ }
+ },
+ "file": "aiplatform_generated_aiplatform_v1_pipeline_service_delete_pipeline_job_async.py",
+ "regionTag": "aiplatform_generated_aiplatform_v1_PipelineService_DeletePipelineJob_async",
+ "segments": [
+ {
+ "end": 48,
+ "start": 27,
+ "type": "FULL"
+ },
+ {
+ "end": 48,
+ "start": 27,
+ "type": "SHORT"
+ },
+ {
+ "end": 33,
+ "start": 31,
+ "type": "CLIENT_INITIALIZATION"
+ },
+ {
+ "end": 38,
+ "start": 34,
+ "type": "REQUEST_INITIALIZATION"
+ },
+ {
+ "end": 45,
+ "start": 39,
+ "type": "REQUEST_EXECUTION"
+ },
+ {
+ "end": 49,
+ "start": 46,
+ "type": "RESPONSE_HANDLING"
+ }
+ ]
+ },
+ {
+ "clientMethod": {
+ "method": {
+ "service": {
+ "shortName": "PipelineService"
+ },
+ "shortName": "DeletePipelineJob"
+ }
+ },
+ "file": "aiplatform_generated_aiplatform_v1_pipeline_service_delete_pipeline_job_sync.py",
+ "regionTag": "aiplatform_generated_aiplatform_v1_PipelineService_DeletePipelineJob_sync",
+ "segments": [
+ {
+ "end": 48,
+ "start": 27,
+ "type": "FULL"
+ },
+ {
+ "end": 48,
+ "start": 27,
+ "type": "SHORT"
+ },
+ {
+ "end": 33,
+ "start": 31,
+ "type": "CLIENT_INITIALIZATION"
+ },
+ {
+ "end": 38,
+ "start": 34,
+ "type": "REQUEST_INITIALIZATION"
+ },
+ {
+ "end": 45,
+ "start": 39,
+ "type": "REQUEST_EXECUTION"
+ },
+ {
+ "end": 49,
+ "start": 46,
+ "type": "RESPONSE_HANDLING"
+ }
+ ]
+ },
+ {
+ "clientMethod": {
+ "async": true,
+ "method": {
+ "service": {
+ "shortName": "PipelineService"
+ },
+ "shortName": "DeleteTrainingPipeline"
+ }
+ },
+ "file": "aiplatform_generated_aiplatform_v1_pipeline_service_delete_training_pipeline_async.py",
+ "regionTag": "aiplatform_generated_aiplatform_v1_PipelineService_DeleteTrainingPipeline_async",
+ "segments": [
+ {
+ "end": 48,
+ "start": 27,
+ "type": "FULL"
+ },
+ {
+ "end": 48,
+ "start": 27,
+ "type": "SHORT"
+ },
+ {
+ "end": 33,
+ "start": 31,
+ "type": "CLIENT_INITIALIZATION"
+ },
+ {
+ "end": 38,
+ "start": 34,
+ "type": "REQUEST_INITIALIZATION"
+ },
+ {
+ "end": 45,
+ "start": 39,
+ "type": "REQUEST_EXECUTION"
+ },
+ {
+ "end": 49,
+ "start": 46,
+ "type": "RESPONSE_HANDLING"
+ }
+ ]
+ },
+ {
+ "clientMethod": {
+ "method": {
+ "service": {
+ "shortName": "PipelineService"
+ },
+ "shortName": "DeleteTrainingPipeline"
+ }
+ },
+ "file": "aiplatform_generated_aiplatform_v1_pipeline_service_delete_training_pipeline_sync.py",
+ "regionTag": "aiplatform_generated_aiplatform_v1_PipelineService_DeleteTrainingPipeline_sync",
+ "segments": [
+ {
+ "end": 48,
+ "start": 27,
+ "type": "FULL"
+ },
+ {
+ "end": 48,
+ "start": 27,
+ "type": "SHORT"
+ },
+ {
+ "end": 33,
+ "start": 31,
+ "type": "CLIENT_INITIALIZATION"
+ },
+ {
+ "end": 38,
+ "start": 34,
+ "type": "REQUEST_INITIALIZATION"
+ },
+ {
+ "end": 45,
+ "start": 39,
+ "type": "REQUEST_EXECUTION"
+ },
+ {
+ "end": 49,
+ "start": 46,
+ "type": "RESPONSE_HANDLING"
+ }
+ ]
+ },
+ {
+ "clientMethod": {
+ "async": true,
+ "method": {
+ "service": {
+ "shortName": "PipelineService"
+ },
+ "shortName": "GetPipelineJob"
+ }
+ },
+ "file": "aiplatform_generated_aiplatform_v1_pipeline_service_get_pipeline_job_async.py",
+ "regionTag": "aiplatform_generated_aiplatform_v1_PipelineService_GetPipelineJob_async",
+ "segments": [
+ {
+ "end": 44,
+ "start": 27,
+ "type": "FULL"
+ },
+ {
+ "end": 44,
+ "start": 27,
+ "type": "SHORT"
+ },
+ {
+ "end": 33,
+ "start": 31,
+ "type": "CLIENT_INITIALIZATION"
+ },
+ {
+ "end": 38,
+ "start": 34,
+ "type": "REQUEST_INITIALIZATION"
+ },
+ {
+ "end": 41,
+ "start": 39,
+ "type": "REQUEST_EXECUTION"
+ },
+ {
+ "end": 45,
+ "start": 42,
+ "type": "RESPONSE_HANDLING"
+ }
+ ]
+ },
+ {
+ "clientMethod": {
+ "method": {
+ "service": {
+ "shortName": "PipelineService"
+ },
+ "shortName": "GetPipelineJob"
+ }
+ },
+ "file": "aiplatform_generated_aiplatform_v1_pipeline_service_get_pipeline_job_sync.py",
+ "regionTag": "aiplatform_generated_aiplatform_v1_PipelineService_GetPipelineJob_sync",
+ "segments": [
+ {
+ "end": 44,
+ "start": 27,
+ "type": "FULL"
+ },
+ {
+ "end": 44,
+ "start": 27,
+ "type": "SHORT"
+ },
+ {
+ "end": 33,
+ "start": 31,
+ "type": "CLIENT_INITIALIZATION"
+ },
+ {
+ "end": 38,
+ "start": 34,
+ "type": "REQUEST_INITIALIZATION"
+ },
+ {
+ "end": 41,
+ "start": 39,
+ "type": "REQUEST_EXECUTION"
+ },
+ {
+ "end": 45,
+ "start": 42,
+ "type": "RESPONSE_HANDLING"
+ }
+ ]
+ },
+ {
+ "clientMethod": {
+ "async": true,
+ "method": {
+ "service": {
+ "shortName": "PipelineService"
+ },
+ "shortName": "GetTrainingPipeline"
+ }
+ },
+ "file": "aiplatform_generated_aiplatform_v1_pipeline_service_get_training_pipeline_async.py",
+ "regionTag": "aiplatform_generated_aiplatform_v1_PipelineService_GetTrainingPipeline_async",
+ "segments": [
+ {
+ "end": 44,
+ "start": 27,
+ "type": "FULL"
+ },
+ {
+ "end": 44,
+ "start": 27,
+ "type": "SHORT"
+ },
+ {
+ "end": 33,
+ "start": 31,
+ "type": "CLIENT_INITIALIZATION"
+ },
+ {
+ "end": 38,
+ "start": 34,
+ "type": "REQUEST_INITIALIZATION"
+ },
+ {
+ "end": 41,
+ "start": 39,
+ "type": "REQUEST_EXECUTION"
+ },
+ {
+ "end": 45,
+ "start": 42,
+ "type": "RESPONSE_HANDLING"
+ }
+ ]
+ },
+ {
+ "clientMethod": {
+ "method": {
+ "service": {
+ "shortName": "PipelineService"
+ },
+ "shortName": "GetTrainingPipeline"
+ }
+ },
+ "file": "aiplatform_generated_aiplatform_v1_pipeline_service_get_training_pipeline_sync.py",
+ "regionTag": "aiplatform_generated_aiplatform_v1_PipelineService_GetTrainingPipeline_sync",
+ "segments": [
+ {
+ "end": 44,
+ "start": 27,
+ "type": "FULL"
+ },
+ {
+ "end": 44,
+ "start": 27,
+ "type": "SHORT"
+ },
+ {
+ "end": 33,
+ "start": 31,
+ "type": "CLIENT_INITIALIZATION"
+ },
+ {
+ "end": 38,
+ "start": 34,
+ "type": "REQUEST_INITIALIZATION"
+ },
+ {
+ "end": 41,
+ "start": 39,
+ "type": "REQUEST_EXECUTION"
+ },
+ {
+ "end": 45,
+ "start": 42,
+ "type": "RESPONSE_HANDLING"
+ }
+ ]
+ },
+ {
+ "clientMethod": {
+ "async": true,
+ "method": {
+ "service": {
+ "shortName": "PipelineService"
+ },
+ "shortName": "ListPipelineJobs"
+ }
+ },
+ "file": "aiplatform_generated_aiplatform_v1_pipeline_service_list_pipeline_jobs_async.py",
+ "regionTag": "aiplatform_generated_aiplatform_v1_PipelineService_ListPipelineJobs_async",
+ "segments": [
+ {
+ "end": 45,
+ "start": 27,
+ "type": "FULL"
+ },
+ {
+ "end": 45,
+ "start": 27,
+ "type": "SHORT"
+ },
+ {
+ "end": 33,
+ "start": 31,
+ "type": "CLIENT_INITIALIZATION"
+ },
+ {
+ "end": 38,
+ "start": 34,
+ "type": "REQUEST_INITIALIZATION"
+ },
+ {
+ "end": 41,
+ "start": 39,
+ "type": "REQUEST_EXECUTION"
+ },
+ {
+ "end": 46,
+ "start": 42,
+ "type": "RESPONSE_HANDLING"
+ }
+ ]
+ },
+ {
+ "clientMethod": {
+ "method": {
+ "service": {
+ "shortName": "PipelineService"
+ },
+ "shortName": "ListPipelineJobs"
+ }
+ },
+ "file": "aiplatform_generated_aiplatform_v1_pipeline_service_list_pipeline_jobs_sync.py",
+ "regionTag": "aiplatform_generated_aiplatform_v1_PipelineService_ListPipelineJobs_sync",
+ "segments": [
+ {
+ "end": 45,
+ "start": 27,
+ "type": "FULL"
+ },
+ {
+ "end": 45,
+ "start": 27,
+ "type": "SHORT"
+ },
+ {
+ "end": 33,
+ "start": 31,
+ "type": "CLIENT_INITIALIZATION"
+ },
+ {
+ "end": 38,
+ "start": 34,
+ "type": "REQUEST_INITIALIZATION"
+ },
+ {
+ "end": 41,
+ "start": 39,
+ "type": "REQUEST_EXECUTION"
+ },
+ {
+ "end": 46,
+ "start": 42,
+ "type": "RESPONSE_HANDLING"
+ }
+ ]
+ },
+ {
+ "clientMethod": {
+ "async": true,
+ "method": {
+ "service": {
+ "shortName": "PipelineService"
+ },
+ "shortName": "ListTrainingPipelines"
+ }
+ },
+ "file": "aiplatform_generated_aiplatform_v1_pipeline_service_list_training_pipelines_async.py",
+ "regionTag": "aiplatform_generated_aiplatform_v1_PipelineService_ListTrainingPipelines_async",
+ "segments": [
+ {
+ "end": 45,
+ "start": 27,
+ "type": "FULL"
+ },
+ {
+ "end": 45,
+ "start": 27,
+ "type": "SHORT"
+ },
+ {
+ "end": 33,
+ "start": 31,
+ "type": "CLIENT_INITIALIZATION"
+ },
+ {
+ "end": 38,
+ "start": 34,
+ "type": "REQUEST_INITIALIZATION"
+ },
+ {
+ "end": 41,
+ "start": 39,
+ "type": "REQUEST_EXECUTION"
+ },
+ {
+ "end": 46,
+ "start": 42,
+ "type": "RESPONSE_HANDLING"
+ }
+ ]
+ },
+ {
+ "clientMethod": {
+ "method": {
+ "service": {
+ "shortName": "PipelineService"
+ },
+ "shortName": "ListTrainingPipelines"
+ }
+ },
+ "file": "aiplatform_generated_aiplatform_v1_pipeline_service_list_training_pipelines_sync.py",
+ "regionTag": "aiplatform_generated_aiplatform_v1_PipelineService_ListTrainingPipelines_sync",
+ "segments": [
+ {
+ "end": 45,
+ "start": 27,
+ "type": "FULL"
+ },
+ {
+ "end": 45,
+ "start": 27,
+ "type": "SHORT"
+ },
+ {
+ "end": 33,
+ "start": 31,
+ "type": "CLIENT_INITIALIZATION"
+ },
+ {
+ "end": 38,
+ "start": 34,
+ "type": "REQUEST_INITIALIZATION"
+ },
+ {
+ "end": 41,
+ "start": 39,
+ "type": "REQUEST_EXECUTION"
+ },
+ {
+ "end": 46,
+ "start": 42,
+ "type": "RESPONSE_HANDLING"
+ }
+ ]
+ },
+ {
+ "clientMethod": {
+ "async": true,
+ "method": {
+ "service": {
+ "shortName": "PredictionService"
+ },
+ "shortName": "Explain"
+ }
+ },
+ "file": "aiplatform_generated_aiplatform_v1_prediction_service_explain_async.py",
+ "regionTag": "aiplatform_generated_aiplatform_v1_PredictionService_Explain_async",
+ "segments": [
+ {
+ "end": 48,
+ "start": 27,
+ "type": "FULL"
+ },
+ {
+ "end": 48,
+ "start": 27,
+ "type": "SHORT"
+ },
+ {
+ "end": 33,
+ "start": 31,
+ "type": "CLIENT_INITIALIZATION"
+ },
+ {
+ "end": 42,
+ "start": 34,
+ "type": "REQUEST_INITIALIZATION"
+ },
+ {
+ "end": 45,
+ "start": 43,
+ "type": "REQUEST_EXECUTION"
+ },
+ {
+ "end": 49,
+ "start": 46,
+ "type": "RESPONSE_HANDLING"
+ }
+ ]
+ },
+ {
+ "clientMethod": {
+ "method": {
+ "service": {
+ "shortName": "PredictionService"
+ },
+ "shortName": "Explain"
+ }
+ },
+ "file": "aiplatform_generated_aiplatform_v1_prediction_service_explain_sync.py",
+ "regionTag": "aiplatform_generated_aiplatform_v1_PredictionService_Explain_sync",
+ "segments": [
+ {
+ "end": 48,
+ "start": 27,
+ "type": "FULL"
+ },
+ {
+ "end": 48,
+ "start": 27,
+ "type": "SHORT"
+ },
+ {
+ "end": 33,
+ "start": 31,
+ "type": "CLIENT_INITIALIZATION"
+ },
+ {
+ "end": 42,
+ "start": 34,
+ "type": "REQUEST_INITIALIZATION"
+ },
+ {
+ "end": 45,
+ "start": 43,
+ "type": "REQUEST_EXECUTION"
+ },
+ {
+ "end": 49,
+ "start": 46,
+ "type": "RESPONSE_HANDLING"
+ }
+ ]
+ },
+ {
+ "clientMethod": {
+ "async": true,
+ "method": {
+ "service": {
+ "shortName": "PredictionService"
+ },
+ "shortName": "Predict"
+ }
+ },
+ "file": "aiplatform_generated_aiplatform_v1_prediction_service_predict_async.py",
+ "regionTag": "aiplatform_generated_aiplatform_v1_PredictionService_Predict_async",
+ "segments": [
+ {
+ "end": 48,
+ "start": 27,
+ "type": "FULL"
+ },
+ {
+ "end": 48,
+ "start": 27,
+ "type": "SHORT"
+ },
+ {
+ "end": 33,
+ "start": 31,
+ "type": "CLIENT_INITIALIZATION"
+ },
+ {
+ "end": 42,
+ "start": 34,
+ "type": "REQUEST_INITIALIZATION"
+ },
+ {
+ "end": 45,
+ "start": 43,
+ "type": "REQUEST_EXECUTION"
+ },
+ {
+ "end": 49,
+ "start": 46,
+ "type": "RESPONSE_HANDLING"
+ }
+ ]
+ },
+ {
+ "clientMethod": {
+ "method": {
+ "service": {
+ "shortName": "PredictionService"
+ },
+ "shortName": "Predict"
+ }
+ },
+ "file": "aiplatform_generated_aiplatform_v1_prediction_service_predict_sync.py",
+ "regionTag": "aiplatform_generated_aiplatform_v1_PredictionService_Predict_sync",
+ "segments": [
+ {
+ "end": 48,
+ "start": 27,
+ "type": "FULL"
+ },
+ {
+ "end": 48,
+ "start": 27,
+ "type": "SHORT"
+ },
+ {
+ "end": 33,
+ "start": 31,
+ "type": "CLIENT_INITIALIZATION"
+ },
+ {
+ "end": 42,
+ "start": 34,
+ "type": "REQUEST_INITIALIZATION"
+ },
+ {
+ "end": 45,
+ "start": 43,
+ "type": "REQUEST_EXECUTION"
+ },
+ {
+ "end": 49,
+ "start": 46,
+ "type": "RESPONSE_HANDLING"
+ }
+ ]
+ },
+ {
+ "clientMethod": {
+ "async": true,
+ "method": {
+ "service": {
+ "shortName": "PredictionService"
+ },
+ "shortName": "RawPredict"
+ }
+ },
+ "file": "aiplatform_generated_aiplatform_v1_prediction_service_raw_predict_async.py",
+ "regionTag": "aiplatform_generated_aiplatform_v1_PredictionService_RawPredict_async",
+ "segments": [
+ {
+ "end": 44,
+ "start": 27,
+ "type": "FULL"
+ },
+ {
+ "end": 44,
+ "start": 27,
+ "type": "SHORT"
+ },
+ {
+ "end": 33,
+ "start": 31,
+ "type": "CLIENT_INITIALIZATION"
+ },
+ {
+ "end": 38,
+ "start": 34,
+ "type": "REQUEST_INITIALIZATION"
+ },
+ {
+ "end": 41,
+ "start": 39,
+ "type": "REQUEST_EXECUTION"
+ },
+ {
+ "end": 45,
+ "start": 42,
+ "type": "RESPONSE_HANDLING"
+ }
+ ]
+ },
+ {
+ "clientMethod": {
+ "method": {
+ "service": {
+ "shortName": "PredictionService"
+ },
+ "shortName": "RawPredict"
+ }
+ },
+ "file": "aiplatform_generated_aiplatform_v1_prediction_service_raw_predict_sync.py",
+ "regionTag": "aiplatform_generated_aiplatform_v1_PredictionService_RawPredict_sync",
+ "segments": [
+ {
+ "end": 44,
+ "start": 27,
+ "type": "FULL"
+ },
+ {
+ "end": 44,
+ "start": 27,
+ "type": "SHORT"
+ },
+ {
+ "end": 33,
+ "start": 31,
+ "type": "CLIENT_INITIALIZATION"
+ },
+ {
+ "end": 38,
+ "start": 34,
+ "type": "REQUEST_INITIALIZATION"
+ },
+ {
+ "end": 41,
+ "start": 39,
+ "type": "REQUEST_EXECUTION"
+ },
+ {
+ "end": 45,
+ "start": 42,
+ "type": "RESPONSE_HANDLING"
+ }
+ ]
+ },
+ {
+ "clientMethod": {
+ "async": true,
+ "method": {
+ "service": {
+ "shortName": "SpecialistPoolService"
+ },
+ "shortName": "CreateSpecialistPool"
+ }
+ },
+ "file": "aiplatform_generated_aiplatform_v1_specialist_pool_service_create_specialist_pool_async.py",
+ "regionTag": "aiplatform_generated_aiplatform_v1_SpecialistPoolService_CreateSpecialistPool_async",
+ "segments": [
+ {
+ "end": 53,
+ "start": 27,
+ "type": "FULL"
+ },
+ {
+ "end": 53,
+ "start": 27,
+ "type": "SHORT"
+ },
+ {
+ "end": 33,
+ "start": 31,
+ "type": "CLIENT_INITIALIZATION"
+ },
+ {
+ "end": 43,
+ "start": 34,
+ "type": "REQUEST_INITIALIZATION"
+ },
+ {
+ "end": 50,
+ "start": 44,
+ "type": "REQUEST_EXECUTION"
+ },
+ {
+ "end": 54,
+ "start": 51,
+ "type": "RESPONSE_HANDLING"
+ }
+ ]
+ },
+ {
+ "clientMethod": {
+ "method": {
+ "service": {
+ "shortName": "SpecialistPoolService"
+ },
+ "shortName": "CreateSpecialistPool"
+ }
+ },
+ "file": "aiplatform_generated_aiplatform_v1_specialist_pool_service_create_specialist_pool_sync.py",
+ "regionTag": "aiplatform_generated_aiplatform_v1_SpecialistPoolService_CreateSpecialistPool_sync",
+ "segments": [
+ {
+ "end": 53,
+ "start": 27,
+ "type": "FULL"
+ },
+ {
+ "end": 53,
+ "start": 27,
+ "type": "SHORT"
+ },
+ {
+ "end": 33,
+ "start": 31,
+ "type": "CLIENT_INITIALIZATION"
+ },
+ {
+ "end": 43,
+ "start": 34,
+ "type": "REQUEST_INITIALIZATION"
+ },
+ {
+ "end": 50,
+ "start": 44,
+ "type": "REQUEST_EXECUTION"
+ },
+ {
+ "end": 54,
+ "start": 51,
+ "type": "RESPONSE_HANDLING"
+ }
+ ]
+ },
+ {
+ "clientMethod": {
+ "async": true,
+ "method": {
+ "service": {
+ "shortName": "SpecialistPoolService"
+ },
+ "shortName": "DeleteSpecialistPool"
+ }
+ },
+ "file": "aiplatform_generated_aiplatform_v1_specialist_pool_service_delete_specialist_pool_async.py",
+ "regionTag": "aiplatform_generated_aiplatform_v1_SpecialistPoolService_DeleteSpecialistPool_async",
+ "segments": [
+ {
+ "end": 48,
+ "start": 27,
+ "type": "FULL"
+ },
+ {
+ "end": 48,
+ "start": 27,
+ "type": "SHORT"
+ },
+ {
+ "end": 33,
+ "start": 31,
+ "type": "CLIENT_INITIALIZATION"
+ },
+ {
+ "end": 38,
+ "start": 34,
+ "type": "REQUEST_INITIALIZATION"
+ },
+ {
+ "end": 45,
+ "start": 39,
+ "type": "REQUEST_EXECUTION"
+ },
+ {
+ "end": 49,
+ "start": 46,
+ "type": "RESPONSE_HANDLING"
+ }
+ ]
+ },
+ {
+ "clientMethod": {
+ "method": {
+ "service": {
+ "shortName": "SpecialistPoolService"
+ },
+ "shortName": "DeleteSpecialistPool"
+ }
+ },
+ "file": "aiplatform_generated_aiplatform_v1_specialist_pool_service_delete_specialist_pool_sync.py",
+ "regionTag": "aiplatform_generated_aiplatform_v1_SpecialistPoolService_DeleteSpecialistPool_sync",
+ "segments": [
+ {
+ "end": 48,
+ "start": 27,
+ "type": "FULL"
+ },
+ {
+ "end": 48,
+ "start": 27,
+ "type": "SHORT"
+ },
+ {
+ "end": 33,
+ "start": 31,
+ "type": "CLIENT_INITIALIZATION"
+ },
+ {
+ "end": 38,
+ "start": 34,
+ "type": "REQUEST_INITIALIZATION"
+ },
+ {
+ "end": 45,
+ "start": 39,
+ "type": "REQUEST_EXECUTION"
+ },
+ {
+ "end": 49,
+ "start": 46,
+ "type": "RESPONSE_HANDLING"
+ }
+ ]
+ },
+ {
+ "clientMethod": {
+ "async": true,
+ "method": {
+ "service": {
+ "shortName": "SpecialistPoolService"
+ },
+ "shortName": "GetSpecialistPool"
+ }
+ },
+ "file": "aiplatform_generated_aiplatform_v1_specialist_pool_service_get_specialist_pool_async.py",
+ "regionTag": "aiplatform_generated_aiplatform_v1_SpecialistPoolService_GetSpecialistPool_async",
+ "segments": [
+ {
+ "end": 44,
+ "start": 27,
+ "type": "FULL"
+ },
+ {
+ "end": 44,
+ "start": 27,
+ "type": "SHORT"
+ },
+ {
+ "end": 33,
+ "start": 31,
+ "type": "CLIENT_INITIALIZATION"
+ },
+ {
+ "end": 38,
+ "start": 34,
+ "type": "REQUEST_INITIALIZATION"
+ },
+ {
+ "end": 41,
+ "start": 39,
+ "type": "REQUEST_EXECUTION"
+ },
+ {
+ "end": 45,
+ "start": 42,
+ "type": "RESPONSE_HANDLING"
+ }
+ ]
+ },
+ {
+ "clientMethod": {
+ "method": {
+ "service": {
+ "shortName": "SpecialistPoolService"
+ },
+ "shortName": "GetSpecialistPool"
+ }
+ },
+ "file": "aiplatform_generated_aiplatform_v1_specialist_pool_service_get_specialist_pool_sync.py",
+ "regionTag": "aiplatform_generated_aiplatform_v1_SpecialistPoolService_GetSpecialistPool_sync",
+ "segments": [
+ {
+ "end": 44,
+ "start": 27,
+ "type": "FULL"
+ },
+ {
+ "end": 44,
+ "start": 27,
+ "type": "SHORT"
+ },
+ {
+ "end": 33,
+ "start": 31,
+ "type": "CLIENT_INITIALIZATION"
+ },
+ {
+ "end": 38,
+ "start": 34,
+ "type": "REQUEST_INITIALIZATION"
+ },
+ {
+ "end": 41,
+ "start": 39,
+ "type": "REQUEST_EXECUTION"
+ },
+ {
+ "end": 45,
+ "start": 42,
+ "type": "RESPONSE_HANDLING"
+ }
+ ]
+ },
+ {
+ "clientMethod": {
+ "async": true,
+ "method": {
+ "service": {
+ "shortName": "SpecialistPoolService"
+ },
+ "shortName": "ListSpecialistPools"
+ }
+ },
+ "file": "aiplatform_generated_aiplatform_v1_specialist_pool_service_list_specialist_pools_async.py",
+ "regionTag": "aiplatform_generated_aiplatform_v1_SpecialistPoolService_ListSpecialistPools_async",
+ "segments": [
+ {
+ "end": 45,
+ "start": 27,
+ "type": "FULL"
+ },
+ {
+ "end": 45,
+ "start": 27,
+ "type": "SHORT"
+ },
+ {
+ "end": 33,
+ "start": 31,
+ "type": "CLIENT_INITIALIZATION"
+ },
+ {
+ "end": 38,
+ "start": 34,
+ "type": "REQUEST_INITIALIZATION"
+ },
+ {
+ "end": 41,
+ "start": 39,
+ "type": "REQUEST_EXECUTION"
+ },
+ {
+ "end": 46,
+ "start": 42,
+ "type": "RESPONSE_HANDLING"
+ }
+ ]
+ },
+ {
+ "clientMethod": {
+ "method": {
+ "service": {
+ "shortName": "SpecialistPoolService"
+ },
+ "shortName": "ListSpecialistPools"
+ }
+ },
+ "file": "aiplatform_generated_aiplatform_v1_specialist_pool_service_list_specialist_pools_sync.py",
+ "regionTag": "aiplatform_generated_aiplatform_v1_SpecialistPoolService_ListSpecialistPools_sync",
+ "segments": [
+ {
+ "end": 45,
+ "start": 27,
+ "type": "FULL"
+ },
+ {
+ "end": 45,
+ "start": 27,
+ "type": "SHORT"
+ },
+ {
+ "end": 33,
+ "start": 31,
+ "type": "CLIENT_INITIALIZATION"
+ },
+ {
+ "end": 38,
+ "start": 34,
+ "type": "REQUEST_INITIALIZATION"
+ },
+ {
+ "end": 41,
+ "start": 39,
+ "type": "REQUEST_EXECUTION"
+ },
+ {
+ "end": 46,
+ "start": 42,
+ "type": "RESPONSE_HANDLING"
+ }
+ ]
+ },
+ {
+ "clientMethod": {
+ "async": true,
+ "method": {
+ "service": {
+ "shortName": "SpecialistPoolService"
+ },
+ "shortName": "UpdateSpecialistPool"
+ }
+ },
+ "file": "aiplatform_generated_aiplatform_v1_specialist_pool_service_update_specialist_pool_async.py",
+ "regionTag": "aiplatform_generated_aiplatform_v1_SpecialistPoolService_UpdateSpecialistPool_async",
+ "segments": [
+ {
+ "end": 52,
+ "start": 27,
+ "type": "FULL"
+ },
+ {
+ "end": 52,
+ "start": 27,
+ "type": "SHORT"
+ },
+ {
+ "end": 33,
+ "start": 31,
+ "type": "CLIENT_INITIALIZATION"
+ },
+ {
+ "end": 42,
+ "start": 34,
+ "type": "REQUEST_INITIALIZATION"
+ },
+ {
+ "end": 49,
+ "start": 43,
+ "type": "REQUEST_EXECUTION"
+ },
+ {
+ "end": 53,
+ "start": 50,
+ "type": "RESPONSE_HANDLING"
+ }
+ ]
+ },
+ {
+ "clientMethod": {
+ "method": {
+ "service": {
+ "shortName": "SpecialistPoolService"
+ },
+ "shortName": "UpdateSpecialistPool"
+ }
+ },
+ "file": "aiplatform_generated_aiplatform_v1_specialist_pool_service_update_specialist_pool_sync.py",
+ "regionTag": "aiplatform_generated_aiplatform_v1_SpecialistPoolService_UpdateSpecialistPool_sync",
+ "segments": [
+ {
+ "end": 52,
+ "start": 27,
+ "type": "FULL"
+ },
+ {
+ "end": 52,
+ "start": 27,
+ "type": "SHORT"
+ },
+ {
+ "end": 33,
+ "start": 31,
+ "type": "CLIENT_INITIALIZATION"
+ },
+ {
+ "end": 42,
+ "start": 34,
+ "type": "REQUEST_INITIALIZATION"
+ },
+ {
+ "end": 49,
+ "start": 43,
+ "type": "REQUEST_EXECUTION"
+ },
+ {
+ "end": 53,
+ "start": 50,
+ "type": "RESPONSE_HANDLING"
+ }
+ ]
+ },
+ {
+ "clientMethod": {
+ "async": true,
+ "method": {
+ "service": {
+ "shortName": "TensorboardService"
+ },
+ "shortName": "BatchCreateTensorboardRuns"
+ }
+ },
+ "file": "aiplatform_generated_aiplatform_v1_tensorboard_service_batch_create_tensorboard_runs_async.py",
+ "regionTag": "aiplatform_generated_aiplatform_v1_TensorboardService_BatchCreateTensorboardRuns_async",
+ "segments": [
+ {
+ "end": 50,
+ "start": 27,
+ "type": "FULL"
+ },
+ {
+ "end": 50,
+ "start": 27,
+ "type": "SHORT"
+ },
+ {
+ "end": 33,
+ "start": 31,
+ "type": "CLIENT_INITIALIZATION"
+ },
+ {
+ "end": 44,
+ "start": 34,
+ "type": "REQUEST_INITIALIZATION"
+ },
+ {
+ "end": 47,
+ "start": 45,
+ "type": "REQUEST_EXECUTION"
+ },
+ {
+ "end": 51,
+ "start": 48,
+ "type": "RESPONSE_HANDLING"
+ }
+ ]
+ },
+ {
+ "clientMethod": {
+ "method": {
+ "service": {
+ "shortName": "TensorboardService"
+ },
+ "shortName": "BatchCreateTensorboardRuns"
+ }
+ },
+ "file": "aiplatform_generated_aiplatform_v1_tensorboard_service_batch_create_tensorboard_runs_sync.py",
+ "regionTag": "aiplatform_generated_aiplatform_v1_TensorboardService_BatchCreateTensorboardRuns_sync",
+ "segments": [
+ {
+ "end": 50,
+ "start": 27,
+ "type": "FULL"
+ },
+ {
+ "end": 50,
+ "start": 27,
+ "type": "SHORT"
+ },
+ {
+ "end": 33,
+ "start": 31,
+ "type": "CLIENT_INITIALIZATION"
+ },
+ {
+ "end": 44,
+ "start": 34,
+ "type": "REQUEST_INITIALIZATION"
+ },
+ {
+ "end": 47,
+ "start": 45,
+ "type": "REQUEST_EXECUTION"
+ },
+ {
+ "end": 51,
+ "start": 48,
+ "type": "RESPONSE_HANDLING"
+ }
+ ]
+ },
+ {
+ "clientMethod": {
+ "async": true,
+ "method": {
+ "service": {
+ "shortName": "TensorboardService"
+ },
+ "shortName": "BatchCreateTensorboardTimeSeries"
+ }
+ },
+ "file": "aiplatform_generated_aiplatform_v1_tensorboard_service_batch_create_tensorboard_time_series_async.py",
+ "regionTag": "aiplatform_generated_aiplatform_v1_TensorboardService_BatchCreateTensorboardTimeSeries_async",
+ "segments": [
+ {
+ "end": 50,
+ "start": 27,
+ "type": "FULL"
+ },
+ {
+ "end": 50,
+ "start": 27,
+ "type": "SHORT"
+ },
+ {
+ "end": 33,
+ "start": 31,
+ "type": "CLIENT_INITIALIZATION"
+ },
+ {
+ "end": 44,
+ "start": 34,
+ "type": "REQUEST_INITIALIZATION"
+ },
+ {
+ "end": 47,
+ "start": 45,
+ "type": "REQUEST_EXECUTION"
+ },
+ {
+ "end": 51,
+ "start": 48,
+ "type": "RESPONSE_HANDLING"
+ }
+ ]
+ },
+ {
+ "clientMethod": {
+ "method": {
+ "service": {
+ "shortName": "TensorboardService"
+ },
+ "shortName": "BatchCreateTensorboardTimeSeries"
+ }
+ },
+ "file": "aiplatform_generated_aiplatform_v1_tensorboard_service_batch_create_tensorboard_time_series_sync.py",
+ "regionTag": "aiplatform_generated_aiplatform_v1_TensorboardService_BatchCreateTensorboardTimeSeries_sync",
+ "segments": [
+ {
+ "end": 50,
+ "start": 27,
+ "type": "FULL"
+ },
+ {
+ "end": 50,
+ "start": 27,
+ "type": "SHORT"
+ },
+ {
+ "end": 33,
+ "start": 31,
+ "type": "CLIENT_INITIALIZATION"
+ },
+ {
+ "end": 44,
+ "start": 34,
+ "type": "REQUEST_INITIALIZATION"
+ },
+ {
+ "end": 47,
+ "start": 45,
+ "type": "REQUEST_EXECUTION"
+ },
+ {
+ "end": 51,
+ "start": 48,
+ "type": "RESPONSE_HANDLING"
+ }
+ ]
+ },
+ {
+ "clientMethod": {
+ "async": true,
+ "method": {
+ "service": {
+ "shortName": "TensorboardService"
+ },
+ "shortName": "BatchReadTensorboardTimeSeriesData"
+ }
+ },
+ "file": "aiplatform_generated_aiplatform_v1_tensorboard_service_batch_read_tensorboard_time_series_data_async.py",
+ "regionTag": "aiplatform_generated_aiplatform_v1_TensorboardService_BatchReadTensorboardTimeSeriesData_async",
+ "segments": [
+ {
+ "end": 45,
+ "start": 27,
+ "type": "FULL"
+ },
+ {
+ "end": 45,
+ "start": 27,
+ "type": "SHORT"
+ },
+ {
+ "end": 33,
+ "start": 31,
+ "type": "CLIENT_INITIALIZATION"
+ },
+ {
+ "end": 39,
+ "start": 34,
+ "type": "REQUEST_INITIALIZATION"
+ },
+ {
+ "end": 42,
+ "start": 40,
+ "type": "REQUEST_EXECUTION"
+ },
+ {
+ "end": 46,
+ "start": 43,
+ "type": "RESPONSE_HANDLING"
+ }
+ ]
+ },
+ {
+ "clientMethod": {
+ "method": {
+ "service": {
+ "shortName": "TensorboardService"
+ },
+ "shortName": "BatchReadTensorboardTimeSeriesData"
+ }
+ },
+ "file": "aiplatform_generated_aiplatform_v1_tensorboard_service_batch_read_tensorboard_time_series_data_sync.py",
+ "regionTag": "aiplatform_generated_aiplatform_v1_TensorboardService_BatchReadTensorboardTimeSeriesData_sync",
+ "segments": [
+ {
+ "end": 45,
+ "start": 27,
+ "type": "FULL"
+ },
+ {
+ "end": 45,
+ "start": 27,
+ "type": "SHORT"
+ },
+ {
+ "end": 33,
+ "start": 31,
+ "type": "CLIENT_INITIALIZATION"
+ },
+ {
+ "end": 39,
+ "start": 34,
+ "type": "REQUEST_INITIALIZATION"
+ },
+ {
+ "end": 42,
+ "start": 40,
+ "type": "REQUEST_EXECUTION"
+ },
+ {
+ "end": 46,
+ "start": 43,
+ "type": "RESPONSE_HANDLING"
+ }
+ ]
+ },
+ {
+ "clientMethod": {
+ "async": true,
+ "method": {
+ "service": {
+ "shortName": "TensorboardService"
+ },
+ "shortName": "CreateTensorboardExperiment"
+ }
+ },
+ "file": "aiplatform_generated_aiplatform_v1_tensorboard_service_create_tensorboard_experiment_async.py",
+ "regionTag": "aiplatform_generated_aiplatform_v1_TensorboardService_CreateTensorboardExperiment_async",
+ "segments": [
+ {
+ "end": 45,
+ "start": 27,
+ "type": "FULL"
+ },
+ {
+ "end": 45,
+ "start": 27,
+ "type": "SHORT"
+ },
+ {
+ "end": 33,
+ "start": 31,
+ "type": "CLIENT_INITIALIZATION"
+ },
+ {
+ "end": 39,
+ "start": 34,
+ "type": "REQUEST_INITIALIZATION"
+ },
+ {
+ "end": 42,
+ "start": 40,
+ "type": "REQUEST_EXECUTION"
+ },
+ {
+ "end": 46,
+ "start": 43,
+ "type": "RESPONSE_HANDLING"
+ }
+ ]
+ },
+ {
+ "clientMethod": {
+ "method": {
+ "service": {
+ "shortName": "TensorboardService"
+ },
+ "shortName": "CreateTensorboardExperiment"
+ }
+ },
+ "file": "aiplatform_generated_aiplatform_v1_tensorboard_service_create_tensorboard_experiment_sync.py",
+ "regionTag": "aiplatform_generated_aiplatform_v1_TensorboardService_CreateTensorboardExperiment_sync",
+ "segments": [
+ {
+ "end": 45,
+ "start": 27,
+ "type": "FULL"
+ },
+ {
+ "end": 45,
+ "start": 27,
+ "type": "SHORT"
+ },
+ {
+ "end": 33,
+ "start": 31,
+ "type": "CLIENT_INITIALIZATION"
+ },
+ {
+ "end": 39,
+ "start": 34,
+ "type": "REQUEST_INITIALIZATION"
+ },
+ {
+ "end": 42,
+ "start": 40,
+ "type": "REQUEST_EXECUTION"
+ },
+ {
+ "end": 46,
+ "start": 43,
+ "type": "RESPONSE_HANDLING"
+ }
+ ]
+ },
+ {
+ "clientMethod": {
+ "async": true,
+ "method": {
+ "service": {
+ "shortName": "TensorboardService"
+ },
+ "shortName": "CreateTensorboardRun"
+ }
+ },
+ "file": "aiplatform_generated_aiplatform_v1_tensorboard_service_create_tensorboard_run_async.py",
+ "regionTag": "aiplatform_generated_aiplatform_v1_TensorboardService_CreateTensorboardRun_async",
+ "segments": [
+ {
+ "end": 49,
+ "start": 27,
+ "type": "FULL"
+ },
+ {
+ "end": 49,
+ "start": 27,
+ "type": "SHORT"
+ },
+ {
+ "end": 33,
+ "start": 31,
+ "type": "CLIENT_INITIALIZATION"
+ },
+ {
+ "end": 43,
+ "start": 34,
+ "type": "REQUEST_INITIALIZATION"
+ },
+ {
+ "end": 46,
+ "start": 44,
+ "type": "REQUEST_EXECUTION"
+ },
+ {
+ "end": 50,
+ "start": 47,
+ "type": "RESPONSE_HANDLING"
+ }
+ ]
+ },
+ {
+ "clientMethod": {
+ "method": {
+ "service": {
+ "shortName": "TensorboardService"
+ },
+ "shortName": "CreateTensorboardRun"
+ }
+ },
+ "file": "aiplatform_generated_aiplatform_v1_tensorboard_service_create_tensorboard_run_sync.py",
+ "regionTag": "aiplatform_generated_aiplatform_v1_TensorboardService_CreateTensorboardRun_sync",
+ "segments": [
+ {
+ "end": 49,
+ "start": 27,
+ "type": "FULL"
+ },
+ {
+ "end": 49,
+ "start": 27,
+ "type": "SHORT"
+ },
+ {
+ "end": 33,
+ "start": 31,
+ "type": "CLIENT_INITIALIZATION"
+ },
+ {
+ "end": 43,
+ "start": 34,
+ "type": "REQUEST_INITIALIZATION"
+ },
+ {
+ "end": 46,
+ "start": 44,
+ "type": "REQUEST_EXECUTION"
+ },
+ {
+ "end": 50,
+ "start": 47,
+ "type": "RESPONSE_HANDLING"
+ }
+ ]
+ },
+ {
+ "clientMethod": {
+ "async": true,
+ "method": {
+ "service": {
+ "shortName": "TensorboardService"
+ },
+ "shortName": "CreateTensorboardTimeSeries"
+ }
+ },
+ "file": "aiplatform_generated_aiplatform_v1_tensorboard_service_create_tensorboard_time_series_async.py",
+ "regionTag": "aiplatform_generated_aiplatform_v1_TensorboardService_CreateTensorboardTimeSeries_async",
+ "segments": [
+ {
+ "end": 49,
+ "start": 27,
+ "type": "FULL"
+ },
+ {
+ "end": 49,
+ "start": 27,
+ "type": "SHORT"
+ },
+ {
+ "end": 33,
+ "start": 31,
+ "type": "CLIENT_INITIALIZATION"
+ },
+ {
+ "end": 43,
+ "start": 34,
+ "type": "REQUEST_INITIALIZATION"
+ },
+ {
+ "end": 46,
+ "start": 44,
+ "type": "REQUEST_EXECUTION"
+ },
+ {
+ "end": 50,
+ "start": 47,
+ "type": "RESPONSE_HANDLING"
+ }
+ ]
+ },
+ {
+ "clientMethod": {
+ "method": {
+ "service": {
+ "shortName": "TensorboardService"
+ },
+ "shortName": "CreateTensorboardTimeSeries"
+ }
+ },
+ "file": "aiplatform_generated_aiplatform_v1_tensorboard_service_create_tensorboard_time_series_sync.py",
+ "regionTag": "aiplatform_generated_aiplatform_v1_TensorboardService_CreateTensorboardTimeSeries_sync",
+ "segments": [
+ {
+ "end": 49,
+ "start": 27,
+ "type": "FULL"
+ },
+ {
+ "end": 49,
+ "start": 27,
+ "type": "SHORT"
+ },
+ {
+ "end": 33,
+ "start": 31,
+ "type": "CLIENT_INITIALIZATION"
+ },
+ {
+ "end": 43,
+ "start": 34,
+ "type": "REQUEST_INITIALIZATION"
+ },
+ {
+ "end": 46,
+ "start": 44,
+ "type": "REQUEST_EXECUTION"
+ },
+ {
+ "end": 50,
+ "start": 47,
+ "type": "RESPONSE_HANDLING"
+ }
+ ]
+ },
+ {
+ "clientMethod": {
+ "async": true,
+ "method": {
+ "service": {
+ "shortName": "TensorboardService"
+ },
+ "shortName": "CreateTensorboard"
+ }
+ },
+ "file": "aiplatform_generated_aiplatform_v1_tensorboard_service_create_tensorboard_async.py",
+ "regionTag": "aiplatform_generated_aiplatform_v1_TensorboardService_CreateTensorboard_async",
+ "segments": [
+ {
+ "end": 52,
+ "start": 27,
+ "type": "FULL"
+ },
+ {
+ "end": 52,
+ "start": 27,
+ "type": "SHORT"
+ },
+ {
+ "end": 33,
+ "start": 31,
+ "type": "CLIENT_INITIALIZATION"
+ },
+ {
+ "end": 42,
+ "start": 34,
+ "type": "REQUEST_INITIALIZATION"
+ },
+ {
+ "end": 49,
+ "start": 43,
+ "type": "REQUEST_EXECUTION"
+ },
+ {
+ "end": 53,
+ "start": 50,
+ "type": "RESPONSE_HANDLING"
+ }
+ ]
+ },
+ {
+ "clientMethod": {
+ "method": {
+ "service": {
+ "shortName": "TensorboardService"
+ },
+ "shortName": "CreateTensorboard"
+ }
+ },
+ "file": "aiplatform_generated_aiplatform_v1_tensorboard_service_create_tensorboard_sync.py",
+ "regionTag": "aiplatform_generated_aiplatform_v1_TensorboardService_CreateTensorboard_sync",
+ "segments": [
+ {
+ "end": 52,
+ "start": 27,
+ "type": "FULL"
+ },
+ {
+ "end": 52,
+ "start": 27,
+ "type": "SHORT"
+ },
+ {
+ "end": 33,
+ "start": 31,
+ "type": "CLIENT_INITIALIZATION"
+ },
+ {
+ "end": 42,
+ "start": 34,
+ "type": "REQUEST_INITIALIZATION"
+ },
+ {
+ "end": 49,
+ "start": 43,
+ "type": "REQUEST_EXECUTION"
+ },
+ {
+ "end": 53,
+ "start": 50,
+ "type": "RESPONSE_HANDLING"
+ }
+ ]
+ },
+ {
+ "clientMethod": {
+ "async": true,
+ "method": {
+ "service": {
+ "shortName": "TensorboardService"
+ },
+ "shortName": "DeleteTensorboardExperiment"
+ }
+ },
+ "file": "aiplatform_generated_aiplatform_v1_tensorboard_service_delete_tensorboard_experiment_async.py",
+ "regionTag": "aiplatform_generated_aiplatform_v1_TensorboardService_DeleteTensorboardExperiment_async",
+ "segments": [
+ {
+ "end": 48,
+ "start": 27,
+ "type": "FULL"
+ },
+ {
+ "end": 48,
+ "start": 27,
+ "type": "SHORT"
+ },
+ {
+ "end": 33,
+ "start": 31,
+ "type": "CLIENT_INITIALIZATION"
+ },
+ {
+ "end": 38,
+ "start": 34,
+ "type": "REQUEST_INITIALIZATION"
+ },
+ {
+ "end": 45,
+ "start": 39,
+ "type": "REQUEST_EXECUTION"
+ },
+ {
+ "end": 49,
+ "start": 46,
+ "type": "RESPONSE_HANDLING"
+ }
+ ]
+ },
+ {
+ "clientMethod": {
+ "method": {
+ "service": {
+ "shortName": "TensorboardService"
+ },
+ "shortName": "DeleteTensorboardExperiment"
+ }
+ },
+ "file": "aiplatform_generated_aiplatform_v1_tensorboard_service_delete_tensorboard_experiment_sync.py",
+ "regionTag": "aiplatform_generated_aiplatform_v1_TensorboardService_DeleteTensorboardExperiment_sync",
+ "segments": [
+ {
+ "end": 48,
+ "start": 27,
+ "type": "FULL"
+ },
+ {
+ "end": 48,
+ "start": 27,
+ "type": "SHORT"
+ },
+ {
+ "end": 33,
+ "start": 31,
+ "type": "CLIENT_INITIALIZATION"
+ },
+ {
+ "end": 38,
+ "start": 34,
+ "type": "REQUEST_INITIALIZATION"
+ },
+ {
+ "end": 45,
+ "start": 39,
+ "type": "REQUEST_EXECUTION"
+ },
+ {
+ "end": 49,
+ "start": 46,
+ "type": "RESPONSE_HANDLING"
+ }
+ ]
+ },
+ {
+ "clientMethod": {
+ "async": true,
+ "method": {
+ "service": {
+ "shortName": "TensorboardService"
+ },
+ "shortName": "DeleteTensorboardRun"
+ }
+ },
+ "file": "aiplatform_generated_aiplatform_v1_tensorboard_service_delete_tensorboard_run_async.py",
+ "regionTag": "aiplatform_generated_aiplatform_v1_TensorboardService_DeleteTensorboardRun_async",
+ "segments": [
+ {
+ "end": 48,
+ "start": 27,
+ "type": "FULL"
+ },
+ {
+ "end": 48,
+ "start": 27,
+ "type": "SHORT"
+ },
+ {
+ "end": 33,
+ "start": 31,
+ "type": "CLIENT_INITIALIZATION"
+ },
+ {
+ "end": 38,
+ "start": 34,
+ "type": "REQUEST_INITIALIZATION"
+ },
+ {
+ "end": 45,
+ "start": 39,
+ "type": "REQUEST_EXECUTION"
+ },
+ {
+ "end": 49,
+ "start": 46,
+ "type": "RESPONSE_HANDLING"
+ }
+ ]
+ },
+ {
+ "clientMethod": {
+ "method": {
+ "service": {
+ "shortName": "TensorboardService"
+ },
+ "shortName": "DeleteTensorboardRun"
+ }
+ },
+ "file": "aiplatform_generated_aiplatform_v1_tensorboard_service_delete_tensorboard_run_sync.py",
+ "regionTag": "aiplatform_generated_aiplatform_v1_TensorboardService_DeleteTensorboardRun_sync",
+ "segments": [
+ {
+ "end": 48,
+ "start": 27,
+ "type": "FULL"
+ },
+ {
+ "end": 48,
+ "start": 27,
+ "type": "SHORT"
+ },
+ {
+ "end": 33,
+ "start": 31,
+ "type": "CLIENT_INITIALIZATION"
+ },
+ {
+ "end": 38,
+ "start": 34,
+ "type": "REQUEST_INITIALIZATION"
+ },
+ {
+ "end": 45,
+ "start": 39,
+ "type": "REQUEST_EXECUTION"
+ },
+ {
+ "end": 49,
+ "start": 46,
+ "type": "RESPONSE_HANDLING"
+ }
+ ]
+ },
+ {
+ "clientMethod": {
+ "async": true,
+ "method": {
+ "service": {
+ "shortName": "TensorboardService"
+ },
+ "shortName": "DeleteTensorboardTimeSeries"
+ }
+ },
+ "file": "aiplatform_generated_aiplatform_v1_tensorboard_service_delete_tensorboard_time_series_async.py",
+ "regionTag": "aiplatform_generated_aiplatform_v1_TensorboardService_DeleteTensorboardTimeSeries_async",
+ "segments": [
+ {
+ "end": 48,
+ "start": 27,
+ "type": "FULL"
+ },
+ {
+ "end": 48,
+ "start": 27,
+ "type": "SHORT"
+ },
+ {
+ "end": 33,
+ "start": 31,
+ "type": "CLIENT_INITIALIZATION"
+ },
+ {
+ "end": 38,
+ "start": 34,
+ "type": "REQUEST_INITIALIZATION"
+ },
+ {
+ "end": 45,
+ "start": 39,
+ "type": "REQUEST_EXECUTION"
+ },
+ {
+ "end": 49,
+ "start": 46,
+ "type": "RESPONSE_HANDLING"
+ }
+ ]
+ },
+ {
+ "clientMethod": {
+ "method": {
+ "service": {
+ "shortName": "TensorboardService"
+ },
+ "shortName": "DeleteTensorboardTimeSeries"
+ }
+ },
+ "file": "aiplatform_generated_aiplatform_v1_tensorboard_service_delete_tensorboard_time_series_sync.py",
+ "regionTag": "aiplatform_generated_aiplatform_v1_TensorboardService_DeleteTensorboardTimeSeries_sync",
+ "segments": [
+ {
+ "end": 48,
+ "start": 27,
+ "type": "FULL"
+ },
+ {
+ "end": 48,
+ "start": 27,
+ "type": "SHORT"
+ },
+ {
+ "end": 33,
+ "start": 31,
+ "type": "CLIENT_INITIALIZATION"
+ },
+ {
+ "end": 38,
+ "start": 34,
+ "type": "REQUEST_INITIALIZATION"
+ },
+ {
+ "end": 45,
+ "start": 39,
+ "type": "REQUEST_EXECUTION"
+ },
+ {
+ "end": 49,
+ "start": 46,
+ "type": "RESPONSE_HANDLING"
+ }
+ ]
+ },
+ {
+ "clientMethod": {
+ "async": true,
+ "method": {
+ "service": {
+ "shortName": "TensorboardService"
+ },
+ "shortName": "DeleteTensorboard"
+ }
+ },
+ "file": "aiplatform_generated_aiplatform_v1_tensorboard_service_delete_tensorboard_async.py",
+ "regionTag": "aiplatform_generated_aiplatform_v1_TensorboardService_DeleteTensorboard_async",
+ "segments": [
+ {
+ "end": 48,
+ "start": 27,
+ "type": "FULL"
+ },
+ {
+ "end": 48,
+ "start": 27,
+ "type": "SHORT"
+ },
+ {
+ "end": 33,
+ "start": 31,
+ "type": "CLIENT_INITIALIZATION"
+ },
+ {
+ "end": 38,
+ "start": 34,
+ "type": "REQUEST_INITIALIZATION"
+ },
+ {
+ "end": 45,
+ "start": 39,
+ "type": "REQUEST_EXECUTION"
+ },
+ {
+ "end": 49,
+ "start": 46,
+ "type": "RESPONSE_HANDLING"
+ }
+ ]
+ },
+ {
+ "clientMethod": {
+ "method": {
+ "service": {
+ "shortName": "TensorboardService"
+ },
+ "shortName": "DeleteTensorboard"
+ }
+ },
+ "file": "aiplatform_generated_aiplatform_v1_tensorboard_service_delete_tensorboard_sync.py",
+ "regionTag": "aiplatform_generated_aiplatform_v1_TensorboardService_DeleteTensorboard_sync",
+ "segments": [
+ {
+ "end": 48,
+ "start": 27,
+ "type": "FULL"
+ },
+ {
+ "end": 48,
+ "start": 27,
+ "type": "SHORT"
+ },
+ {
+ "end": 33,
+ "start": 31,
+ "type": "CLIENT_INITIALIZATION"
+ },
+ {
+ "end": 38,
+ "start": 34,
+ "type": "REQUEST_INITIALIZATION"
+ },
+ {
+ "end": 45,
+ "start": 39,
+ "type": "REQUEST_EXECUTION"
+ },
+ {
+ "end": 49,
+ "start": 46,
+ "type": "RESPONSE_HANDLING"
+ }
+ ]
+ },
+ {
+ "clientMethod": {
+ "async": true,
+ "method": {
+ "service": {
+ "shortName": "TensorboardService"
+ },
+ "shortName": "ExportTensorboardTimeSeriesData"
+ }
+ },
+ "file": "aiplatform_generated_aiplatform_v1_tensorboard_service_export_tensorboard_time_series_data_async.py",
+ "regionTag": "aiplatform_generated_aiplatform_v1_TensorboardService_ExportTensorboardTimeSeriesData_async",
+ "segments": [
+ {
+ "end": 45,
+ "start": 27,
+ "type": "FULL"
+ },
+ {
+ "end": 45,
+ "start": 27,
+ "type": "SHORT"
+ },
+ {
+ "end": 33,
+ "start": 31,
+ "type": "CLIENT_INITIALIZATION"
+ },
+ {
+ "end": 38,
+ "start": 34,
+ "type": "REQUEST_INITIALIZATION"
+ },
+ {
+ "end": 41,
+ "start": 39,
+ "type": "REQUEST_EXECUTION"
+ },
+ {
+ "end": 46,
+ "start": 42,
+ "type": "RESPONSE_HANDLING"
+ }
+ ]
+ },
+ {
+ "clientMethod": {
+ "method": {
+ "service": {
+ "shortName": "TensorboardService"
+ },
+ "shortName": "ExportTensorboardTimeSeriesData"
+ }
+ },
+ "file": "aiplatform_generated_aiplatform_v1_tensorboard_service_export_tensorboard_time_series_data_sync.py",
+ "regionTag": "aiplatform_generated_aiplatform_v1_TensorboardService_ExportTensorboardTimeSeriesData_sync",
+ "segments": [
+ {
+ "end": 45,
+ "start": 27,
+ "type": "FULL"
+ },
+ {
+ "end": 45,
+ "start": 27,
+ "type": "SHORT"
+ },
+ {
+ "end": 33,
+ "start": 31,
+ "type": "CLIENT_INITIALIZATION"
+ },
+ {
+ "end": 38,
+ "start": 34,
+ "type": "REQUEST_INITIALIZATION"
+ },
+ {
+ "end": 41,
+ "start": 39,
+ "type": "REQUEST_EXECUTION"
+ },
+ {
+ "end": 46,
+ "start": 42,
+ "type": "RESPONSE_HANDLING"
+ }
+ ]
+ },
+ {
+ "clientMethod": {
+ "async": true,
+ "method": {
+ "service": {
+ "shortName": "TensorboardService"
+ },
+ "shortName": "GetTensorboardExperiment"
+ }
+ },
+ "file": "aiplatform_generated_aiplatform_v1_tensorboard_service_get_tensorboard_experiment_async.py",
+ "regionTag": "aiplatform_generated_aiplatform_v1_TensorboardService_GetTensorboardExperiment_async",
+ "segments": [
+ {
+ "end": 44,
+ "start": 27,
+ "type": "FULL"
+ },
+ {
+ "end": 44,
+ "start": 27,
+ "type": "SHORT"
+ },
+ {
+ "end": 33,
+ "start": 31,
+ "type": "CLIENT_INITIALIZATION"
+ },
+ {
+ "end": 38,
+ "start": 34,
+ "type": "REQUEST_INITIALIZATION"
+ },
+ {
+ "end": 41,
+ "start": 39,
+ "type": "REQUEST_EXECUTION"
+ },
+ {
+ "end": 45,
+ "start": 42,
+ "type": "RESPONSE_HANDLING"
+ }
+ ]
+ },
+ {
+ "clientMethod": {
+ "method": {
+ "service": {
+ "shortName": "TensorboardService"
+ },
+ "shortName": "GetTensorboardExperiment"
+ }
+ },
+ "file": "aiplatform_generated_aiplatform_v1_tensorboard_service_get_tensorboard_experiment_sync.py",
+ "regionTag": "aiplatform_generated_aiplatform_v1_TensorboardService_GetTensorboardExperiment_sync",
+ "segments": [
+ {
+ "end": 44,
+ "start": 27,
+ "type": "FULL"
+ },
+ {
+ "end": 44,
+ "start": 27,
+ "type": "SHORT"
+ },
+ {
+ "end": 33,
+ "start": 31,
+ "type": "CLIENT_INITIALIZATION"
+ },
+ {
+ "end": 38,
+ "start": 34,
+ "type": "REQUEST_INITIALIZATION"
+ },
+ {
+ "end": 41,
+ "start": 39,
+ "type": "REQUEST_EXECUTION"
+ },
+ {
+ "end": 45,
+ "start": 42,
+ "type": "RESPONSE_HANDLING"
+ }
+ ]
+ },
+ {
+ "clientMethod": {
+ "async": true,
+ "method": {
+ "service": {
+ "shortName": "TensorboardService"
+ },
+ "shortName": "GetTensorboardRun"
+ }
+ },
+ "file": "aiplatform_generated_aiplatform_v1_tensorboard_service_get_tensorboard_run_async.py",
+ "regionTag": "aiplatform_generated_aiplatform_v1_TensorboardService_GetTensorboardRun_async",
+ "segments": [
+ {
+ "end": 44,
+ "start": 27,
+ "type": "FULL"
+ },
+ {
+ "end": 44,
+ "start": 27,
+ "type": "SHORT"
+ },
+ {
+ "end": 33,
+ "start": 31,
+ "type": "CLIENT_INITIALIZATION"
+ },
+ {
+ "end": 38,
+ "start": 34,
+ "type": "REQUEST_INITIALIZATION"
+ },
+ {
+ "end": 41,
+ "start": 39,
+ "type": "REQUEST_EXECUTION"
+ },
+ {
+ "end": 45,
+ "start": 42,
+ "type": "RESPONSE_HANDLING"
+ }
+ ]
+ },
+ {
+ "clientMethod": {
+ "method": {
+ "service": {
+ "shortName": "TensorboardService"
+ },
+ "shortName": "GetTensorboardRun"
+ }
+ },
+ "file": "aiplatform_generated_aiplatform_v1_tensorboard_service_get_tensorboard_run_sync.py",
+ "regionTag": "aiplatform_generated_aiplatform_v1_TensorboardService_GetTensorboardRun_sync",
+ "segments": [
+ {
+ "end": 44,
+ "start": 27,
+ "type": "FULL"
+ },
+ {
+ "end": 44,
+ "start": 27,
+ "type": "SHORT"
+ },
+ {
+ "end": 33,
+ "start": 31,
+ "type": "CLIENT_INITIALIZATION"
+ },
+ {
+ "end": 38,
+ "start": 34,
+ "type": "REQUEST_INITIALIZATION"
+ },
+ {
+ "end": 41,
+ "start": 39,
+ "type": "REQUEST_EXECUTION"
+ },
+ {
+ "end": 45,
+ "start": 42,
+ "type": "RESPONSE_HANDLING"
+ }
+ ]
+ },
+ {
+ "clientMethod": {
+ "async": true,
+ "method": {
+ "service": {
+ "shortName": "TensorboardService"
+ },
+ "shortName": "GetTensorboardTimeSeries"
+ }
+ },
+ "file": "aiplatform_generated_aiplatform_v1_tensorboard_service_get_tensorboard_time_series_async.py",
+ "regionTag": "aiplatform_generated_aiplatform_v1_TensorboardService_GetTensorboardTimeSeries_async",
+ "segments": [
+ {
+ "end": 44,
+ "start": 27,
+ "type": "FULL"
+ },
+ {
+ "end": 44,
+ "start": 27,
+ "type": "SHORT"
+ },
+ {
+ "end": 33,
+ "start": 31,
+ "type": "CLIENT_INITIALIZATION"
+ },
+ {
+ "end": 38,
+ "start": 34,
+ "type": "REQUEST_INITIALIZATION"
+ },
+ {
+ "end": 41,
+ "start": 39,
+ "type": "REQUEST_EXECUTION"
+ },
+ {
+ "end": 45,
+ "start": 42,
+ "type": "RESPONSE_HANDLING"
+ }
+ ]
+ },
+ {
+ "clientMethod": {
+ "method": {
+ "service": {
+ "shortName": "TensorboardService"
+ },
+ "shortName": "GetTensorboardTimeSeries"
+ }
+ },
+ "file": "aiplatform_generated_aiplatform_v1_tensorboard_service_get_tensorboard_time_series_sync.py",
+ "regionTag": "aiplatform_generated_aiplatform_v1_TensorboardService_GetTensorboardTimeSeries_sync",
+ "segments": [
+ {
+ "end": 44,
+ "start": 27,
+ "type": "FULL"
+ },
+ {
+ "end": 44,
+ "start": 27,
+ "type": "SHORT"
+ },
+ {
+ "end": 33,
+ "start": 31,
+ "type": "CLIENT_INITIALIZATION"
+ },
+ {
+ "end": 38,
+ "start": 34,
+ "type": "REQUEST_INITIALIZATION"
+ },
+ {
+ "end": 41,
+ "start": 39,
+ "type": "REQUEST_EXECUTION"
+ },
+ {
+ "end": 45,
+ "start": 42,
+ "type": "RESPONSE_HANDLING"
+ }
+ ]
+ },
+ {
+ "clientMethod": {
+ "async": true,
+ "method": {
+ "service": {
+ "shortName": "TensorboardService"
+ },
+ "shortName": "GetTensorboard"
+ }
+ },
+ "file": "aiplatform_generated_aiplatform_v1_tensorboard_service_get_tensorboard_async.py",
+ "regionTag": "aiplatform_generated_aiplatform_v1_TensorboardService_GetTensorboard_async",
+ "segments": [
+ {
+ "end": 44,
+ "start": 27,
+ "type": "FULL"
+ },
+ {
+ "end": 44,
+ "start": 27,
+ "type": "SHORT"
+ },
+ {
+ "end": 33,
+ "start": 31,
+ "type": "CLIENT_INITIALIZATION"
+ },
+ {
+ "end": 38,
+ "start": 34,
+ "type": "REQUEST_INITIALIZATION"
+ },
+ {
+ "end": 41,
+ "start": 39,
+ "type": "REQUEST_EXECUTION"
+ },
+ {
+ "end": 45,
+ "start": 42,
+ "type": "RESPONSE_HANDLING"
+ }
+ ]
+ },
+ {
+ "clientMethod": {
+ "method": {
+ "service": {
+ "shortName": "TensorboardService"
+ },
+ "shortName": "GetTensorboard"
+ }
+ },
+ "file": "aiplatform_generated_aiplatform_v1_tensorboard_service_get_tensorboard_sync.py",
+ "regionTag": "aiplatform_generated_aiplatform_v1_TensorboardService_GetTensorboard_sync",
+ "segments": [
+ {
+ "end": 44,
+ "start": 27,
+ "type": "FULL"
+ },
+ {
+ "end": 44,
+ "start": 27,
+ "type": "SHORT"
+ },
+ {
+ "end": 33,
+ "start": 31,
+ "type": "CLIENT_INITIALIZATION"
+ },
+ {
+ "end": 38,
+ "start": 34,
+ "type": "REQUEST_INITIALIZATION"
+ },
+ {
+ "end": 41,
+ "start": 39,
+ "type": "REQUEST_EXECUTION"
+ },
+ {
+ "end": 45,
+ "start": 42,
+ "type": "RESPONSE_HANDLING"
+ }
+ ]
+ },
+ {
+ "clientMethod": {
+ "async": true,
+ "method": {
+ "service": {
+ "shortName": "TensorboardService"
+ },
+ "shortName": "ListTensorboardExperiments"
+ }
+ },
+ "file": "aiplatform_generated_aiplatform_v1_tensorboard_service_list_tensorboard_experiments_async.py",
+ "regionTag": "aiplatform_generated_aiplatform_v1_TensorboardService_ListTensorboardExperiments_async",
+ "segments": [
+ {
+ "end": 45,
+ "start": 27,
+ "type": "FULL"
+ },
+ {
+ "end": 45,
+ "start": 27,
+ "type": "SHORT"
+ },
+ {
+ "end": 33,
+ "start": 31,
+ "type": "CLIENT_INITIALIZATION"
+ },
+ {
+ "end": 38,
+ "start": 34,
+ "type": "REQUEST_INITIALIZATION"
+ },
+ {
+ "end": 41,
+ "start": 39,
+ "type": "REQUEST_EXECUTION"
+ },
+ {
+ "end": 46,
+ "start": 42,
+ "type": "RESPONSE_HANDLING"
+ }
+ ]
+ },
+ {
+ "clientMethod": {
+ "method": {
+ "service": {
+ "shortName": "TensorboardService"
+ },
+ "shortName": "ListTensorboardExperiments"
+ }
+ },
+ "file": "aiplatform_generated_aiplatform_v1_tensorboard_service_list_tensorboard_experiments_sync.py",
+ "regionTag": "aiplatform_generated_aiplatform_v1_TensorboardService_ListTensorboardExperiments_sync",
+ "segments": [
+ {
+ "end": 45,
+ "start": 27,
+ "type": "FULL"
+ },
+ {
+ "end": 45,
+ "start": 27,
+ "type": "SHORT"
+ },
+ {
+ "end": 33,
+ "start": 31,
+ "type": "CLIENT_INITIALIZATION"
+ },
+ {
+ "end": 38,
+ "start": 34,
+ "type": "REQUEST_INITIALIZATION"
+ },
+ {
+ "end": 41,
+ "start": 39,
+ "type": "REQUEST_EXECUTION"
+ },
+ {
+ "end": 46,
+ "start": 42,
+ "type": "RESPONSE_HANDLING"
+ }
+ ]
+ },
+ {
+ "clientMethod": {
+ "async": true,
+ "method": {
+ "service": {
+ "shortName": "TensorboardService"
+ },
+ "shortName": "ListTensorboardRuns"
+ }
+ },
+ "file": "aiplatform_generated_aiplatform_v1_tensorboard_service_list_tensorboard_runs_async.py",
+ "regionTag": "aiplatform_generated_aiplatform_v1_TensorboardService_ListTensorboardRuns_async",
+ "segments": [
+ {
+ "end": 45,
+ "start": 27,
+ "type": "FULL"
+ },
+ {
+ "end": 45,
+ "start": 27,
+ "type": "SHORT"
+ },
+ {
+ "end": 33,
+ "start": 31,
+ "type": "CLIENT_INITIALIZATION"
+ },
+ {
+ "end": 38,
+ "start": 34,
+ "type": "REQUEST_INITIALIZATION"
+ },
+ {
+ "end": 41,
+ "start": 39,
+ "type": "REQUEST_EXECUTION"
+ },
+ {
+ "end": 46,
+ "start": 42,
+ "type": "RESPONSE_HANDLING"
+ }
+ ]
+ },
+ {
+ "clientMethod": {
+ "method": {
+ "service": {
+ "shortName": "TensorboardService"
+ },
+ "shortName": "ListTensorboardRuns"
+ }
+ },
+ "file": "aiplatform_generated_aiplatform_v1_tensorboard_service_list_tensorboard_runs_sync.py",
+ "regionTag": "aiplatform_generated_aiplatform_v1_TensorboardService_ListTensorboardRuns_sync",
+ "segments": [
+ {
+ "end": 45,
+ "start": 27,
+ "type": "FULL"
+ },
+ {
+ "end": 45,
+ "start": 27,
+ "type": "SHORT"
+ },
+ {
+ "end": 33,
+ "start": 31,
+ "type": "CLIENT_INITIALIZATION"
+ },
+ {
+ "end": 38,
+ "start": 34,
+ "type": "REQUEST_INITIALIZATION"
+ },
+ {
+ "end": 41,
+ "start": 39,
+ "type": "REQUEST_EXECUTION"
+ },
+ {
+ "end": 46,
+ "start": 42,
+ "type": "RESPONSE_HANDLING"
+ }
+ ]
+ },
+ {
+ "clientMethod": {
+ "async": true,
+ "method": {
+ "service": {
+ "shortName": "TensorboardService"
+ },
+ "shortName": "ListTensorboardTimeSeries"
+ }
+ },
+ "file": "aiplatform_generated_aiplatform_v1_tensorboard_service_list_tensorboard_time_series_async.py",
+ "regionTag": "aiplatform_generated_aiplatform_v1_TensorboardService_ListTensorboardTimeSeries_async",
+ "segments": [
+ {
+ "end": 45,
+ "start": 27,
+ "type": "FULL"
+ },
+ {
+ "end": 45,
+ "start": 27,
+ "type": "SHORT"
+ },
+ {
+ "end": 33,
+ "start": 31,
+ "type": "CLIENT_INITIALIZATION"
+ },
+ {
+ "end": 38,
+ "start": 34,
+ "type": "REQUEST_INITIALIZATION"
+ },
+ {
+ "end": 41,
+ "start": 39,
+ "type": "REQUEST_EXECUTION"
+ },
+ {
+ "end": 46,
+ "start": 42,
+ "type": "RESPONSE_HANDLING"
+ }
+ ]
+ },
+ {
+ "clientMethod": {
+ "method": {
+ "service": {
+ "shortName": "TensorboardService"
+ },
+ "shortName": "ListTensorboardTimeSeries"
+ }
+ },
+ "file": "aiplatform_generated_aiplatform_v1_tensorboard_service_list_tensorboard_time_series_sync.py",
+ "regionTag": "aiplatform_generated_aiplatform_v1_TensorboardService_ListTensorboardTimeSeries_sync",
+ "segments": [
+ {
+ "end": 45,
+ "start": 27,
+ "type": "FULL"
+ },
+ {
+ "end": 45,
+ "start": 27,
+ "type": "SHORT"
+ },
+ {
+ "end": 33,
+ "start": 31,
+ "type": "CLIENT_INITIALIZATION"
+ },
+ {
+ "end": 38,
+ "start": 34,
+ "type": "REQUEST_INITIALIZATION"
+ },
+ {
+ "end": 41,
+ "start": 39,
+ "type": "REQUEST_EXECUTION"
+ },
+ {
+ "end": 46,
+ "start": 42,
+ "type": "RESPONSE_HANDLING"
+ }
+ ]
+ },
+ {
+ "clientMethod": {
+ "async": true,
+ "method": {
+ "service": {
+ "shortName": "TensorboardService"
+ },
+ "shortName": "ListTensorboards"
+ }
+ },
+ "file": "aiplatform_generated_aiplatform_v1_tensorboard_service_list_tensorboards_async.py",
+ "regionTag": "aiplatform_generated_aiplatform_v1_TensorboardService_ListTensorboards_async",
+ "segments": [
+ {
+ "end": 45,
+ "start": 27,
+ "type": "FULL"
+ },
+ {
+ "end": 45,
+ "start": 27,
+ "type": "SHORT"
+ },
+ {
+ "end": 33,
+ "start": 31,
+ "type": "CLIENT_INITIALIZATION"
+ },
+ {
+ "end": 38,
+ "start": 34,
+ "type": "REQUEST_INITIALIZATION"
+ },
+ {
+ "end": 41,
+ "start": 39,
+ "type": "REQUEST_EXECUTION"
+ },
+ {
+ "end": 46,
+ "start": 42,
+ "type": "RESPONSE_HANDLING"
+ }
+ ]
+ },
+ {
+ "clientMethod": {
+ "method": {
+ "service": {
+ "shortName": "TensorboardService"
+ },
+ "shortName": "ListTensorboards"
+ }
+ },
+ "file": "aiplatform_generated_aiplatform_v1_tensorboard_service_list_tensorboards_sync.py",
+ "regionTag": "aiplatform_generated_aiplatform_v1_TensorboardService_ListTensorboards_sync",
+ "segments": [
+ {
+ "end": 45,
+ "start": 27,
+ "type": "FULL"
+ },
+ {
+ "end": 45,
+ "start": 27,
+ "type": "SHORT"
+ },
+ {
+ "end": 33,
+ "start": 31,
+ "type": "CLIENT_INITIALIZATION"
+ },
+ {
+ "end": 38,
+ "start": 34,
+ "type": "REQUEST_INITIALIZATION"
+ },
+ {
+ "end": 41,
+ "start": 39,
+ "type": "REQUEST_EXECUTION"
+ },
+ {
+ "end": 46,
+ "start": 42,
+ "type": "RESPONSE_HANDLING"
+ }
+ ]
+ },
+ {
+ "clientMethod": {
+ "async": true,
+ "method": {
+ "service": {
+ "shortName": "TensorboardService"
+ },
+ "shortName": "ReadTensorboardBlobData"
+ }
+ },
+ "file": "aiplatform_generated_aiplatform_v1_tensorboard_service_read_tensorboard_blob_data_async.py",
+ "regionTag": "aiplatform_generated_aiplatform_v1_TensorboardService_ReadTensorboardBlobData_async",
+ "segments": [
+ {
+ "end": 45,
+ "start": 27,
+ "type": "FULL"
+ },
+ {
+ "end": 45,
+ "start": 27,
+ "type": "SHORT"
+ },
+ {
+ "end": 33,
+ "start": 31,
+ "type": "CLIENT_INITIALIZATION"
+ },
+ {
+ "end": 38,
+ "start": 34,
+ "type": "REQUEST_INITIALIZATION"
+ },
+ {
+ "end": 41,
+ "start": 39,
+ "type": "REQUEST_EXECUTION"
+ },
+ {
+ "end": 46,
+ "start": 42,
+ "type": "RESPONSE_HANDLING"
+ }
+ ]
+ },
+ {
+ "clientMethod": {
+ "method": {
+ "service": {
+ "shortName": "TensorboardService"
+ },
+ "shortName": "ReadTensorboardBlobData"
+ }
+ },
+ "file": "aiplatform_generated_aiplatform_v1_tensorboard_service_read_tensorboard_blob_data_sync.py",
+ "regionTag": "aiplatform_generated_aiplatform_v1_TensorboardService_ReadTensorboardBlobData_sync",
+ "segments": [
+ {
+ "end": 45,
+ "start": 27,
+ "type": "FULL"
+ },
+ {
+ "end": 45,
+ "start": 27,
+ "type": "SHORT"
+ },
+ {
+ "end": 33,
+ "start": 31,
+ "type": "CLIENT_INITIALIZATION"
+ },
+ {
+ "end": 38,
+ "start": 34,
+ "type": "REQUEST_INITIALIZATION"
+ },
+ {
+ "end": 41,
+ "start": 39,
+ "type": "REQUEST_EXECUTION"
+ },
+ {
+ "end": 46,
+ "start": 42,
+ "type": "RESPONSE_HANDLING"
+ }
+ ]
+ },
+ {
+ "clientMethod": {
+ "async": true,
+ "method": {
+ "service": {
+ "shortName": "TensorboardService"
+ },
+ "shortName": "ReadTensorboardTimeSeriesData"
+ }
+ },
+ "file": "aiplatform_generated_aiplatform_v1_tensorboard_service_read_tensorboard_time_series_data_async.py",
+ "regionTag": "aiplatform_generated_aiplatform_v1_TensorboardService_ReadTensorboardTimeSeriesData_async",
+ "segments": [
+ {
+ "end": 44,
+ "start": 27,
+ "type": "FULL"
+ },
+ {
+ "end": 44,
+ "start": 27,
+ "type": "SHORT"
+ },
+ {
+ "end": 33,
+ "start": 31,
+ "type": "CLIENT_INITIALIZATION"
+ },
+ {
+ "end": 38,
+ "start": 34,
+ "type": "REQUEST_INITIALIZATION"
+ },
+ {
+ "end": 41,
+ "start": 39,
+ "type": "REQUEST_EXECUTION"
+ },
+ {
+ "end": 45,
+ "start": 42,
+ "type": "RESPONSE_HANDLING"
+ }
+ ]
+ },
+ {
+ "clientMethod": {
+ "method": {
+ "service": {
+ "shortName": "TensorboardService"
+ },
+ "shortName": "ReadTensorboardTimeSeriesData"
+ }
+ },
+ "file": "aiplatform_generated_aiplatform_v1_tensorboard_service_read_tensorboard_time_series_data_sync.py",
+ "regionTag": "aiplatform_generated_aiplatform_v1_TensorboardService_ReadTensorboardTimeSeriesData_sync",
+ "segments": [
+ {
+ "end": 44,
+ "start": 27,
+ "type": "FULL"
+ },
+ {
+ "end": 44,
+ "start": 27,
+ "type": "SHORT"
+ },
+ {
+ "end": 33,
+ "start": 31,
+ "type": "CLIENT_INITIALIZATION"
+ },
+ {
+ "end": 38,
+ "start": 34,
+ "type": "REQUEST_INITIALIZATION"
+ },
+ {
+ "end": 41,
+ "start": 39,
+ "type": "REQUEST_EXECUTION"
+ },
+ {
+ "end": 45,
+ "start": 42,
+ "type": "RESPONSE_HANDLING"
+ }
+ ]
+ },
+ {
+ "clientMethod": {
+ "async": true,
+ "method": {
+ "service": {
+ "shortName": "TensorboardService"
+ },
+ "shortName": "UpdateTensorboardExperiment"
+ }
+ },
+ "file": "aiplatform_generated_aiplatform_v1_tensorboard_service_update_tensorboard_experiment_async.py",
+ "regionTag": "aiplatform_generated_aiplatform_v1_TensorboardService_UpdateTensorboardExperiment_async",
+ "segments": [
+ {
+ "end": 43,
+ "start": 27,
+ "type": "FULL"
+ },
+ {
+ "end": 43,
+ "start": 27,
+ "type": "SHORT"
+ },
+ {
+ "end": 33,
+ "start": 31,
+ "type": "CLIENT_INITIALIZATION"
+ },
+ {
+ "end": 37,
+ "start": 34,
+ "type": "REQUEST_INITIALIZATION"
+ },
+ {
+ "end": 40,
+ "start": 38,
+ "type": "REQUEST_EXECUTION"
+ },
+ {
+ "end": 44,
+ "start": 41,
+ "type": "RESPONSE_HANDLING"
+ }
+ ]
+ },
+ {
+ "clientMethod": {
+ "method": {
+ "service": {
+ "shortName": "TensorboardService"
+ },
+ "shortName": "UpdateTensorboardExperiment"
+ }
+ },
+ "file": "aiplatform_generated_aiplatform_v1_tensorboard_service_update_tensorboard_experiment_sync.py",
+ "regionTag": "aiplatform_generated_aiplatform_v1_TensorboardService_UpdateTensorboardExperiment_sync",
+ "segments": [
+ {
+ "end": 43,
+ "start": 27,
+ "type": "FULL"
+ },
+ {
+ "end": 43,
+ "start": 27,
+ "type": "SHORT"
+ },
+ {
+ "end": 33,
+ "start": 31,
+ "type": "CLIENT_INITIALIZATION"
+ },
+ {
+ "end": 37,
+ "start": 34,
+ "type": "REQUEST_INITIALIZATION"
+ },
+ {
+ "end": 40,
+ "start": 38,
+ "type": "REQUEST_EXECUTION"
+ },
+ {
+ "end": 44,
+ "start": 41,
+ "type": "RESPONSE_HANDLING"
+ }
+ ]
+ },
+ {
+ "clientMethod": {
+ "async": true,
+ "method": {
+ "service": {
+ "shortName": "TensorboardService"
+ },
+ "shortName": "UpdateTensorboardRun"
+ }
+ },
+ "file": "aiplatform_generated_aiplatform_v1_tensorboard_service_update_tensorboard_run_async.py",
+ "regionTag": "aiplatform_generated_aiplatform_v1_TensorboardService_UpdateTensorboardRun_async",
+ "segments": [
+ {
+ "end": 47,
+ "start": 27,
+ "type": "FULL"
+ },
+ {
+ "end": 47,
+ "start": 27,
+ "type": "SHORT"
+ },
+ {
+ "end": 33,
+ "start": 31,
+ "type": "CLIENT_INITIALIZATION"
+ },
+ {
+ "end": 41,
+ "start": 34,
+ "type": "REQUEST_INITIALIZATION"
+ },
+ {
+ "end": 44,
+ "start": 42,
+ "type": "REQUEST_EXECUTION"
+ },
+ {
+ "end": 48,
+ "start": 45,
+ "type": "RESPONSE_HANDLING"
+ }
+ ]
+ },
+ {
+ "clientMethod": {
+ "method": {
+ "service": {
+ "shortName": "TensorboardService"
+ },
+ "shortName": "UpdateTensorboardRun"
+ }
+ },
+ "file": "aiplatform_generated_aiplatform_v1_tensorboard_service_update_tensorboard_run_sync.py",
+ "regionTag": "aiplatform_generated_aiplatform_v1_TensorboardService_UpdateTensorboardRun_sync",
+ "segments": [
+ {
+ "end": 47,
+ "start": 27,
+ "type": "FULL"
+ },
+ {
+ "end": 47,
+ "start": 27,
+ "type": "SHORT"
+ },
+ {
+ "end": 33,
+ "start": 31,
+ "type": "CLIENT_INITIALIZATION"
+ },
+ {
+ "end": 41,
+ "start": 34,
+ "type": "REQUEST_INITIALIZATION"
+ },
+ {
+ "end": 44,
+ "start": 42,
+ "type": "REQUEST_EXECUTION"
+ },
+ {
+ "end": 48,
+ "start": 45,
+ "type": "RESPONSE_HANDLING"
+ }
+ ]
+ },
+ {
+ "clientMethod": {
+ "async": true,
+ "method": {
+ "service": {
+ "shortName": "TensorboardService"
+ },
+ "shortName": "UpdateTensorboardTimeSeries"
+ }
+ },
+ "file": "aiplatform_generated_aiplatform_v1_tensorboard_service_update_tensorboard_time_series_async.py",
+ "regionTag": "aiplatform_generated_aiplatform_v1_TensorboardService_UpdateTensorboardTimeSeries_async",
+ "segments": [
+ {
+ "end": 48,
+ "start": 27,
+ "type": "FULL"
+ },
+ {
+ "end": 48,
+ "start": 27,
+ "type": "SHORT"
+ },
+ {
+ "end": 33,
+ "start": 31,
+ "type": "CLIENT_INITIALIZATION"
+ },
+ {
+ "end": 42,
+ "start": 34,
+ "type": "REQUEST_INITIALIZATION"
+ },
+ {
+ "end": 45,
+ "start": 43,
+ "type": "REQUEST_EXECUTION"
+ },
+ {
+ "end": 49,
+ "start": 46,
+ "type": "RESPONSE_HANDLING"
+ }
+ ]
+ },
+ {
+ "clientMethod": {
+ "method": {
+ "service": {
+ "shortName": "TensorboardService"
+ },
+ "shortName": "UpdateTensorboardTimeSeries"
+ }
+ },
+ "file": "aiplatform_generated_aiplatform_v1_tensorboard_service_update_tensorboard_time_series_sync.py",
+ "regionTag": "aiplatform_generated_aiplatform_v1_TensorboardService_UpdateTensorboardTimeSeries_sync",
+ "segments": [
+ {
+ "end": 48,
+ "start": 27,
+ "type": "FULL"
+ },
+ {
+ "end": 48,
+ "start": 27,
+ "type": "SHORT"
+ },
+ {
+ "end": 33,
+ "start": 31,
+ "type": "CLIENT_INITIALIZATION"
+ },
+ {
+ "end": 42,
+ "start": 34,
+ "type": "REQUEST_INITIALIZATION"
+ },
+ {
+ "end": 45,
+ "start": 43,
+ "type": "REQUEST_EXECUTION"
+ },
+ {
+ "end": 49,
+ "start": 46,
+ "type": "RESPONSE_HANDLING"
+ }
+ ]
+ },
+ {
+ "clientMethod": {
+ "async": true,
+ "method": {
+ "service": {
+ "shortName": "TensorboardService"
+ },
+ "shortName": "UpdateTensorboard"
+ }
+ },
+ "file": "aiplatform_generated_aiplatform_v1_tensorboard_service_update_tensorboard_async.py",
+ "regionTag": "aiplatform_generated_aiplatform_v1_TensorboardService_UpdateTensorboard_async",
+ "segments": [
+ {
+ "end": 51,
+ "start": 27,
+ "type": "FULL"
+ },
+ {
+ "end": 51,
+ "start": 27,
+ "type": "SHORT"
+ },
+ {
+ "end": 33,
+ "start": 31,
+ "type": "CLIENT_INITIALIZATION"
+ },
+ {
+ "end": 41,
+ "start": 34,
+ "type": "REQUEST_INITIALIZATION"
+ },
+ {
+ "end": 48,
+ "start": 42,
+ "type": "REQUEST_EXECUTION"
+ },
+ {
+ "end": 52,
+ "start": 49,
+ "type": "RESPONSE_HANDLING"
+ }
+ ]
+ },
+ {
+ "clientMethod": {
+ "method": {
+ "service": {
+ "shortName": "TensorboardService"
+ },
+ "shortName": "UpdateTensorboard"
+ }
+ },
+ "file": "aiplatform_generated_aiplatform_v1_tensorboard_service_update_tensorboard_sync.py",
+ "regionTag": "aiplatform_generated_aiplatform_v1_TensorboardService_UpdateTensorboard_sync",
+ "segments": [
+ {
+ "end": 51,
+ "start": 27,
+ "type": "FULL"
+ },
+ {
+ "end": 51,
+ "start": 27,
+ "type": "SHORT"
+ },
+ {
+ "end": 33,
+ "start": 31,
+ "type": "CLIENT_INITIALIZATION"
+ },
+ {
+ "end": 41,
+ "start": 34,
+ "type": "REQUEST_INITIALIZATION"
+ },
+ {
+ "end": 48,
+ "start": 42,
+ "type": "REQUEST_EXECUTION"
+ },
+ {
+ "end": 52,
+ "start": 49,
+ "type": "RESPONSE_HANDLING"
+ }
+ ]
+ },
+ {
+ "clientMethod": {
+ "async": true,
+ "method": {
+ "service": {
+ "shortName": "TensorboardService"
+ },
+ "shortName": "WriteTensorboardExperimentData"
+ }
+ },
+ "file": "aiplatform_generated_aiplatform_v1_tensorboard_service_write_tensorboard_experiment_data_async.py",
+ "regionTag": "aiplatform_generated_aiplatform_v1_TensorboardService_WriteTensorboardExperimentData_async",
+ "segments": [
+ {
+ "end": 50,
+ "start": 27,
+ "type": "FULL"
+ },
+ {
+ "end": 50,
+ "start": 27,
+ "type": "SHORT"
+ },
+ {
+ "end": 33,
+ "start": 31,
+ "type": "CLIENT_INITIALIZATION"
+ },
+ {
+ "end": 44,
+ "start": 34,
+ "type": "REQUEST_INITIALIZATION"
+ },
+ {
+ "end": 47,
+ "start": 45,
+ "type": "REQUEST_EXECUTION"
+ },
+ {
+ "end": 51,
+ "start": 48,
+ "type": "RESPONSE_HANDLING"
+ }
+ ]
+ },
+ {
+ "clientMethod": {
+ "method": {
+ "service": {
+ "shortName": "TensorboardService"
+ },
+ "shortName": "WriteTensorboardExperimentData"
+ }
+ },
+ "file": "aiplatform_generated_aiplatform_v1_tensorboard_service_write_tensorboard_experiment_data_sync.py",
+ "regionTag": "aiplatform_generated_aiplatform_v1_TensorboardService_WriteTensorboardExperimentData_sync",
+ "segments": [
+ {
+ "end": 50,
+ "start": 27,
+ "type": "FULL"
+ },
+ {
+ "end": 50,
+ "start": 27,
+ "type": "SHORT"
+ },
+ {
+ "end": 33,
+ "start": 31,
+ "type": "CLIENT_INITIALIZATION"
+ },
+ {
+ "end": 44,
+ "start": 34,
+ "type": "REQUEST_INITIALIZATION"
+ },
+ {
+ "end": 47,
+ "start": 45,
+ "type": "REQUEST_EXECUTION"
+ },
+ {
+ "end": 51,
+ "start": 48,
+ "type": "RESPONSE_HANDLING"
+ }
+ ]
+ },
+ {
+ "clientMethod": {
+ "async": true,
+ "method": {
+ "service": {
+ "shortName": "TensorboardService"
+ },
+ "shortName": "WriteTensorboardRunData"
+ }
+ },
+ "file": "aiplatform_generated_aiplatform_v1_tensorboard_service_write_tensorboard_run_data_async.py",
+ "regionTag": "aiplatform_generated_aiplatform_v1_TensorboardService_WriteTensorboardRunData_async",
+ "segments": [
+ {
+ "end": 49,
+ "start": 27,
+ "type": "FULL"
+ },
+ {
+ "end": 49,
+ "start": 27,
+ "type": "SHORT"
+ },
+ {
+ "end": 33,
+ "start": 31,
+ "type": "CLIENT_INITIALIZATION"
+ },
+ {
+ "end": 43,
+ "start": 34,
+ "type": "REQUEST_INITIALIZATION"
+ },
+ {
+ "end": 46,
+ "start": 44,
+ "type": "REQUEST_EXECUTION"
+ },
+ {
+ "end": 50,
+ "start": 47,
+ "type": "RESPONSE_HANDLING"
+ }
+ ]
+ },
+ {
+ "clientMethod": {
+ "method": {
+ "service": {
+ "shortName": "TensorboardService"
+ },
+ "shortName": "WriteTensorboardRunData"
+ }
+ },
+ "file": "aiplatform_generated_aiplatform_v1_tensorboard_service_write_tensorboard_run_data_sync.py",
+ "regionTag": "aiplatform_generated_aiplatform_v1_TensorboardService_WriteTensorboardRunData_sync",
+ "segments": [
+ {
+ "end": 49,
+ "start": 27,
+ "type": "FULL"
+ },
+ {
+ "end": 49,
+ "start": 27,
+ "type": "SHORT"
+ },
+ {
+ "end": 33,
+ "start": 31,
+ "type": "CLIENT_INITIALIZATION"
+ },
+ {
+ "end": 43,
+ "start": 34,
+ "type": "REQUEST_INITIALIZATION"
+ },
+ {
+ "end": 46,
+ "start": 44,
+ "type": "REQUEST_EXECUTION"
+ },
+ {
+ "end": 50,
+ "start": 47,
+ "type": "RESPONSE_HANDLING"
+ }
+ ]
+ },
+ {
+ "clientMethod": {
+ "async": true,
+ "method": {
+ "service": {
+ "shortName": "VizierService"
+ },
+ "shortName": "AddTrialMeasurement"
+ }
+ },
+ "file": "aiplatform_generated_aiplatform_v1_vizier_service_add_trial_measurement_async.py",
+ "regionTag": "aiplatform_generated_aiplatform_v1_VizierService_AddTrialMeasurement_async",
+ "segments": [
+ {
+ "end": 44,
+ "start": 27,
+ "type": "FULL"
+ },
+ {
+ "end": 44,
+ "start": 27,
+ "type": "SHORT"
+ },
+ {
+ "end": 33,
+ "start": 31,
+ "type": "CLIENT_INITIALIZATION"
+ },
+ {
+ "end": 38,
+ "start": 34,
+ "type": "REQUEST_INITIALIZATION"
+ },
+ {
+ "end": 41,
+ "start": 39,
+ "type": "REQUEST_EXECUTION"
+ },
+ {
+ "end": 45,
+ "start": 42,
+ "type": "RESPONSE_HANDLING"
+ }
+ ]
+ },
+ {
+ "clientMethod": {
+ "method": {
+ "service": {
+ "shortName": "VizierService"
+ },
+ "shortName": "AddTrialMeasurement"
+ }
+ },
+ "file": "aiplatform_generated_aiplatform_v1_vizier_service_add_trial_measurement_sync.py",
+ "regionTag": "aiplatform_generated_aiplatform_v1_VizierService_AddTrialMeasurement_sync",
+ "segments": [
+ {
+ "end": 44,
+ "start": 27,
+ "type": "FULL"
+ },
+ {
+ "end": 44,
+ "start": 27,
+ "type": "SHORT"
+ },
+ {
+ "end": 33,
+ "start": 31,
+ "type": "CLIENT_INITIALIZATION"
+ },
+ {
+ "end": 38,
+ "start": 34,
+ "type": "REQUEST_INITIALIZATION"
+ },
+ {
+ "end": 41,
+ "start": 39,
+ "type": "REQUEST_EXECUTION"
+ },
+ {
+ "end": 45,
+ "start": 42,
+ "type": "RESPONSE_HANDLING"
+ }
+ ]
+ },
+ {
+ "clientMethod": {
+ "async": true,
+ "method": {
+ "service": {
+ "shortName": "VizierService"
+ },
+ "shortName": "CheckTrialEarlyStoppingState"
+ }
+ },
+ "file": "aiplatform_generated_aiplatform_v1_vizier_service_check_trial_early_stopping_state_async.py",
+ "regionTag": "aiplatform_generated_aiplatform_v1_VizierService_CheckTrialEarlyStoppingState_async",
+ "segments": [
+ {
+ "end": 48,
+ "start": 27,
+ "type": "FULL"
+ },
+ {
+ "end": 48,
+ "start": 27,
+ "type": "SHORT"
+ },
+ {
+ "end": 33,
+ "start": 31,
+ "type": "CLIENT_INITIALIZATION"
+ },
+ {
+ "end": 38,
+ "start": 34,
+ "type": "REQUEST_INITIALIZATION"
+ },
+ {
+ "end": 45,
+ "start": 39,
+ "type": "REQUEST_EXECUTION"
+ },
+ {
+ "end": 49,
+ "start": 46,
+ "type": "RESPONSE_HANDLING"
+ }
+ ]
+ },
+ {
+ "clientMethod": {
+ "method": {
+ "service": {
+ "shortName": "VizierService"
+ },
+ "shortName": "CheckTrialEarlyStoppingState"
+ }
+ },
+ "file": "aiplatform_generated_aiplatform_v1_vizier_service_check_trial_early_stopping_state_sync.py",
+ "regionTag": "aiplatform_generated_aiplatform_v1_VizierService_CheckTrialEarlyStoppingState_sync",
+ "segments": [
+ {
+ "end": 48,
+ "start": 27,
+ "type": "FULL"
+ },
+ {
+ "end": 48,
+ "start": 27,
+ "type": "SHORT"
+ },
+ {
+ "end": 33,
+ "start": 31,
+ "type": "CLIENT_INITIALIZATION"
+ },
+ {
+ "end": 38,
+ "start": 34,
+ "type": "REQUEST_INITIALIZATION"
+ },
+ {
+ "end": 45,
+ "start": 39,
+ "type": "REQUEST_EXECUTION"
+ },
+ {
+ "end": 49,
+ "start": 46,
+ "type": "RESPONSE_HANDLING"
+ }
+ ]
+ },
+ {
+ "clientMethod": {
+ "async": true,
+ "method": {
+ "service": {
+ "shortName": "VizierService"
+ },
+ "shortName": "CompleteTrial"
+ }
+ },
+ "file": "aiplatform_generated_aiplatform_v1_vizier_service_complete_trial_async.py",
+ "regionTag": "aiplatform_generated_aiplatform_v1_VizierService_CompleteTrial_async",
+ "segments": [
+ {
+ "end": 44,
+ "start": 27,
+ "type": "FULL"
+ },
+ {
+ "end": 44,
+ "start": 27,
+ "type": "SHORT"
+ },
+ {
+ "end": 33,
+ "start": 31,
+ "type": "CLIENT_INITIALIZATION"
+ },
+ {
+ "end": 38,
+ "start": 34,
+ "type": "REQUEST_INITIALIZATION"
+ },
+ {
+ "end": 41,
+ "start": 39,
+ "type": "REQUEST_EXECUTION"
+ },
+ {
+ "end": 45,
+ "start": 42,
+ "type": "RESPONSE_HANDLING"
+ }
+ ]
+ },
+ {
+ "clientMethod": {
+ "method": {
+ "service": {
+ "shortName": "VizierService"
+ },
+ "shortName": "CompleteTrial"
+ }
+ },
+ "file": "aiplatform_generated_aiplatform_v1_vizier_service_complete_trial_sync.py",
+ "regionTag": "aiplatform_generated_aiplatform_v1_VizierService_CompleteTrial_sync",
+ "segments": [
+ {
+ "end": 44,
+ "start": 27,
+ "type": "FULL"
+ },
+ {
+ "end": 44,
+ "start": 27,
+ "type": "SHORT"
+ },
+ {
+ "end": 33,
+ "start": 31,
+ "type": "CLIENT_INITIALIZATION"
+ },
+ {
+ "end": 38,
+ "start": 34,
+ "type": "REQUEST_INITIALIZATION"
+ },
+ {
+ "end": 41,
+ "start": 39,
+ "type": "REQUEST_EXECUTION"
+ },
+ {
+ "end": 45,
+ "start": 42,
+ "type": "RESPONSE_HANDLING"
+ }
+ ]
+ },
+ {
+ "clientMethod": {
+ "async": true,
+ "method": {
+ "service": {
+ "shortName": "VizierService"
+ },
+ "shortName": "CreateStudy"
+ }
+ },
+ "file": "aiplatform_generated_aiplatform_v1_vizier_service_create_study_async.py",
+ "regionTag": "aiplatform_generated_aiplatform_v1_VizierService_CreateStudy_async",
+ "segments": [
+ {
+ "end": 53,
+ "start": 27,
+ "type": "FULL"
+ },
+ {
+ "end": 53,
+ "start": 27,
+ "type": "SHORT"
+ },
+ {
+ "end": 33,
+ "start": 31,
+ "type": "CLIENT_INITIALIZATION"
+ },
+ {
+ "end": 47,
+ "start": 34,
+ "type": "REQUEST_INITIALIZATION"
+ },
+ {
+ "end": 50,
+ "start": 48,
+ "type": "REQUEST_EXECUTION"
+ },
+ {
+ "end": 54,
+ "start": 51,
+ "type": "RESPONSE_HANDLING"
+ }
+ ]
+ },
+ {
+ "clientMethod": {
+ "method": {
+ "service": {
+ "shortName": "VizierService"
+ },
+ "shortName": "CreateStudy"
+ }
+ },
+ "file": "aiplatform_generated_aiplatform_v1_vizier_service_create_study_sync.py",
+ "regionTag": "aiplatform_generated_aiplatform_v1_VizierService_CreateStudy_sync",
+ "segments": [
+ {
+ "end": 53,
+ "start": 27,
+ "type": "FULL"
+ },
+ {
+ "end": 53,
+ "start": 27,
+ "type": "SHORT"
+ },
+ {
+ "end": 33,
+ "start": 31,
+ "type": "CLIENT_INITIALIZATION"
+ },
+ {
+ "end": 47,
+ "start": 34,
+ "type": "REQUEST_INITIALIZATION"
+ },
+ {
+ "end": 50,
+ "start": 48,
+ "type": "REQUEST_EXECUTION"
+ },
+ {
+ "end": 54,
+ "start": 51,
+ "type": "RESPONSE_HANDLING"
+ }
+ ]
+ },
+ {
+ "clientMethod": {
+ "async": true,
+ "method": {
+ "service": {
+ "shortName": "VizierService"
+ },
+ "shortName": "CreateTrial"
+ }
+ },
+ "file": "aiplatform_generated_aiplatform_v1_vizier_service_create_trial_async.py",
+ "regionTag": "aiplatform_generated_aiplatform_v1_VizierService_CreateTrial_async",
+ "segments": [
+ {
+ "end": 44,
+ "start": 27,
+ "type": "FULL"
+ },
+ {
+ "end": 44,
+ "start": 27,
+ "type": "SHORT"
+ },
+ {
+ "end": 33,
+ "start": 31,
+ "type": "CLIENT_INITIALIZATION"
+ },
+ {
+ "end": 38,
+ "start": 34,
+ "type": "REQUEST_INITIALIZATION"
+ },
+ {
+ "end": 41,
+ "start": 39,
+ "type": "REQUEST_EXECUTION"
+ },
+ {
+ "end": 45,
+ "start": 42,
+ "type": "RESPONSE_HANDLING"
+ }
+ ]
+ },
+ {
+ "clientMethod": {
+ "method": {
+ "service": {
+ "shortName": "VizierService"
+ },
+ "shortName": "CreateTrial"
+ }
+ },
+ "file": "aiplatform_generated_aiplatform_v1_vizier_service_create_trial_sync.py",
+ "regionTag": "aiplatform_generated_aiplatform_v1_VizierService_CreateTrial_sync",
+ "segments": [
+ {
+ "end": 44,
+ "start": 27,
+ "type": "FULL"
+ },
+ {
+ "end": 44,
+ "start": 27,
+ "type": "SHORT"
+ },
+ {
+ "end": 33,
+ "start": 31,
+ "type": "CLIENT_INITIALIZATION"
+ },
+ {
+ "end": 38,
+ "start": 34,
+ "type": "REQUEST_INITIALIZATION"
+ },
+ {
+ "end": 41,
+ "start": 39,
+ "type": "REQUEST_EXECUTION"
+ },
+ {
+ "end": 45,
+ "start": 42,
+ "type": "RESPONSE_HANDLING"
+ }
+ ]
+ },
+ {
+ "clientMethod": {
+ "async": true,
+ "method": {
+ "service": {
+ "shortName": "VizierService"
+ },
+ "shortName": "DeleteStudy"
+ }
+ },
+ "file": "aiplatform_generated_aiplatform_v1_vizier_service_delete_study_async.py",
+ "regionTag": "aiplatform_generated_aiplatform_v1_VizierService_DeleteStudy_async",
+ "segments": [
+ {
+ "end": 42,
+ "start": 27,
+ "type": "FULL"
+ },
+ {
+ "end": 42,
+ "start": 27,
+ "type": "SHORT"
+ },
+ {
+ "end": 33,
+ "start": 31,
+ "type": "CLIENT_INITIALIZATION"
+ },
+ {
+ "end": 38,
+ "start": 34,
+ "type": "REQUEST_INITIALIZATION"
+ },
+ {
+ "start": 39,
+ "type": "REQUEST_EXECUTION"
+ },
+ {
+ "end": 43,
+ "type": "RESPONSE_HANDLING"
+ }
+ ]
+ },
+ {
+ "clientMethod": {
+ "method": {
+ "service": {
+ "shortName": "VizierService"
+ },
+ "shortName": "DeleteStudy"
+ }
+ },
+ "file": "aiplatform_generated_aiplatform_v1_vizier_service_delete_study_sync.py",
+ "regionTag": "aiplatform_generated_aiplatform_v1_VizierService_DeleteStudy_sync",
+ "segments": [
+ {
+ "end": 42,
+ "start": 27,
+ "type": "FULL"
+ },
+ {
+ "end": 42,
+ "start": 27,
+ "type": "SHORT"
+ },
+ {
+ "end": 33,
+ "start": 31,
+ "type": "CLIENT_INITIALIZATION"
+ },
+ {
+ "end": 38,
+ "start": 34,
+ "type": "REQUEST_INITIALIZATION"
+ },
+ {
+ "start": 39,
+ "type": "REQUEST_EXECUTION"
+ },
+ {
+ "end": 43,
+ "type": "RESPONSE_HANDLING"
+ }
+ ]
+ },
+ {
+ "clientMethod": {
+ "async": true,
+ "method": {
+ "service": {
+ "shortName": "VizierService"
+ },
+ "shortName": "DeleteTrial"
+ }
+ },
+ "file": "aiplatform_generated_aiplatform_v1_vizier_service_delete_trial_async.py",
+ "regionTag": "aiplatform_generated_aiplatform_v1_VizierService_DeleteTrial_async",
+ "segments": [
+ {
+ "end": 42,
+ "start": 27,
+ "type": "FULL"
+ },
+ {
+ "end": 42,
+ "start": 27,
+ "type": "SHORT"
+ },
+ {
+ "end": 33,
+ "start": 31,
+ "type": "CLIENT_INITIALIZATION"
+ },
+ {
+ "end": 38,
+ "start": 34,
+ "type": "REQUEST_INITIALIZATION"
+ },
+ {
+ "start": 39,
+ "type": "REQUEST_EXECUTION"
+ },
+ {
+ "end": 43,
+ "type": "RESPONSE_HANDLING"
+ }
+ ]
+ },
+ {
+ "clientMethod": {
+ "method": {
+ "service": {
+ "shortName": "VizierService"
+ },
+ "shortName": "DeleteTrial"
+ }
+ },
+ "file": "aiplatform_generated_aiplatform_v1_vizier_service_delete_trial_sync.py",
+ "regionTag": "aiplatform_generated_aiplatform_v1_VizierService_DeleteTrial_sync",
+ "segments": [
+ {
+ "end": 42,
+ "start": 27,
+ "type": "FULL"
+ },
+ {
+ "end": 42,
+ "start": 27,
+ "type": "SHORT"
+ },
+ {
+ "end": 33,
+ "start": 31,
+ "type": "CLIENT_INITIALIZATION"
+ },
+ {
+ "end": 38,
+ "start": 34,
+ "type": "REQUEST_INITIALIZATION"
+ },
+ {
+ "start": 39,
+ "type": "REQUEST_EXECUTION"
+ },
+ {
+ "end": 43,
+ "type": "RESPONSE_HANDLING"
+ }
+ ]
+ },
+ {
+ "clientMethod": {
+ "async": true,
+ "method": {
+ "service": {
+ "shortName": "VizierService"
+ },
+ "shortName": "GetStudy"
+ }
+ },
+ "file": "aiplatform_generated_aiplatform_v1_vizier_service_get_study_async.py",
+ "regionTag": "aiplatform_generated_aiplatform_v1_VizierService_GetStudy_async",
+ "segments": [
+ {
+ "end": 44,
+ "start": 27,
+ "type": "FULL"
+ },
+ {
+ "end": 44,
+ "start": 27,
+ "type": "SHORT"
+ },
+ {
+ "end": 33,
+ "start": 31,
+ "type": "CLIENT_INITIALIZATION"
+ },
+ {
+ "end": 38,
+ "start": 34,
+ "type": "REQUEST_INITIALIZATION"
+ },
+ {
+ "end": 41,
+ "start": 39,
+ "type": "REQUEST_EXECUTION"
+ },
+ {
+ "end": 45,
+ "start": 42,
+ "type": "RESPONSE_HANDLING"
+ }
+ ]
+ },
+ {
+ "clientMethod": {
+ "method": {
+ "service": {
+ "shortName": "VizierService"
+ },
+ "shortName": "GetStudy"
+ }
+ },
+ "file": "aiplatform_generated_aiplatform_v1_vizier_service_get_study_sync.py",
+ "regionTag": "aiplatform_generated_aiplatform_v1_VizierService_GetStudy_sync",
+ "segments": [
+ {
+ "end": 44,
+ "start": 27,
+ "type": "FULL"
+ },
+ {
+ "end": 44,
+ "start": 27,
+ "type": "SHORT"
+ },
+ {
+ "end": 33,
+ "start": 31,
+ "type": "CLIENT_INITIALIZATION"
+ },
+ {
+ "end": 38,
+ "start": 34,
+ "type": "REQUEST_INITIALIZATION"
+ },
+ {
+ "end": 41,
+ "start": 39,
+ "type": "REQUEST_EXECUTION"
+ },
+ {
+ "end": 45,
+ "start": 42,
+ "type": "RESPONSE_HANDLING"
+ }
+ ]
+ },
+ {
+ "clientMethod": {
+ "async": true,
+ "method": {
+ "service": {
+ "shortName": "VizierService"
+ },
+ "shortName": "GetTrial"
+ }
+ },
+ "file": "aiplatform_generated_aiplatform_v1_vizier_service_get_trial_async.py",
+ "regionTag": "aiplatform_generated_aiplatform_v1_VizierService_GetTrial_async",
+ "segments": [
+ {
+ "end": 44,
+ "start": 27,
+ "type": "FULL"
+ },
+ {
+ "end": 44,
+ "start": 27,
+ "type": "SHORT"
+ },
+ {
+ "end": 33,
+ "start": 31,
+ "type": "CLIENT_INITIALIZATION"
+ },
+ {
+ "end": 38,
+ "start": 34,
+ "type": "REQUEST_INITIALIZATION"
+ },
+ {
+ "end": 41,
+ "start": 39,
+ "type": "REQUEST_EXECUTION"
+ },
+ {
+ "end": 45,
+ "start": 42,
+ "type": "RESPONSE_HANDLING"
+ }
+ ]
+ },
+ {
+ "clientMethod": {
+ "method": {
+ "service": {
+ "shortName": "VizierService"
+ },
+ "shortName": "GetTrial"
+ }
+ },
+ "file": "aiplatform_generated_aiplatform_v1_vizier_service_get_trial_sync.py",
+ "regionTag": "aiplatform_generated_aiplatform_v1_VizierService_GetTrial_sync",
+ "segments": [
+ {
+ "end": 44,
+ "start": 27,
+ "type": "FULL"
+ },
+ {
+ "end": 44,
+ "start": 27,
+ "type": "SHORT"
+ },
+ {
+ "end": 33,
+ "start": 31,
+ "type": "CLIENT_INITIALIZATION"
+ },
+ {
+ "end": 38,
+ "start": 34,
+ "type": "REQUEST_INITIALIZATION"
+ },
+ {
+ "end": 41,
+ "start": 39,
+ "type": "REQUEST_EXECUTION"
+ },
+ {
+ "end": 45,
+ "start": 42,
+ "type": "RESPONSE_HANDLING"
+ }
+ ]
+ },
+ {
+ "clientMethod": {
+ "async": true,
+ "method": {
+ "service": {
+ "shortName": "VizierService"
+ },
+ "shortName": "ListOptimalTrials"
+ }
+ },
+ "file": "aiplatform_generated_aiplatform_v1_vizier_service_list_optimal_trials_async.py",
+ "regionTag": "aiplatform_generated_aiplatform_v1_VizierService_ListOptimalTrials_async",
+ "segments": [
+ {
+ "end": 44,
+ "start": 27,
+ "type": "FULL"
+ },
+ {
+ "end": 44,
+ "start": 27,
+ "type": "SHORT"
+ },
+ {
+ "end": 33,
+ "start": 31,
+ "type": "CLIENT_INITIALIZATION"
+ },
+ {
+ "end": 38,
+ "start": 34,
+ "type": "REQUEST_INITIALIZATION"
+ },
+ {
+ "end": 41,
+ "start": 39,
+ "type": "REQUEST_EXECUTION"
+ },
+ {
+ "end": 45,
+ "start": 42,
+ "type": "RESPONSE_HANDLING"
+ }
+ ]
+ },
+ {
+ "clientMethod": {
+ "method": {
+ "service": {
+ "shortName": "VizierService"
+ },
+ "shortName": "ListOptimalTrials"
+ }
+ },
+ "file": "aiplatform_generated_aiplatform_v1_vizier_service_list_optimal_trials_sync.py",
+ "regionTag": "aiplatform_generated_aiplatform_v1_VizierService_ListOptimalTrials_sync",
+ "segments": [
+ {
+ "end": 44,
+ "start": 27,
+ "type": "FULL"
+ },
+ {
+ "end": 44,
+ "start": 27,
+ "type": "SHORT"
+ },
+ {
+ "end": 33,
+ "start": 31,
+ "type": "CLIENT_INITIALIZATION"
+ },
+ {
+ "end": 38,
+ "start": 34,
+ "type": "REQUEST_INITIALIZATION"
+ },
+ {
+ "end": 41,
+ "start": 39,
+ "type": "REQUEST_EXECUTION"
+ },
+ {
+ "end": 45,
+ "start": 42,
+ "type": "RESPONSE_HANDLING"
+ }
+ ]
+ },
+ {
+ "clientMethod": {
+ "async": true,
+ "method": {
+ "service": {
+ "shortName": "VizierService"
+ },
+ "shortName": "ListStudies"
+ }
+ },
+ "file": "aiplatform_generated_aiplatform_v1_vizier_service_list_studies_async.py",
+ "regionTag": "aiplatform_generated_aiplatform_v1_VizierService_ListStudies_async",
+ "segments": [
+ {
+ "end": 45,
+ "start": 27,
+ "type": "FULL"
+ },
+ {
+ "end": 45,
+ "start": 27,
+ "type": "SHORT"
+ },
+ {
+ "end": 33,
+ "start": 31,
+ "type": "CLIENT_INITIALIZATION"
+ },
+ {
+ "end": 38,
+ "start": 34,
+ "type": "REQUEST_INITIALIZATION"
+ },
+ {
+ "end": 41,
+ "start": 39,
+ "type": "REQUEST_EXECUTION"
+ },
+ {
+ "end": 46,
+ "start": 42,
+ "type": "RESPONSE_HANDLING"
+ }
+ ]
+ },
+ {
+ "clientMethod": {
+ "method": {
+ "service": {
+ "shortName": "VizierService"
+ },
+ "shortName": "ListStudies"
+ }
+ },
+ "file": "aiplatform_generated_aiplatform_v1_vizier_service_list_studies_sync.py",
+ "regionTag": "aiplatform_generated_aiplatform_v1_VizierService_ListStudies_sync",
+ "segments": [
+ {
+ "end": 45,
+ "start": 27,
+ "type": "FULL"
+ },
+ {
+ "end": 45,
+ "start": 27,
+ "type": "SHORT"
+ },
+ {
+ "end": 33,
+ "start": 31,
+ "type": "CLIENT_INITIALIZATION"
+ },
+ {
+ "end": 38,
+ "start": 34,
+ "type": "REQUEST_INITIALIZATION"
+ },
+ {
+ "end": 41,
+ "start": 39,
+ "type": "REQUEST_EXECUTION"
+ },
+ {
+ "end": 46,
+ "start": 42,
+ "type": "RESPONSE_HANDLING"
+ }
+ ]
+ },
+ {
+ "clientMethod": {
+ "async": true,
+ "method": {
+ "service": {
+ "shortName": "VizierService"
+ },
+ "shortName": "ListTrials"
+ }
+ },
+ "file": "aiplatform_generated_aiplatform_v1_vizier_service_list_trials_async.py",
+ "regionTag": "aiplatform_generated_aiplatform_v1_VizierService_ListTrials_async",
+ "segments": [
+ {
+ "end": 45,
+ "start": 27,
+ "type": "FULL"
+ },
+ {
+ "end": 45,
+ "start": 27,
+ "type": "SHORT"
+ },
+ {
+ "end": 33,
+ "start": 31,
+ "type": "CLIENT_INITIALIZATION"
+ },
+ {
+ "end": 38,
+ "start": 34,
+ "type": "REQUEST_INITIALIZATION"
+ },
+ {
+ "end": 41,
+ "start": 39,
+ "type": "REQUEST_EXECUTION"
+ },
+ {
+ "end": 46,
+ "start": 42,
+ "type": "RESPONSE_HANDLING"
+ }
+ ]
+ },
+ {
+ "clientMethod": {
+ "method": {
+ "service": {
+ "shortName": "VizierService"
+ },
+ "shortName": "ListTrials"
+ }
+ },
+ "file": "aiplatform_generated_aiplatform_v1_vizier_service_list_trials_sync.py",
+ "regionTag": "aiplatform_generated_aiplatform_v1_VizierService_ListTrials_sync",
+ "segments": [
+ {
+ "end": 45,
+ "start": 27,
+ "type": "FULL"
+ },
+ {
+ "end": 45,
+ "start": 27,
+ "type": "SHORT"
+ },
+ {
+ "end": 33,
+ "start": 31,
+ "type": "CLIENT_INITIALIZATION"
+ },
+ {
+ "end": 38,
+ "start": 34,
+ "type": "REQUEST_INITIALIZATION"
+ },
+ {
+ "end": 41,
+ "start": 39,
+ "type": "REQUEST_EXECUTION"
+ },
+ {
+ "end": 46,
+ "start": 42,
+ "type": "RESPONSE_HANDLING"
+ }
+ ]
+ },
+ {
+ "clientMethod": {
+ "async": true,
+ "method": {
+ "service": {
+ "shortName": "VizierService"
+ },
+ "shortName": "LookupStudy"
+ }
+ },
+ "file": "aiplatform_generated_aiplatform_v1_vizier_service_lookup_study_async.py",
+ "regionTag": "aiplatform_generated_aiplatform_v1_VizierService_LookupStudy_async",
+ "segments": [
+ {
+ "end": 45,
+ "start": 27,
+ "type": "FULL"
+ },
+ {
+ "end": 45,
+ "start": 27,
+ "type": "SHORT"
+ },
+ {
+ "end": 33,
+ "start": 31,
+ "type": "CLIENT_INITIALIZATION"
+ },
+ {
+ "end": 39,
+ "start": 34,
+ "type": "REQUEST_INITIALIZATION"
+ },
+ {
+ "end": 42,
+ "start": 40,
+ "type": "REQUEST_EXECUTION"
+ },
+ {
+ "end": 46,
+ "start": 43,
+ "type": "RESPONSE_HANDLING"
+ }
+ ]
+ },
+ {
+ "clientMethod": {
+ "method": {
+ "service": {
+ "shortName": "VizierService"
+ },
+ "shortName": "LookupStudy"
+ }
+ },
+ "file": "aiplatform_generated_aiplatform_v1_vizier_service_lookup_study_sync.py",
+ "regionTag": "aiplatform_generated_aiplatform_v1_VizierService_LookupStudy_sync",
+ "segments": [
+ {
+ "end": 45,
+ "start": 27,
+ "type": "FULL"
+ },
+ {
+ "end": 45,
+ "start": 27,
+ "type": "SHORT"
+ },
+ {
+ "end": 33,
+ "start": 31,
+ "type": "CLIENT_INITIALIZATION"
+ },
+ {
+ "end": 39,
+ "start": 34,
+ "type": "REQUEST_INITIALIZATION"
+ },
+ {
+ "end": 42,
+ "start": 40,
+ "type": "REQUEST_EXECUTION"
+ },
+ {
+ "end": 46,
+ "start": 43,
+ "type": "RESPONSE_HANDLING"
+ }
+ ]
+ },
+ {
+ "clientMethod": {
+ "async": true,
+ "method": {
+ "service": {
+ "shortName": "VizierService"
+ },
+ "shortName": "StopTrial"
+ }
+ },
+ "file": "aiplatform_generated_aiplatform_v1_vizier_service_stop_trial_async.py",
+ "regionTag": "aiplatform_generated_aiplatform_v1_VizierService_StopTrial_async",
+ "segments": [
+ {
+ "end": 44,
+ "start": 27,
+ "type": "FULL"
+ },
+ {
+ "end": 44,
+ "start": 27,
+ "type": "SHORT"
+ },
+ {
+ "end": 33,
+ "start": 31,
+ "type": "CLIENT_INITIALIZATION"
+ },
+ {
+ "end": 38,
+ "start": 34,
+ "type": "REQUEST_INITIALIZATION"
+ },
+ {
+ "end": 41,
+ "start": 39,
+ "type": "REQUEST_EXECUTION"
+ },
+ {
+ "end": 45,
+ "start": 42,
+ "type": "RESPONSE_HANDLING"
+ }
+ ]
+ },
+ {
+ "clientMethod": {
+ "method": {
+ "service": {
+ "shortName": "VizierService"
+ },
+ "shortName": "StopTrial"
+ }
+ },
+ "file": "aiplatform_generated_aiplatform_v1_vizier_service_stop_trial_sync.py",
+ "regionTag": "aiplatform_generated_aiplatform_v1_VizierService_StopTrial_sync",
+ "segments": [
+ {
+ "end": 44,
+ "start": 27,
+ "type": "FULL"
+ },
+ {
+ "end": 44,
+ "start": 27,
+ "type": "SHORT"
+ },
+ {
+ "end": 33,
+ "start": 31,
+ "type": "CLIENT_INITIALIZATION"
+ },
+ {
+ "end": 38,
+ "start": 34,
+ "type": "REQUEST_INITIALIZATION"
+ },
+ {
+ "end": 41,
+ "start": 39,
+ "type": "REQUEST_EXECUTION"
+ },
+ {
+ "end": 45,
+ "start": 42,
+ "type": "RESPONSE_HANDLING"
+ }
+ ]
+ },
+ {
+ "clientMethod": {
+ "async": true,
+ "method": {
+ "service": {
+ "shortName": "VizierService"
+ },
+ "shortName": "SuggestTrials"
+ }
+ },
+ "file": "aiplatform_generated_aiplatform_v1_vizier_service_suggest_trials_async.py",
+ "regionTag": "aiplatform_generated_aiplatform_v1_VizierService_SuggestTrials_async",
+ "segments": [
+ {
+ "end": 50,
+ "start": 27,
+ "type": "FULL"
+ },
+ {
+ "end": 50,
+ "start": 27,
+ "type": "SHORT"
+ },
+ {
+ "end": 33,
+ "start": 31,
+ "type": "CLIENT_INITIALIZATION"
+ },
+ {
+ "end": 40,
+ "start": 34,
+ "type": "REQUEST_INITIALIZATION"
+ },
+ {
+ "end": 47,
+ "start": 41,
+ "type": "REQUEST_EXECUTION"
+ },
+ {
+ "end": 51,
+ "start": 48,
+ "type": "RESPONSE_HANDLING"
+ }
+ ]
+ },
+ {
+ "clientMethod": {
+ "method": {
+ "service": {
+ "shortName": "VizierService"
+ },
+ "shortName": "SuggestTrials"
+ }
+ },
+ "file": "aiplatform_generated_aiplatform_v1_vizier_service_suggest_trials_sync.py",
+ "regionTag": "aiplatform_generated_aiplatform_v1_VizierService_SuggestTrials_sync",
+ "segments": [
+ {
+ "end": 50,
+ "start": 27,
+ "type": "FULL"
+ },
+ {
+ "end": 50,
+ "start": 27,
+ "type": "SHORT"
+ },
+ {
+ "end": 33,
+ "start": 31,
+ "type": "CLIENT_INITIALIZATION"
+ },
+ {
+ "end": 40,
+ "start": 34,
+ "type": "REQUEST_INITIALIZATION"
+ },
+ {
+ "end": 47,
+ "start": 41,
+ "type": "REQUEST_EXECUTION"
+ },
+ {
+ "end": 51,
+ "start": 48,
+ "type": "RESPONSE_HANDLING"
+ }
+ ]
+ }
+ ]
+}
diff --git a/samples/generated_samples/snippet_metadata_aiplatform_v1beta1.json b/samples/generated_samples/snippet_metadata_aiplatform_v1beta1.json
new file mode 100644
index 0000000000..e3ea85a756
--- /dev/null
+++ b/samples/generated_samples/snippet_metadata_aiplatform_v1beta1.json
@@ -0,0 +1,16340 @@
+{
+ "snippets": [
+ {
+ "clientMethod": {
+ "async": true,
+ "method": {
+ "service": {
+ "shortName": "DatasetService"
+ },
+ "shortName": "CreateDataset"
+ }
+ },
+ "file": "aiplatform_generated_aiplatform_v1beta1_dataset_service_create_dataset_async.py",
+ "regionTag": "aiplatform_generated_aiplatform_v1beta1_DatasetService_CreateDataset_async",
+ "segments": [
+ {
+ "end": 54,
+ "start": 27,
+ "type": "FULL"
+ },
+ {
+ "end": 54,
+ "start": 27,
+ "type": "SHORT"
+ },
+ {
+ "end": 33,
+ "start": 31,
+ "type": "CLIENT_INITIALIZATION"
+ },
+ {
+ "end": 44,
+ "start": 34,
+ "type": "REQUEST_INITIALIZATION"
+ },
+ {
+ "end": 51,
+ "start": 45,
+ "type": "REQUEST_EXECUTION"
+ },
+ {
+ "end": 55,
+ "start": 52,
+ "type": "RESPONSE_HANDLING"
+ }
+ ]
+ },
+ {
+ "clientMethod": {
+ "method": {
+ "service": {
+ "shortName": "DatasetService"
+ },
+ "shortName": "CreateDataset"
+ }
+ },
+ "file": "aiplatform_generated_aiplatform_v1beta1_dataset_service_create_dataset_sync.py",
+ "regionTag": "aiplatform_generated_aiplatform_v1beta1_DatasetService_CreateDataset_sync",
+ "segments": [
+ {
+ "end": 54,
+ "start": 27,
+ "type": "FULL"
+ },
+ {
+ "end": 54,
+ "start": 27,
+ "type": "SHORT"
+ },
+ {
+ "end": 33,
+ "start": 31,
+ "type": "CLIENT_INITIALIZATION"
+ },
+ {
+ "end": 44,
+ "start": 34,
+ "type": "REQUEST_INITIALIZATION"
+ },
+ {
+ "end": 51,
+ "start": 45,
+ "type": "REQUEST_EXECUTION"
+ },
+ {
+ "end": 55,
+ "start": 52,
+ "type": "RESPONSE_HANDLING"
+ }
+ ]
+ },
+ {
+ "clientMethod": {
+ "async": true,
+ "method": {
+ "service": {
+ "shortName": "DatasetService"
+ },
+ "shortName": "DeleteDataset"
+ }
+ },
+ "file": "aiplatform_generated_aiplatform_v1beta1_dataset_service_delete_dataset_async.py",
+ "regionTag": "aiplatform_generated_aiplatform_v1beta1_DatasetService_DeleteDataset_async",
+ "segments": [
+ {
+ "end": 48,
+ "start": 27,
+ "type": "FULL"
+ },
+ {
+ "end": 48,
+ "start": 27,
+ "type": "SHORT"
+ },
+ {
+ "end": 33,
+ "start": 31,
+ "type": "CLIENT_INITIALIZATION"
+ },
+ {
+ "end": 38,
+ "start": 34,
+ "type": "REQUEST_INITIALIZATION"
+ },
+ {
+ "end": 45,
+ "start": 39,
+ "type": "REQUEST_EXECUTION"
+ },
+ {
+ "end": 49,
+ "start": 46,
+ "type": "RESPONSE_HANDLING"
+ }
+ ]
+ },
+ {
+ "clientMethod": {
+ "method": {
+ "service": {
+ "shortName": "DatasetService"
+ },
+ "shortName": "DeleteDataset"
+ }
+ },
+ "file": "aiplatform_generated_aiplatform_v1beta1_dataset_service_delete_dataset_sync.py",
+ "regionTag": "aiplatform_generated_aiplatform_v1beta1_DatasetService_DeleteDataset_sync",
+ "segments": [
+ {
+ "end": 48,
+ "start": 27,
+ "type": "FULL"
+ },
+ {
+ "end": 48,
+ "start": 27,
+ "type": "SHORT"
+ },
+ {
+ "end": 33,
+ "start": 31,
+ "type": "CLIENT_INITIALIZATION"
+ },
+ {
+ "end": 38,
+ "start": 34,
+ "type": "REQUEST_INITIALIZATION"
+ },
+ {
+ "end": 45,
+ "start": 39,
+ "type": "REQUEST_EXECUTION"
+ },
+ {
+ "end": 49,
+ "start": 46,
+ "type": "RESPONSE_HANDLING"
+ }
+ ]
+ },
+ {
+ "clientMethod": {
+ "async": true,
+ "method": {
+ "service": {
+ "shortName": "DatasetService"
+ },
+ "shortName": "ExportData"
+ }
+ },
+ "file": "aiplatform_generated_aiplatform_v1beta1_dataset_service_export_data_async.py",
+ "regionTag": "aiplatform_generated_aiplatform_v1beta1_DatasetService_ExportData_async",
+ "segments": [
+ {
+ "end": 52,
+ "start": 27,
+ "type": "FULL"
+ },
+ {
+ "end": 52,
+ "start": 27,
+ "type": "SHORT"
+ },
+ {
+ "end": 33,
+ "start": 31,
+ "type": "CLIENT_INITIALIZATION"
+ },
+ {
+ "end": 42,
+ "start": 34,
+ "type": "REQUEST_INITIALIZATION"
+ },
+ {
+ "end": 49,
+ "start": 43,
+ "type": "REQUEST_EXECUTION"
+ },
+ {
+ "end": 53,
+ "start": 50,
+ "type": "RESPONSE_HANDLING"
+ }
+ ]
+ },
+ {
+ "clientMethod": {
+ "method": {
+ "service": {
+ "shortName": "DatasetService"
+ },
+ "shortName": "ExportData"
+ }
+ },
+ "file": "aiplatform_generated_aiplatform_v1beta1_dataset_service_export_data_sync.py",
+ "regionTag": "aiplatform_generated_aiplatform_v1beta1_DatasetService_ExportData_sync",
+ "segments": [
+ {
+ "end": 52,
+ "start": 27,
+ "type": "FULL"
+ },
+ {
+ "end": 52,
+ "start": 27,
+ "type": "SHORT"
+ },
+ {
+ "end": 33,
+ "start": 31,
+ "type": "CLIENT_INITIALIZATION"
+ },
+ {
+ "end": 42,
+ "start": 34,
+ "type": "REQUEST_INITIALIZATION"
+ },
+ {
+ "end": 49,
+ "start": 43,
+ "type": "REQUEST_EXECUTION"
+ },
+ {
+ "end": 53,
+ "start": 50,
+ "type": "RESPONSE_HANDLING"
+ }
+ ]
+ },
+ {
+ "clientMethod": {
+ "async": true,
+ "method": {
+ "service": {
+ "shortName": "DatasetService"
+ },
+ "shortName": "GetAnnotationSpec"
+ }
+ },
+ "file": "aiplatform_generated_aiplatform_v1beta1_dataset_service_get_annotation_spec_async.py",
+ "regionTag": "aiplatform_generated_aiplatform_v1beta1_DatasetService_GetAnnotationSpec_async",
+ "segments": [
+ {
+ "end": 44,
+ "start": 27,
+ "type": "FULL"
+ },
+ {
+ "end": 44,
+ "start": 27,
+ "type": "SHORT"
+ },
+ {
+ "end": 33,
+ "start": 31,
+ "type": "CLIENT_INITIALIZATION"
+ },
+ {
+ "end": 38,
+ "start": 34,
+ "type": "REQUEST_INITIALIZATION"
+ },
+ {
+ "end": 41,
+ "start": 39,
+ "type": "REQUEST_EXECUTION"
+ },
+ {
+ "end": 45,
+ "start": 42,
+ "type": "RESPONSE_HANDLING"
+ }
+ ]
+ },
+ {
+ "clientMethod": {
+ "method": {
+ "service": {
+ "shortName": "DatasetService"
+ },
+ "shortName": "GetAnnotationSpec"
+ }
+ },
+ "file": "aiplatform_generated_aiplatform_v1beta1_dataset_service_get_annotation_spec_sync.py",
+ "regionTag": "aiplatform_generated_aiplatform_v1beta1_DatasetService_GetAnnotationSpec_sync",
+ "segments": [
+ {
+ "end": 44,
+ "start": 27,
+ "type": "FULL"
+ },
+ {
+ "end": 44,
+ "start": 27,
+ "type": "SHORT"
+ },
+ {
+ "end": 33,
+ "start": 31,
+ "type": "CLIENT_INITIALIZATION"
+ },
+ {
+ "end": 38,
+ "start": 34,
+ "type": "REQUEST_INITIALIZATION"
+ },
+ {
+ "end": 41,
+ "start": 39,
+ "type": "REQUEST_EXECUTION"
+ },
+ {
+ "end": 45,
+ "start": 42,
+ "type": "RESPONSE_HANDLING"
+ }
+ ]
+ },
+ {
+ "clientMethod": {
+ "async": true,
+ "method": {
+ "service": {
+ "shortName": "DatasetService"
+ },
+ "shortName": "GetDataset"
+ }
+ },
+ "file": "aiplatform_generated_aiplatform_v1beta1_dataset_service_get_dataset_async.py",
+ "regionTag": "aiplatform_generated_aiplatform_v1beta1_DatasetService_GetDataset_async",
+ "segments": [
+ {
+ "end": 44,
+ "start": 27,
+ "type": "FULL"
+ },
+ {
+ "end": 44,
+ "start": 27,
+ "type": "SHORT"
+ },
+ {
+ "end": 33,
+ "start": 31,
+ "type": "CLIENT_INITIALIZATION"
+ },
+ {
+ "end": 38,
+ "start": 34,
+ "type": "REQUEST_INITIALIZATION"
+ },
+ {
+ "end": 41,
+ "start": 39,
+ "type": "REQUEST_EXECUTION"
+ },
+ {
+ "end": 45,
+ "start": 42,
+ "type": "RESPONSE_HANDLING"
+ }
+ ]
+ },
+ {
+ "clientMethod": {
+ "method": {
+ "service": {
+ "shortName": "DatasetService"
+ },
+ "shortName": "GetDataset"
+ }
+ },
+ "file": "aiplatform_generated_aiplatform_v1beta1_dataset_service_get_dataset_sync.py",
+ "regionTag": "aiplatform_generated_aiplatform_v1beta1_DatasetService_GetDataset_sync",
+ "segments": [
+ {
+ "end": 44,
+ "start": 27,
+ "type": "FULL"
+ },
+ {
+ "end": 44,
+ "start": 27,
+ "type": "SHORT"
+ },
+ {
+ "end": 33,
+ "start": 31,
+ "type": "CLIENT_INITIALIZATION"
+ },
+ {
+ "end": 38,
+ "start": 34,
+ "type": "REQUEST_INITIALIZATION"
+ },
+ {
+ "end": 41,
+ "start": 39,
+ "type": "REQUEST_EXECUTION"
+ },
+ {
+ "end": 45,
+ "start": 42,
+ "type": "RESPONSE_HANDLING"
+ }
+ ]
+ },
+ {
+ "clientMethod": {
+ "async": true,
+ "method": {
+ "service": {
+ "shortName": "DatasetService"
+ },
+ "shortName": "ImportData"
+ }
+ },
+ "file": "aiplatform_generated_aiplatform_v1beta1_dataset_service_import_data_async.py",
+ "regionTag": "aiplatform_generated_aiplatform_v1beta1_DatasetService_ImportData_async",
+ "segments": [
+ {
+ "end": 53,
+ "start": 27,
+ "type": "FULL"
+ },
+ {
+ "end": 53,
+ "start": 27,
+ "type": "SHORT"
+ },
+ {
+ "end": 33,
+ "start": 31,
+ "type": "CLIENT_INITIALIZATION"
+ },
+ {
+ "end": 43,
+ "start": 34,
+ "type": "REQUEST_INITIALIZATION"
+ },
+ {
+ "end": 50,
+ "start": 44,
+ "type": "REQUEST_EXECUTION"
+ },
+ {
+ "end": 54,
+ "start": 51,
+ "type": "RESPONSE_HANDLING"
+ }
+ ]
+ },
+ {
+ "clientMethod": {
+ "method": {
+ "service": {
+ "shortName": "DatasetService"
+ },
+ "shortName": "ImportData"
+ }
+ },
+ "file": "aiplatform_generated_aiplatform_v1beta1_dataset_service_import_data_sync.py",
+ "regionTag": "aiplatform_generated_aiplatform_v1beta1_DatasetService_ImportData_sync",
+ "segments": [
+ {
+ "end": 53,
+ "start": 27,
+ "type": "FULL"
+ },
+ {
+ "end": 53,
+ "start": 27,
+ "type": "SHORT"
+ },
+ {
+ "end": 33,
+ "start": 31,
+ "type": "CLIENT_INITIALIZATION"
+ },
+ {
+ "end": 43,
+ "start": 34,
+ "type": "REQUEST_INITIALIZATION"
+ },
+ {
+ "end": 50,
+ "start": 44,
+ "type": "REQUEST_EXECUTION"
+ },
+ {
+ "end": 54,
+ "start": 51,
+ "type": "RESPONSE_HANDLING"
+ }
+ ]
+ },
+ {
+ "clientMethod": {
+ "async": true,
+ "method": {
+ "service": {
+ "shortName": "DatasetService"
+ },
+ "shortName": "ListAnnotations"
+ }
+ },
+ "file": "aiplatform_generated_aiplatform_v1beta1_dataset_service_list_annotations_async.py",
+ "regionTag": "aiplatform_generated_aiplatform_v1beta1_DatasetService_ListAnnotations_async",
+ "segments": [
+ {
+ "end": 45,
+ "start": 27,
+ "type": "FULL"
+ },
+ {
+ "end": 45,
+ "start": 27,
+ "type": "SHORT"
+ },
+ {
+ "end": 33,
+ "start": 31,
+ "type": "CLIENT_INITIALIZATION"
+ },
+ {
+ "end": 38,
+ "start": 34,
+ "type": "REQUEST_INITIALIZATION"
+ },
+ {
+ "end": 41,
+ "start": 39,
+ "type": "REQUEST_EXECUTION"
+ },
+ {
+ "end": 46,
+ "start": 42,
+ "type": "RESPONSE_HANDLING"
+ }
+ ]
+ },
+ {
+ "clientMethod": {
+ "method": {
+ "service": {
+ "shortName": "DatasetService"
+ },
+ "shortName": "ListAnnotations"
+ }
+ },
+ "file": "aiplatform_generated_aiplatform_v1beta1_dataset_service_list_annotations_sync.py",
+ "regionTag": "aiplatform_generated_aiplatform_v1beta1_DatasetService_ListAnnotations_sync",
+ "segments": [
+ {
+ "end": 45,
+ "start": 27,
+ "type": "FULL"
+ },
+ {
+ "end": 45,
+ "start": 27,
+ "type": "SHORT"
+ },
+ {
+ "end": 33,
+ "start": 31,
+ "type": "CLIENT_INITIALIZATION"
+ },
+ {
+ "end": 38,
+ "start": 34,
+ "type": "REQUEST_INITIALIZATION"
+ },
+ {
+ "end": 41,
+ "start": 39,
+ "type": "REQUEST_EXECUTION"
+ },
+ {
+ "end": 46,
+ "start": 42,
+ "type": "RESPONSE_HANDLING"
+ }
+ ]
+ },
+ {
+ "clientMethod": {
+ "async": true,
+ "method": {
+ "service": {
+ "shortName": "DatasetService"
+ },
+ "shortName": "ListDataItems"
+ }
+ },
+ "file": "aiplatform_generated_aiplatform_v1beta1_dataset_service_list_data_items_async.py",
+ "regionTag": "aiplatform_generated_aiplatform_v1beta1_DatasetService_ListDataItems_async",
+ "segments": [
+ {
+ "end": 45,
+ "start": 27,
+ "type": "FULL"
+ },
+ {
+ "end": 45,
+ "start": 27,
+ "type": "SHORT"
+ },
+ {
+ "end": 33,
+ "start": 31,
+ "type": "CLIENT_INITIALIZATION"
+ },
+ {
+ "end": 38,
+ "start": 34,
+ "type": "REQUEST_INITIALIZATION"
+ },
+ {
+ "end": 41,
+ "start": 39,
+ "type": "REQUEST_EXECUTION"
+ },
+ {
+ "end": 46,
+ "start": 42,
+ "type": "RESPONSE_HANDLING"
+ }
+ ]
+ },
+ {
+ "clientMethod": {
+ "method": {
+ "service": {
+ "shortName": "DatasetService"
+ },
+ "shortName": "ListDataItems"
+ }
+ },
+ "file": "aiplatform_generated_aiplatform_v1beta1_dataset_service_list_data_items_sync.py",
+ "regionTag": "aiplatform_generated_aiplatform_v1beta1_DatasetService_ListDataItems_sync",
+ "segments": [
+ {
+ "end": 45,
+ "start": 27,
+ "type": "FULL"
+ },
+ {
+ "end": 45,
+ "start": 27,
+ "type": "SHORT"
+ },
+ {
+ "end": 33,
+ "start": 31,
+ "type": "CLIENT_INITIALIZATION"
+ },
+ {
+ "end": 38,
+ "start": 34,
+ "type": "REQUEST_INITIALIZATION"
+ },
+ {
+ "end": 41,
+ "start": 39,
+ "type": "REQUEST_EXECUTION"
+ },
+ {
+ "end": 46,
+ "start": 42,
+ "type": "RESPONSE_HANDLING"
+ }
+ ]
+ },
+ {
+ "clientMethod": {
+ "async": true,
+ "method": {
+ "service": {
+ "shortName": "DatasetService"
+ },
+ "shortName": "ListDatasets"
+ }
+ },
+ "file": "aiplatform_generated_aiplatform_v1beta1_dataset_service_list_datasets_async.py",
+ "regionTag": "aiplatform_generated_aiplatform_v1beta1_DatasetService_ListDatasets_async",
+ "segments": [
+ {
+ "end": 45,
+ "start": 27,
+ "type": "FULL"
+ },
+ {
+ "end": 45,
+ "start": 27,
+ "type": "SHORT"
+ },
+ {
+ "end": 33,
+ "start": 31,
+ "type": "CLIENT_INITIALIZATION"
+ },
+ {
+ "end": 38,
+ "start": 34,
+ "type": "REQUEST_INITIALIZATION"
+ },
+ {
+ "end": 41,
+ "start": 39,
+ "type": "REQUEST_EXECUTION"
+ },
+ {
+ "end": 46,
+ "start": 42,
+ "type": "RESPONSE_HANDLING"
+ }
+ ]
+ },
+ {
+ "clientMethod": {
+ "method": {
+ "service": {
+ "shortName": "DatasetService"
+ },
+ "shortName": "ListDatasets"
+ }
+ },
+ "file": "aiplatform_generated_aiplatform_v1beta1_dataset_service_list_datasets_sync.py",
+ "regionTag": "aiplatform_generated_aiplatform_v1beta1_DatasetService_ListDatasets_sync",
+ "segments": [
+ {
+ "end": 45,
+ "start": 27,
+ "type": "FULL"
+ },
+ {
+ "end": 45,
+ "start": 27,
+ "type": "SHORT"
+ },
+ {
+ "end": 33,
+ "start": 31,
+ "type": "CLIENT_INITIALIZATION"
+ },
+ {
+ "end": 38,
+ "start": 34,
+ "type": "REQUEST_INITIALIZATION"
+ },
+ {
+ "end": 41,
+ "start": 39,
+ "type": "REQUEST_EXECUTION"
+ },
+ {
+ "end": 46,
+ "start": 42,
+ "type": "RESPONSE_HANDLING"
+ }
+ ]
+ },
+ {
+ "clientMethod": {
+ "async": true,
+ "method": {
+ "service": {
+ "shortName": "DatasetService"
+ },
+ "shortName": "UpdateDataset"
+ }
+ },
+ "file": "aiplatform_generated_aiplatform_v1beta1_dataset_service_update_dataset_async.py",
+ "regionTag": "aiplatform_generated_aiplatform_v1beta1_DatasetService_UpdateDataset_async",
+ "segments": [
+ {
+ "end": 49,
+ "start": 27,
+ "type": "FULL"
+ },
+ {
+ "end": 49,
+ "start": 27,
+ "type": "SHORT"
+ },
+ {
+ "end": 33,
+ "start": 31,
+ "type": "CLIENT_INITIALIZATION"
+ },
+ {
+ "end": 43,
+ "start": 34,
+ "type": "REQUEST_INITIALIZATION"
+ },
+ {
+ "end": 46,
+ "start": 44,
+ "type": "REQUEST_EXECUTION"
+ },
+ {
+ "end": 50,
+ "start": 47,
+ "type": "RESPONSE_HANDLING"
+ }
+ ]
+ },
+ {
+ "clientMethod": {
+ "method": {
+ "service": {
+ "shortName": "DatasetService"
+ },
+ "shortName": "UpdateDataset"
+ }
+ },
+ "file": "aiplatform_generated_aiplatform_v1beta1_dataset_service_update_dataset_sync.py",
+ "regionTag": "aiplatform_generated_aiplatform_v1beta1_DatasetService_UpdateDataset_sync",
+ "segments": [
+ {
+ "end": 49,
+ "start": 27,
+ "type": "FULL"
+ },
+ {
+ "end": 49,
+ "start": 27,
+ "type": "SHORT"
+ },
+ {
+ "end": 33,
+ "start": 31,
+ "type": "CLIENT_INITIALIZATION"
+ },
+ {
+ "end": 43,
+ "start": 34,
+ "type": "REQUEST_INITIALIZATION"
+ },
+ {
+ "end": 46,
+ "start": 44,
+ "type": "REQUEST_EXECUTION"
+ },
+ {
+ "end": 50,
+ "start": 47,
+ "type": "RESPONSE_HANDLING"
+ }
+ ]
+ },
+ {
+ "clientMethod": {
+ "async": true,
+ "method": {
+ "service": {
+ "shortName": "EndpointService"
+ },
+ "shortName": "CreateEndpoint"
+ }
+ },
+ "file": "aiplatform_generated_aiplatform_v1beta1_endpoint_service_create_endpoint_async.py",
+ "regionTag": "aiplatform_generated_aiplatform_v1beta1_EndpointService_CreateEndpoint_async",
+ "segments": [
+ {
+ "end": 52,
+ "start": 27,
+ "type": "FULL"
+ },
+ {
+ "end": 52,
+ "start": 27,
+ "type": "SHORT"
+ },
+ {
+ "end": 33,
+ "start": 31,
+ "type": "CLIENT_INITIALIZATION"
+ },
+ {
+ "end": 42,
+ "start": 34,
+ "type": "REQUEST_INITIALIZATION"
+ },
+ {
+ "end": 49,
+ "start": 43,
+ "type": "REQUEST_EXECUTION"
+ },
+ {
+ "end": 53,
+ "start": 50,
+ "type": "RESPONSE_HANDLING"
+ }
+ ]
+ },
+ {
+ "clientMethod": {
+ "method": {
+ "service": {
+ "shortName": "EndpointService"
+ },
+ "shortName": "CreateEndpoint"
+ }
+ },
+ "file": "aiplatform_generated_aiplatform_v1beta1_endpoint_service_create_endpoint_sync.py",
+ "regionTag": "aiplatform_generated_aiplatform_v1beta1_EndpointService_CreateEndpoint_sync",
+ "segments": [
+ {
+ "end": 52,
+ "start": 27,
+ "type": "FULL"
+ },
+ {
+ "end": 52,
+ "start": 27,
+ "type": "SHORT"
+ },
+ {
+ "end": 33,
+ "start": 31,
+ "type": "CLIENT_INITIALIZATION"
+ },
+ {
+ "end": 42,
+ "start": 34,
+ "type": "REQUEST_INITIALIZATION"
+ },
+ {
+ "end": 49,
+ "start": 43,
+ "type": "REQUEST_EXECUTION"
+ },
+ {
+ "end": 53,
+ "start": 50,
+ "type": "RESPONSE_HANDLING"
+ }
+ ]
+ },
+ {
+ "clientMethod": {
+ "async": true,
+ "method": {
+ "service": {
+ "shortName": "EndpointService"
+ },
+ "shortName": "DeleteEndpoint"
+ }
+ },
+ "file": "aiplatform_generated_aiplatform_v1beta1_endpoint_service_delete_endpoint_async.py",
+ "regionTag": "aiplatform_generated_aiplatform_v1beta1_EndpointService_DeleteEndpoint_async",
+ "segments": [
+ {
+ "end": 48,
+ "start": 27,
+ "type": "FULL"
+ },
+ {
+ "end": 48,
+ "start": 27,
+ "type": "SHORT"
+ },
+ {
+ "end": 33,
+ "start": 31,
+ "type": "CLIENT_INITIALIZATION"
+ },
+ {
+ "end": 38,
+ "start": 34,
+ "type": "REQUEST_INITIALIZATION"
+ },
+ {
+ "end": 45,
+ "start": 39,
+ "type": "REQUEST_EXECUTION"
+ },
+ {
+ "end": 49,
+ "start": 46,
+ "type": "RESPONSE_HANDLING"
+ }
+ ]
+ },
+ {
+ "clientMethod": {
+ "method": {
+ "service": {
+ "shortName": "EndpointService"
+ },
+ "shortName": "DeleteEndpoint"
+ }
+ },
+ "file": "aiplatform_generated_aiplatform_v1beta1_endpoint_service_delete_endpoint_sync.py",
+ "regionTag": "aiplatform_generated_aiplatform_v1beta1_EndpointService_DeleteEndpoint_sync",
+ "segments": [
+ {
+ "end": 48,
+ "start": 27,
+ "type": "FULL"
+ },
+ {
+ "end": 48,
+ "start": 27,
+ "type": "SHORT"
+ },
+ {
+ "end": 33,
+ "start": 31,
+ "type": "CLIENT_INITIALIZATION"
+ },
+ {
+ "end": 38,
+ "start": 34,
+ "type": "REQUEST_INITIALIZATION"
+ },
+ {
+ "end": 45,
+ "start": 39,
+ "type": "REQUEST_EXECUTION"
+ },
+ {
+ "end": 49,
+ "start": 46,
+ "type": "RESPONSE_HANDLING"
+ }
+ ]
+ },
+ {
+ "clientMethod": {
+ "async": true,
+ "method": {
+ "service": {
+ "shortName": "EndpointService"
+ },
+ "shortName": "DeployModel"
+ }
+ },
+ "file": "aiplatform_generated_aiplatform_v1beta1_endpoint_service_deploy_model_async.py",
+ "regionTag": "aiplatform_generated_aiplatform_v1beta1_EndpointService_DeployModel_async",
+ "segments": [
+ {
+ "end": 53,
+ "start": 27,
+ "type": "FULL"
+ },
+ {
+ "end": 53,
+ "start": 27,
+ "type": "SHORT"
+ },
+ {
+ "end": 33,
+ "start": 31,
+ "type": "CLIENT_INITIALIZATION"
+ },
+ {
+ "end": 43,
+ "start": 34,
+ "type": "REQUEST_INITIALIZATION"
+ },
+ {
+ "end": 50,
+ "start": 44,
+ "type": "REQUEST_EXECUTION"
+ },
+ {
+ "end": 54,
+ "start": 51,
+ "type": "RESPONSE_HANDLING"
+ }
+ ]
+ },
+ {
+ "clientMethod": {
+ "method": {
+ "service": {
+ "shortName": "EndpointService"
+ },
+ "shortName": "DeployModel"
+ }
+ },
+ "file": "aiplatform_generated_aiplatform_v1beta1_endpoint_service_deploy_model_sync.py",
+ "regionTag": "aiplatform_generated_aiplatform_v1beta1_EndpointService_DeployModel_sync",
+ "segments": [
+ {
+ "end": 53,
+ "start": 27,
+ "type": "FULL"
+ },
+ {
+ "end": 53,
+ "start": 27,
+ "type": "SHORT"
+ },
+ {
+ "end": 33,
+ "start": 31,
+ "type": "CLIENT_INITIALIZATION"
+ },
+ {
+ "end": 43,
+ "start": 34,
+ "type": "REQUEST_INITIALIZATION"
+ },
+ {
+ "end": 50,
+ "start": 44,
+ "type": "REQUEST_EXECUTION"
+ },
+ {
+ "end": 54,
+ "start": 51,
+ "type": "RESPONSE_HANDLING"
+ }
+ ]
+ },
+ {
+ "clientMethod": {
+ "async": true,
+ "method": {
+ "service": {
+ "shortName": "EndpointService"
+ },
+ "shortName": "GetEndpoint"
+ }
+ },
+ "file": "aiplatform_generated_aiplatform_v1beta1_endpoint_service_get_endpoint_async.py",
+ "regionTag": "aiplatform_generated_aiplatform_v1beta1_EndpointService_GetEndpoint_async",
+ "segments": [
+ {
+ "end": 44,
+ "start": 27,
+ "type": "FULL"
+ },
+ {
+ "end": 44,
+ "start": 27,
+ "type": "SHORT"
+ },
+ {
+ "end": 33,
+ "start": 31,
+ "type": "CLIENT_INITIALIZATION"
+ },
+ {
+ "end": 38,
+ "start": 34,
+ "type": "REQUEST_INITIALIZATION"
+ },
+ {
+ "end": 41,
+ "start": 39,
+ "type": "REQUEST_EXECUTION"
+ },
+ {
+ "end": 45,
+ "start": 42,
+ "type": "RESPONSE_HANDLING"
+ }
+ ]
+ },
+ {
+ "clientMethod": {
+ "method": {
+ "service": {
+ "shortName": "EndpointService"
+ },
+ "shortName": "GetEndpoint"
+ }
+ },
+ "file": "aiplatform_generated_aiplatform_v1beta1_endpoint_service_get_endpoint_sync.py",
+ "regionTag": "aiplatform_generated_aiplatform_v1beta1_EndpointService_GetEndpoint_sync",
+ "segments": [
+ {
+ "end": 44,
+ "start": 27,
+ "type": "FULL"
+ },
+ {
+ "end": 44,
+ "start": 27,
+ "type": "SHORT"
+ },
+ {
+ "end": 33,
+ "start": 31,
+ "type": "CLIENT_INITIALIZATION"
+ },
+ {
+ "end": 38,
+ "start": 34,
+ "type": "REQUEST_INITIALIZATION"
+ },
+ {
+ "end": 41,
+ "start": 39,
+ "type": "REQUEST_EXECUTION"
+ },
+ {
+ "end": 45,
+ "start": 42,
+ "type": "RESPONSE_HANDLING"
+ }
+ ]
+ },
+ {
+ "clientMethod": {
+ "async": true,
+ "method": {
+ "service": {
+ "shortName": "EndpointService"
+ },
+ "shortName": "ListEndpoints"
+ }
+ },
+ "file": "aiplatform_generated_aiplatform_v1beta1_endpoint_service_list_endpoints_async.py",
+ "regionTag": "aiplatform_generated_aiplatform_v1beta1_EndpointService_ListEndpoints_async",
+ "segments": [
+ {
+ "end": 45,
+ "start": 27,
+ "type": "FULL"
+ },
+ {
+ "end": 45,
+ "start": 27,
+ "type": "SHORT"
+ },
+ {
+ "end": 33,
+ "start": 31,
+ "type": "CLIENT_INITIALIZATION"
+ },
+ {
+ "end": 38,
+ "start": 34,
+ "type": "REQUEST_INITIALIZATION"
+ },
+ {
+ "end": 41,
+ "start": 39,
+ "type": "REQUEST_EXECUTION"
+ },
+ {
+ "end": 46,
+ "start": 42,
+ "type": "RESPONSE_HANDLING"
+ }
+ ]
+ },
+ {
+ "clientMethod": {
+ "method": {
+ "service": {
+ "shortName": "EndpointService"
+ },
+ "shortName": "ListEndpoints"
+ }
+ },
+ "file": "aiplatform_generated_aiplatform_v1beta1_endpoint_service_list_endpoints_sync.py",
+ "regionTag": "aiplatform_generated_aiplatform_v1beta1_EndpointService_ListEndpoints_sync",
+ "segments": [
+ {
+ "end": 45,
+ "start": 27,
+ "type": "FULL"
+ },
+ {
+ "end": 45,
+ "start": 27,
+ "type": "SHORT"
+ },
+ {
+ "end": 33,
+ "start": 31,
+ "type": "CLIENT_INITIALIZATION"
+ },
+ {
+ "end": 38,
+ "start": 34,
+ "type": "REQUEST_INITIALIZATION"
+ },
+ {
+ "end": 41,
+ "start": 39,
+ "type": "REQUEST_EXECUTION"
+ },
+ {
+ "end": 46,
+ "start": 42,
+ "type": "RESPONSE_HANDLING"
+ }
+ ]
+ },
+ {
+ "clientMethod": {
+ "async": true,
+ "method": {
+ "service": {
+ "shortName": "EndpointService"
+ },
+ "shortName": "UndeployModel"
+ }
+ },
+ "file": "aiplatform_generated_aiplatform_v1beta1_endpoint_service_undeploy_model_async.py",
+ "regionTag": "aiplatform_generated_aiplatform_v1beta1_EndpointService_UndeployModel_async",
+ "segments": [
+ {
+ "end": 49,
+ "start": 27,
+ "type": "FULL"
+ },
+ {
+ "end": 49,
+ "start": 27,
+ "type": "SHORT"
+ },
+ {
+ "end": 33,
+ "start": 31,
+ "type": "CLIENT_INITIALIZATION"
+ },
+ {
+ "end": 39,
+ "start": 34,
+ "type": "REQUEST_INITIALIZATION"
+ },
+ {
+ "end": 46,
+ "start": 40,
+ "type": "REQUEST_EXECUTION"
+ },
+ {
+ "end": 50,
+ "start": 47,
+ "type": "RESPONSE_HANDLING"
+ }
+ ]
+ },
+ {
+ "clientMethod": {
+ "method": {
+ "service": {
+ "shortName": "EndpointService"
+ },
+ "shortName": "UndeployModel"
+ }
+ },
+ "file": "aiplatform_generated_aiplatform_v1beta1_endpoint_service_undeploy_model_sync.py",
+ "regionTag": "aiplatform_generated_aiplatform_v1beta1_EndpointService_UndeployModel_sync",
+ "segments": [
+ {
+ "end": 49,
+ "start": 27,
+ "type": "FULL"
+ },
+ {
+ "end": 49,
+ "start": 27,
+ "type": "SHORT"
+ },
+ {
+ "end": 33,
+ "start": 31,
+ "type": "CLIENT_INITIALIZATION"
+ },
+ {
+ "end": 39,
+ "start": 34,
+ "type": "REQUEST_INITIALIZATION"
+ },
+ {
+ "end": 46,
+ "start": 40,
+ "type": "REQUEST_EXECUTION"
+ },
+ {
+ "end": 50,
+ "start": 47,
+ "type": "RESPONSE_HANDLING"
+ }
+ ]
+ },
+ {
+ "clientMethod": {
+ "async": true,
+ "method": {
+ "service": {
+ "shortName": "EndpointService"
+ },
+ "shortName": "UpdateEndpoint"
+ }
+ },
+ "file": "aiplatform_generated_aiplatform_v1beta1_endpoint_service_update_endpoint_async.py",
+ "regionTag": "aiplatform_generated_aiplatform_v1beta1_EndpointService_UpdateEndpoint_async",
+ "segments": [
+ {
+ "end": 47,
+ "start": 27,
+ "type": "FULL"
+ },
+ {
+ "end": 47,
+ "start": 27,
+ "type": "SHORT"
+ },
+ {
+ "end": 33,
+ "start": 31,
+ "type": "CLIENT_INITIALIZATION"
+ },
+ {
+ "end": 41,
+ "start": 34,
+ "type": "REQUEST_INITIALIZATION"
+ },
+ {
+ "end": 44,
+ "start": 42,
+ "type": "REQUEST_EXECUTION"
+ },
+ {
+ "end": 48,
+ "start": 45,
+ "type": "RESPONSE_HANDLING"
+ }
+ ]
+ },
+ {
+ "clientMethod": {
+ "method": {
+ "service": {
+ "shortName": "EndpointService"
+ },
+ "shortName": "UpdateEndpoint"
+ }
+ },
+ "file": "aiplatform_generated_aiplatform_v1beta1_endpoint_service_update_endpoint_sync.py",
+ "regionTag": "aiplatform_generated_aiplatform_v1beta1_EndpointService_UpdateEndpoint_sync",
+ "segments": [
+ {
+ "end": 47,
+ "start": 27,
+ "type": "FULL"
+ },
+ {
+ "end": 47,
+ "start": 27,
+ "type": "SHORT"
+ },
+ {
+ "end": 33,
+ "start": 31,
+ "type": "CLIENT_INITIALIZATION"
+ },
+ {
+ "end": 41,
+ "start": 34,
+ "type": "REQUEST_INITIALIZATION"
+ },
+ {
+ "end": 44,
+ "start": 42,
+ "type": "REQUEST_EXECUTION"
+ },
+ {
+ "end": 48,
+ "start": 45,
+ "type": "RESPONSE_HANDLING"
+ }
+ ]
+ },
+ {
+ "clientMethod": {
+ "async": true,
+ "method": {
+ "service": {
+ "shortName": "FeaturestoreOnlineServingService"
+ },
+ "shortName": "ReadFeatureValues"
+ }
+ },
+ "file": "aiplatform_generated_aiplatform_v1beta1_featurestore_online_serving_service_read_feature_values_async.py",
+ "regionTag": "aiplatform_generated_aiplatform_v1beta1_FeaturestoreOnlineServingService_ReadFeatureValues_async",
+ "segments": [
+ {
+ "end": 49,
+ "start": 27,
+ "type": "FULL"
+ },
+ {
+ "end": 49,
+ "start": 27,
+ "type": "SHORT"
+ },
+ {
+ "end": 33,
+ "start": 31,
+ "type": "CLIENT_INITIALIZATION"
+ },
+ {
+ "end": 43,
+ "start": 34,
+ "type": "REQUEST_INITIALIZATION"
+ },
+ {
+ "end": 46,
+ "start": 44,
+ "type": "REQUEST_EXECUTION"
+ },
+ {
+ "end": 50,
+ "start": 47,
+ "type": "RESPONSE_HANDLING"
+ }
+ ]
+ },
+ {
+ "clientMethod": {
+ "method": {
+ "service": {
+ "shortName": "FeaturestoreOnlineServingService"
+ },
+ "shortName": "ReadFeatureValues"
+ }
+ },
+ "file": "aiplatform_generated_aiplatform_v1beta1_featurestore_online_serving_service_read_feature_values_sync.py",
+ "regionTag": "aiplatform_generated_aiplatform_v1beta1_FeaturestoreOnlineServingService_ReadFeatureValues_sync",
+ "segments": [
+ {
+ "end": 49,
+ "start": 27,
+ "type": "FULL"
+ },
+ {
+ "end": 49,
+ "start": 27,
+ "type": "SHORT"
+ },
+ {
+ "end": 33,
+ "start": 31,
+ "type": "CLIENT_INITIALIZATION"
+ },
+ {
+ "end": 43,
+ "start": 34,
+ "type": "REQUEST_INITIALIZATION"
+ },
+ {
+ "end": 46,
+ "start": 44,
+ "type": "REQUEST_EXECUTION"
+ },
+ {
+ "end": 50,
+ "start": 47,
+ "type": "RESPONSE_HANDLING"
+ }
+ ]
+ },
+ {
+ "clientMethod": {
+ "async": true,
+ "method": {
+ "service": {
+ "shortName": "FeaturestoreOnlineServingService"
+ },
+ "shortName": "StreamingReadFeatureValues"
+ }
+ },
+ "file": "aiplatform_generated_aiplatform_v1beta1_featurestore_online_serving_service_streaming_read_feature_values_async.py",
+ "regionTag": "aiplatform_generated_aiplatform_v1beta1_FeaturestoreOnlineServingService_StreamingReadFeatureValues_async",
+ "segments": [
+ {
+ "end": 50,
+ "start": 27,
+ "type": "FULL"
+ },
+ {
+ "end": 50,
+ "start": 27,
+ "type": "SHORT"
+ },
+ {
+ "end": 33,
+ "start": 31,
+ "type": "CLIENT_INITIALIZATION"
+ },
+ {
+ "end": 43,
+ "start": 34,
+ "type": "REQUEST_INITIALIZATION"
+ },
+ {
+ "end": 46,
+ "start": 44,
+ "type": "REQUEST_EXECUTION"
+ },
+ {
+ "end": 51,
+ "start": 47,
+ "type": "RESPONSE_HANDLING"
+ }
+ ]
+ },
+ {
+ "clientMethod": {
+ "method": {
+ "service": {
+ "shortName": "FeaturestoreOnlineServingService"
+ },
+ "shortName": "StreamingReadFeatureValues"
+ }
+ },
+ "file": "aiplatform_generated_aiplatform_v1beta1_featurestore_online_serving_service_streaming_read_feature_values_sync.py",
+ "regionTag": "aiplatform_generated_aiplatform_v1beta1_FeaturestoreOnlineServingService_StreamingReadFeatureValues_sync",
+ "segments": [
+ {
+ "end": 50,
+ "start": 27,
+ "type": "FULL"
+ },
+ {
+ "end": 50,
+ "start": 27,
+ "type": "SHORT"
+ },
+ {
+ "end": 33,
+ "start": 31,
+ "type": "CLIENT_INITIALIZATION"
+ },
+ {
+ "end": 43,
+ "start": 34,
+ "type": "REQUEST_INITIALIZATION"
+ },
+ {
+ "end": 46,
+ "start": 44,
+ "type": "REQUEST_EXECUTION"
+ },
+ {
+ "end": 51,
+ "start": 47,
+ "type": "RESPONSE_HANDLING"
+ }
+ ]
+ },
+ {
+ "clientMethod": {
+ "async": true,
+ "method": {
+ "service": {
+ "shortName": "FeaturestoreService"
+ },
+ "shortName": "BatchCreateFeatures"
+ }
+ },
+ "file": "aiplatform_generated_aiplatform_v1beta1_featurestore_service_batch_create_features_async.py",
+ "regionTag": "aiplatform_generated_aiplatform_v1beta1_FeaturestoreService_BatchCreateFeatures_async",
+ "segments": [
+ {
+ "end": 54,
+ "start": 27,
+ "type": "FULL"
+ },
+ {
+ "end": 54,
+ "start": 27,
+ "type": "SHORT"
+ },
+ {
+ "end": 33,
+ "start": 31,
+ "type": "CLIENT_INITIALIZATION"
+ },
+ {
+ "end": 44,
+ "start": 34,
+ "type": "REQUEST_INITIALIZATION"
+ },
+ {
+ "end": 51,
+ "start": 45,
+ "type": "REQUEST_EXECUTION"
+ },
+ {
+ "end": 55,
+ "start": 52,
+ "type": "RESPONSE_HANDLING"
+ }
+ ]
+ },
+ {
+ "clientMethod": {
+ "method": {
+ "service": {
+ "shortName": "FeaturestoreService"
+ },
+ "shortName": "BatchCreateFeatures"
+ }
+ },
+ "file": "aiplatform_generated_aiplatform_v1beta1_featurestore_service_batch_create_features_sync.py",
+ "regionTag": "aiplatform_generated_aiplatform_v1beta1_FeaturestoreService_BatchCreateFeatures_sync",
+ "segments": [
+ {
+ "end": 54,
+ "start": 27,
+ "type": "FULL"
+ },
+ {
+ "end": 54,
+ "start": 27,
+ "type": "SHORT"
+ },
+ {
+ "end": 33,
+ "start": 31,
+ "type": "CLIENT_INITIALIZATION"
+ },
+ {
+ "end": 44,
+ "start": 34,
+ "type": "REQUEST_INITIALIZATION"
+ },
+ {
+ "end": 51,
+ "start": 45,
+ "type": "REQUEST_EXECUTION"
+ },
+ {
+ "end": 55,
+ "start": 52,
+ "type": "RESPONSE_HANDLING"
+ }
+ ]
+ },
+ {
+ "clientMethod": {
+ "async": true,
+ "method": {
+ "service": {
+ "shortName": "FeaturestoreService"
+ },
+ "shortName": "BatchReadFeatureValues"
+ }
+ },
+ "file": "aiplatform_generated_aiplatform_v1beta1_featurestore_service_batch_read_feature_values_async.py",
+ "regionTag": "aiplatform_generated_aiplatform_v1beta1_FeaturestoreService_BatchReadFeatureValues_async",
+ "segments": [
+ {
+ "end": 61,
+ "start": 27,
+ "type": "FULL"
+ },
+ {
+ "end": 61,
+ "start": 27,
+ "type": "SHORT"
+ },
+ {
+ "end": 33,
+ "start": 31,
+ "type": "CLIENT_INITIALIZATION"
+ },
+ {
+ "end": 51,
+ "start": 34,
+ "type": "REQUEST_INITIALIZATION"
+ },
+ {
+ "end": 58,
+ "start": 52,
+ "type": "REQUEST_EXECUTION"
+ },
+ {
+ "end": 62,
+ "start": 59,
+ "type": "RESPONSE_HANDLING"
+ }
+ ]
+ },
+ {
+ "clientMethod": {
+ "method": {
+ "service": {
+ "shortName": "FeaturestoreService"
+ },
+ "shortName": "BatchReadFeatureValues"
+ }
+ },
+ "file": "aiplatform_generated_aiplatform_v1beta1_featurestore_service_batch_read_feature_values_sync.py",
+ "regionTag": "aiplatform_generated_aiplatform_v1beta1_FeaturestoreService_BatchReadFeatureValues_sync",
+ "segments": [
+ {
+ "end": 61,
+ "start": 27,
+ "type": "FULL"
+ },
+ {
+ "end": 61,
+ "start": 27,
+ "type": "SHORT"
+ },
+ {
+ "end": 33,
+ "start": 31,
+ "type": "CLIENT_INITIALIZATION"
+ },
+ {
+ "end": 51,
+ "start": 34,
+ "type": "REQUEST_INITIALIZATION"
+ },
+ {
+ "end": 58,
+ "start": 52,
+ "type": "REQUEST_EXECUTION"
+ },
+ {
+ "end": 62,
+ "start": 59,
+ "type": "RESPONSE_HANDLING"
+ }
+ ]
+ },
+ {
+ "clientMethod": {
+ "async": true,
+ "method": {
+ "service": {
+ "shortName": "FeaturestoreService"
+ },
+ "shortName": "CreateEntityType"
+ }
+ },
+ "file": "aiplatform_generated_aiplatform_v1beta1_featurestore_service_create_entity_type_async.py",
+ "regionTag": "aiplatform_generated_aiplatform_v1beta1_FeaturestoreService_CreateEntityType_async",
+ "segments": [
+ {
+ "end": 49,
+ "start": 27,
+ "type": "FULL"
+ },
+ {
+ "end": 49,
+ "start": 27,
+ "type": "SHORT"
+ },
+ {
+ "end": 33,
+ "start": 31,
+ "type": "CLIENT_INITIALIZATION"
+ },
+ {
+ "end": 39,
+ "start": 34,
+ "type": "REQUEST_INITIALIZATION"
+ },
+ {
+ "end": 46,
+ "start": 40,
+ "type": "REQUEST_EXECUTION"
+ },
+ {
+ "end": 50,
+ "start": 47,
+ "type": "RESPONSE_HANDLING"
+ }
+ ]
+ },
+ {
+ "clientMethod": {
+ "method": {
+ "service": {
+ "shortName": "FeaturestoreService"
+ },
+ "shortName": "CreateEntityType"
+ }
+ },
+ "file": "aiplatform_generated_aiplatform_v1beta1_featurestore_service_create_entity_type_sync.py",
+ "regionTag": "aiplatform_generated_aiplatform_v1beta1_FeaturestoreService_CreateEntityType_sync",
+ "segments": [
+ {
+ "end": 49,
+ "start": 27,
+ "type": "FULL"
+ },
+ {
+ "end": 49,
+ "start": 27,
+ "type": "SHORT"
+ },
+ {
+ "end": 33,
+ "start": 31,
+ "type": "CLIENT_INITIALIZATION"
+ },
+ {
+ "end": 39,
+ "start": 34,
+ "type": "REQUEST_INITIALIZATION"
+ },
+ {
+ "end": 46,
+ "start": 40,
+ "type": "REQUEST_EXECUTION"
+ },
+ {
+ "end": 50,
+ "start": 47,
+ "type": "RESPONSE_HANDLING"
+ }
+ ]
+ },
+ {
+ "clientMethod": {
+ "async": true,
+ "method": {
+ "service": {
+ "shortName": "FeaturestoreService"
+ },
+ "shortName": "CreateFeature"
+ }
+ },
+ "file": "aiplatform_generated_aiplatform_v1beta1_featurestore_service_create_feature_async.py",
+ "regionTag": "aiplatform_generated_aiplatform_v1beta1_FeaturestoreService_CreateFeature_async",
+ "segments": [
+ {
+ "end": 53,
+ "start": 27,
+ "type": "FULL"
+ },
+ {
+ "end": 53,
+ "start": 27,
+ "type": "SHORT"
+ },
+ {
+ "end": 33,
+ "start": 31,
+ "type": "CLIENT_INITIALIZATION"
+ },
+ {
+ "end": 43,
+ "start": 34,
+ "type": "REQUEST_INITIALIZATION"
+ },
+ {
+ "end": 50,
+ "start": 44,
+ "type": "REQUEST_EXECUTION"
+ },
+ {
+ "end": 54,
+ "start": 51,
+ "type": "RESPONSE_HANDLING"
+ }
+ ]
+ },
+ {
+ "clientMethod": {
+ "method": {
+ "service": {
+ "shortName": "FeaturestoreService"
+ },
+ "shortName": "CreateFeature"
+ }
+ },
+ "file": "aiplatform_generated_aiplatform_v1beta1_featurestore_service_create_feature_sync.py",
+ "regionTag": "aiplatform_generated_aiplatform_v1beta1_FeaturestoreService_CreateFeature_sync",
+ "segments": [
+ {
+ "end": 53,
+ "start": 27,
+ "type": "FULL"
+ },
+ {
+ "end": 53,
+ "start": 27,
+ "type": "SHORT"
+ },
+ {
+ "end": 33,
+ "start": 31,
+ "type": "CLIENT_INITIALIZATION"
+ },
+ {
+ "end": 43,
+ "start": 34,
+ "type": "REQUEST_INITIALIZATION"
+ },
+ {
+ "end": 50,
+ "start": 44,
+ "type": "REQUEST_EXECUTION"
+ },
+ {
+ "end": 54,
+ "start": 51,
+ "type": "RESPONSE_HANDLING"
+ }
+ ]
+ },
+ {
+ "clientMethod": {
+ "async": true,
+ "method": {
+ "service": {
+ "shortName": "FeaturestoreService"
+ },
+ "shortName": "CreateFeaturestore"
+ }
+ },
+ "file": "aiplatform_generated_aiplatform_v1beta1_featurestore_service_create_featurestore_async.py",
+ "regionTag": "aiplatform_generated_aiplatform_v1beta1_FeaturestoreService_CreateFeaturestore_async",
+ "segments": [
+ {
+ "end": 49,
+ "start": 27,
+ "type": "FULL"
+ },
+ {
+ "end": 49,
+ "start": 27,
+ "type": "SHORT"
+ },
+ {
+ "end": 33,
+ "start": 31,
+ "type": "CLIENT_INITIALIZATION"
+ },
+ {
+ "end": 39,
+ "start": 34,
+ "type": "REQUEST_INITIALIZATION"
+ },
+ {
+ "end": 46,
+ "start": 40,
+ "type": "REQUEST_EXECUTION"
+ },
+ {
+ "end": 50,
+ "start": 47,
+ "type": "RESPONSE_HANDLING"
+ }
+ ]
+ },
+ {
+ "clientMethod": {
+ "method": {
+ "service": {
+ "shortName": "FeaturestoreService"
+ },
+ "shortName": "CreateFeaturestore"
+ }
+ },
+ "file": "aiplatform_generated_aiplatform_v1beta1_featurestore_service_create_featurestore_sync.py",
+ "regionTag": "aiplatform_generated_aiplatform_v1beta1_FeaturestoreService_CreateFeaturestore_sync",
+ "segments": [
+ {
+ "end": 49,
+ "start": 27,
+ "type": "FULL"
+ },
+ {
+ "end": 49,
+ "start": 27,
+ "type": "SHORT"
+ },
+ {
+ "end": 33,
+ "start": 31,
+ "type": "CLIENT_INITIALIZATION"
+ },
+ {
+ "end": 39,
+ "start": 34,
+ "type": "REQUEST_INITIALIZATION"
+ },
+ {
+ "end": 46,
+ "start": 40,
+ "type": "REQUEST_EXECUTION"
+ },
+ {
+ "end": 50,
+ "start": 47,
+ "type": "RESPONSE_HANDLING"
+ }
+ ]
+ },
+ {
+ "clientMethod": {
+ "async": true,
+ "method": {
+ "service": {
+ "shortName": "FeaturestoreService"
+ },
+ "shortName": "DeleteEntityType"
+ }
+ },
+ "file": "aiplatform_generated_aiplatform_v1beta1_featurestore_service_delete_entity_type_async.py",
+ "regionTag": "aiplatform_generated_aiplatform_v1beta1_FeaturestoreService_DeleteEntityType_async",
+ "segments": [
+ {
+ "end": 48,
+ "start": 27,
+ "type": "FULL"
+ },
+ {
+ "end": 48,
+ "start": 27,
+ "type": "SHORT"
+ },
+ {
+ "end": 33,
+ "start": 31,
+ "type": "CLIENT_INITIALIZATION"
+ },
+ {
+ "end": 38,
+ "start": 34,
+ "type": "REQUEST_INITIALIZATION"
+ },
+ {
+ "end": 45,
+ "start": 39,
+ "type": "REQUEST_EXECUTION"
+ },
+ {
+ "end": 49,
+ "start": 46,
+ "type": "RESPONSE_HANDLING"
+ }
+ ]
+ },
+ {
+ "clientMethod": {
+ "method": {
+ "service": {
+ "shortName": "FeaturestoreService"
+ },
+ "shortName": "DeleteEntityType"
+ }
+ },
+ "file": "aiplatform_generated_aiplatform_v1beta1_featurestore_service_delete_entity_type_sync.py",
+ "regionTag": "aiplatform_generated_aiplatform_v1beta1_FeaturestoreService_DeleteEntityType_sync",
+ "segments": [
+ {
+ "end": 48,
+ "start": 27,
+ "type": "FULL"
+ },
+ {
+ "end": 48,
+ "start": 27,
+ "type": "SHORT"
+ },
+ {
+ "end": 33,
+ "start": 31,
+ "type": "CLIENT_INITIALIZATION"
+ },
+ {
+ "end": 38,
+ "start": 34,
+ "type": "REQUEST_INITIALIZATION"
+ },
+ {
+ "end": 45,
+ "start": 39,
+ "type": "REQUEST_EXECUTION"
+ },
+ {
+ "end": 49,
+ "start": 46,
+ "type": "RESPONSE_HANDLING"
+ }
+ ]
+ },
+ {
+ "clientMethod": {
+ "async": true,
+ "method": {
+ "service": {
+ "shortName": "FeaturestoreService"
+ },
+ "shortName": "DeleteFeature"
+ }
+ },
+ "file": "aiplatform_generated_aiplatform_v1beta1_featurestore_service_delete_feature_async.py",
+ "regionTag": "aiplatform_generated_aiplatform_v1beta1_FeaturestoreService_DeleteFeature_async",
+ "segments": [
+ {
+ "end": 48,
+ "start": 27,
+ "type": "FULL"
+ },
+ {
+ "end": 48,
+ "start": 27,
+ "type": "SHORT"
+ },
+ {
+ "end": 33,
+ "start": 31,
+ "type": "CLIENT_INITIALIZATION"
+ },
+ {
+ "end": 38,
+ "start": 34,
+ "type": "REQUEST_INITIALIZATION"
+ },
+ {
+ "end": 45,
+ "start": 39,
+ "type": "REQUEST_EXECUTION"
+ },
+ {
+ "end": 49,
+ "start": 46,
+ "type": "RESPONSE_HANDLING"
+ }
+ ]
+ },
+ {
+ "clientMethod": {
+ "method": {
+ "service": {
+ "shortName": "FeaturestoreService"
+ },
+ "shortName": "DeleteFeature"
+ }
+ },
+ "file": "aiplatform_generated_aiplatform_v1beta1_featurestore_service_delete_feature_sync.py",
+ "regionTag": "aiplatform_generated_aiplatform_v1beta1_FeaturestoreService_DeleteFeature_sync",
+ "segments": [
+ {
+ "end": 48,
+ "start": 27,
+ "type": "FULL"
+ },
+ {
+ "end": 48,
+ "start": 27,
+ "type": "SHORT"
+ },
+ {
+ "end": 33,
+ "start": 31,
+ "type": "CLIENT_INITIALIZATION"
+ },
+ {
+ "end": 38,
+ "start": 34,
+ "type": "REQUEST_INITIALIZATION"
+ },
+ {
+ "end": 45,
+ "start": 39,
+ "type": "REQUEST_EXECUTION"
+ },
+ {
+ "end": 49,
+ "start": 46,
+ "type": "RESPONSE_HANDLING"
+ }
+ ]
+ },
+ {
+ "clientMethod": {
+ "async": true,
+ "method": {
+ "service": {
+ "shortName": "FeaturestoreService"
+ },
+ "shortName": "DeleteFeaturestore"
+ }
+ },
+ "file": "aiplatform_generated_aiplatform_v1beta1_featurestore_service_delete_featurestore_async.py",
+ "regionTag": "aiplatform_generated_aiplatform_v1beta1_FeaturestoreService_DeleteFeaturestore_async",
+ "segments": [
+ {
+ "end": 48,
+ "start": 27,
+ "type": "FULL"
+ },
+ {
+ "end": 48,
+ "start": 27,
+ "type": "SHORT"
+ },
+ {
+ "end": 33,
+ "start": 31,
+ "type": "CLIENT_INITIALIZATION"
+ },
+ {
+ "end": 38,
+ "start": 34,
+ "type": "REQUEST_INITIALIZATION"
+ },
+ {
+ "end": 45,
+ "start": 39,
+ "type": "REQUEST_EXECUTION"
+ },
+ {
+ "end": 49,
+ "start": 46,
+ "type": "RESPONSE_HANDLING"
+ }
+ ]
+ },
+ {
+ "clientMethod": {
+ "method": {
+ "service": {
+ "shortName": "FeaturestoreService"
+ },
+ "shortName": "DeleteFeaturestore"
+ }
+ },
+ "file": "aiplatform_generated_aiplatform_v1beta1_featurestore_service_delete_featurestore_sync.py",
+ "regionTag": "aiplatform_generated_aiplatform_v1beta1_FeaturestoreService_DeleteFeaturestore_sync",
+ "segments": [
+ {
+ "end": 48,
+ "start": 27,
+ "type": "FULL"
+ },
+ {
+ "end": 48,
+ "start": 27,
+ "type": "SHORT"
+ },
+ {
+ "end": 33,
+ "start": 31,
+ "type": "CLIENT_INITIALIZATION"
+ },
+ {
+ "end": 38,
+ "start": 34,
+ "type": "REQUEST_INITIALIZATION"
+ },
+ {
+ "end": 45,
+ "start": 39,
+ "type": "REQUEST_EXECUTION"
+ },
+ {
+ "end": 49,
+ "start": 46,
+ "type": "RESPONSE_HANDLING"
+ }
+ ]
+ },
+ {
+ "clientMethod": {
+ "async": true,
+ "method": {
+ "service": {
+ "shortName": "FeaturestoreService"
+ },
+ "shortName": "ExportFeatureValues"
+ }
+ },
+ "file": "aiplatform_generated_aiplatform_v1beta1_featurestore_service_export_feature_values_async.py",
+ "regionTag": "aiplatform_generated_aiplatform_v1beta1_FeaturestoreService_ExportFeatureValues_async",
+ "segments": [
+ {
+ "end": 56,
+ "start": 27,
+ "type": "FULL"
+ },
+ {
+ "end": 56,
+ "start": 27,
+ "type": "SHORT"
+ },
+ {
+ "end": 33,
+ "start": 31,
+ "type": "CLIENT_INITIALIZATION"
+ },
+ {
+ "end": 46,
+ "start": 34,
+ "type": "REQUEST_INITIALIZATION"
+ },
+ {
+ "end": 53,
+ "start": 47,
+ "type": "REQUEST_EXECUTION"
+ },
+ {
+ "end": 57,
+ "start": 54,
+ "type": "RESPONSE_HANDLING"
+ }
+ ]
+ },
+ {
+ "clientMethod": {
+ "method": {
+ "service": {
+ "shortName": "FeaturestoreService"
+ },
+ "shortName": "ExportFeatureValues"
+ }
+ },
+ "file": "aiplatform_generated_aiplatform_v1beta1_featurestore_service_export_feature_values_sync.py",
+ "regionTag": "aiplatform_generated_aiplatform_v1beta1_FeaturestoreService_ExportFeatureValues_sync",
+ "segments": [
+ {
+ "end": 56,
+ "start": 27,
+ "type": "FULL"
+ },
+ {
+ "end": 56,
+ "start": 27,
+ "type": "SHORT"
+ },
+ {
+ "end": 33,
+ "start": 31,
+ "type": "CLIENT_INITIALIZATION"
+ },
+ {
+ "end": 46,
+ "start": 34,
+ "type": "REQUEST_INITIALIZATION"
+ },
+ {
+ "end": 53,
+ "start": 47,
+ "type": "REQUEST_EXECUTION"
+ },
+ {
+ "end": 57,
+ "start": 54,
+ "type": "RESPONSE_HANDLING"
+ }
+ ]
+ },
+ {
+ "clientMethod": {
+ "async": true,
+ "method": {
+ "service": {
+ "shortName": "FeaturestoreService"
+ },
+ "shortName": "GetEntityType"
+ }
+ },
+ "file": "aiplatform_generated_aiplatform_v1beta1_featurestore_service_get_entity_type_async.py",
+ "regionTag": "aiplatform_generated_aiplatform_v1beta1_FeaturestoreService_GetEntityType_async",
+ "segments": [
+ {
+ "end": 44,
+ "start": 27,
+ "type": "FULL"
+ },
+ {
+ "end": 44,
+ "start": 27,
+ "type": "SHORT"
+ },
+ {
+ "end": 33,
+ "start": 31,
+ "type": "CLIENT_INITIALIZATION"
+ },
+ {
+ "end": 38,
+ "start": 34,
+ "type": "REQUEST_INITIALIZATION"
+ },
+ {
+ "end": 41,
+ "start": 39,
+ "type": "REQUEST_EXECUTION"
+ },
+ {
+ "end": 45,
+ "start": 42,
+ "type": "RESPONSE_HANDLING"
+ }
+ ]
+ },
+ {
+ "clientMethod": {
+ "method": {
+ "service": {
+ "shortName": "FeaturestoreService"
+ },
+ "shortName": "GetEntityType"
+ }
+ },
+ "file": "aiplatform_generated_aiplatform_v1beta1_featurestore_service_get_entity_type_sync.py",
+ "regionTag": "aiplatform_generated_aiplatform_v1beta1_FeaturestoreService_GetEntityType_sync",
+ "segments": [
+ {
+ "end": 44,
+ "start": 27,
+ "type": "FULL"
+ },
+ {
+ "end": 44,
+ "start": 27,
+ "type": "SHORT"
+ },
+ {
+ "end": 33,
+ "start": 31,
+ "type": "CLIENT_INITIALIZATION"
+ },
+ {
+ "end": 38,
+ "start": 34,
+ "type": "REQUEST_INITIALIZATION"
+ },
+ {
+ "end": 41,
+ "start": 39,
+ "type": "REQUEST_EXECUTION"
+ },
+ {
+ "end": 45,
+ "start": 42,
+ "type": "RESPONSE_HANDLING"
+ }
+ ]
+ },
+ {
+ "clientMethod": {
+ "async": true,
+ "method": {
+ "service": {
+ "shortName": "FeaturestoreService"
+ },
+ "shortName": "GetFeature"
+ }
+ },
+ "file": "aiplatform_generated_aiplatform_v1beta1_featurestore_service_get_feature_async.py",
+ "regionTag": "aiplatform_generated_aiplatform_v1beta1_FeaturestoreService_GetFeature_async",
+ "segments": [
+ {
+ "end": 44,
+ "start": 27,
+ "type": "FULL"
+ },
+ {
+ "end": 44,
+ "start": 27,
+ "type": "SHORT"
+ },
+ {
+ "end": 33,
+ "start": 31,
+ "type": "CLIENT_INITIALIZATION"
+ },
+ {
+ "end": 38,
+ "start": 34,
+ "type": "REQUEST_INITIALIZATION"
+ },
+ {
+ "end": 41,
+ "start": 39,
+ "type": "REQUEST_EXECUTION"
+ },
+ {
+ "end": 45,
+ "start": 42,
+ "type": "RESPONSE_HANDLING"
+ }
+ ]
+ },
+ {
+ "clientMethod": {
+ "method": {
+ "service": {
+ "shortName": "FeaturestoreService"
+ },
+ "shortName": "GetFeature"
+ }
+ },
+ "file": "aiplatform_generated_aiplatform_v1beta1_featurestore_service_get_feature_sync.py",
+ "regionTag": "aiplatform_generated_aiplatform_v1beta1_FeaturestoreService_GetFeature_sync",
+ "segments": [
+ {
+ "end": 44,
+ "start": 27,
+ "type": "FULL"
+ },
+ {
+ "end": 44,
+ "start": 27,
+ "type": "SHORT"
+ },
+ {
+ "end": 33,
+ "start": 31,
+ "type": "CLIENT_INITIALIZATION"
+ },
+ {
+ "end": 38,
+ "start": 34,
+ "type": "REQUEST_INITIALIZATION"
+ },
+ {
+ "end": 41,
+ "start": 39,
+ "type": "REQUEST_EXECUTION"
+ },
+ {
+ "end": 45,
+ "start": 42,
+ "type": "RESPONSE_HANDLING"
+ }
+ ]
+ },
+ {
+ "clientMethod": {
+ "async": true,
+ "method": {
+ "service": {
+ "shortName": "FeaturestoreService"
+ },
+ "shortName": "GetFeaturestore"
+ }
+ },
+ "file": "aiplatform_generated_aiplatform_v1beta1_featurestore_service_get_featurestore_async.py",
+ "regionTag": "aiplatform_generated_aiplatform_v1beta1_FeaturestoreService_GetFeaturestore_async",
+ "segments": [
+ {
+ "end": 44,
+ "start": 27,
+ "type": "FULL"
+ },
+ {
+ "end": 44,
+ "start": 27,
+ "type": "SHORT"
+ },
+ {
+ "end": 33,
+ "start": 31,
+ "type": "CLIENT_INITIALIZATION"
+ },
+ {
+ "end": 38,
+ "start": 34,
+ "type": "REQUEST_INITIALIZATION"
+ },
+ {
+ "end": 41,
+ "start": 39,
+ "type": "REQUEST_EXECUTION"
+ },
+ {
+ "end": 45,
+ "start": 42,
+ "type": "RESPONSE_HANDLING"
+ }
+ ]
+ },
+ {
+ "clientMethod": {
+ "method": {
+ "service": {
+ "shortName": "FeaturestoreService"
+ },
+ "shortName": "GetFeaturestore"
+ }
+ },
+ "file": "aiplatform_generated_aiplatform_v1beta1_featurestore_service_get_featurestore_sync.py",
+ "regionTag": "aiplatform_generated_aiplatform_v1beta1_FeaturestoreService_GetFeaturestore_sync",
+ "segments": [
+ {
+ "end": 44,
+ "start": 27,
+ "type": "FULL"
+ },
+ {
+ "end": 44,
+ "start": 27,
+ "type": "SHORT"
+ },
+ {
+ "end": 33,
+ "start": 31,
+ "type": "CLIENT_INITIALIZATION"
+ },
+ {
+ "end": 38,
+ "start": 34,
+ "type": "REQUEST_INITIALIZATION"
+ },
+ {
+ "end": 41,
+ "start": 39,
+ "type": "REQUEST_EXECUTION"
+ },
+ {
+ "end": 45,
+ "start": 42,
+ "type": "RESPONSE_HANDLING"
+ }
+ ]
+ },
+ {
+ "clientMethod": {
+ "async": true,
+ "method": {
+ "service": {
+ "shortName": "FeaturestoreService"
+ },
+ "shortName": "ImportFeatureValues"
+ }
+ },
+ "file": "aiplatform_generated_aiplatform_v1beta1_featurestore_service_import_feature_values_async.py",
+ "regionTag": "aiplatform_generated_aiplatform_v1beta1_FeaturestoreService_ImportFeatureValues_async",
+ "segments": [
+ {
+ "end": 57,
+ "start": 27,
+ "type": "FULL"
+ },
+ {
+ "end": 57,
+ "start": 27,
+ "type": "SHORT"
+ },
+ {
+ "end": 33,
+ "start": 31,
+ "type": "CLIENT_INITIALIZATION"
+ },
+ {
+ "end": 47,
+ "start": 34,
+ "type": "REQUEST_INITIALIZATION"
+ },
+ {
+ "end": 54,
+ "start": 48,
+ "type": "REQUEST_EXECUTION"
+ },
+ {
+ "end": 58,
+ "start": 55,
+ "type": "RESPONSE_HANDLING"
+ }
+ ]
+ },
+ {
+ "clientMethod": {
+ "method": {
+ "service": {
+ "shortName": "FeaturestoreService"
+ },
+ "shortName": "ImportFeatureValues"
+ }
+ },
+ "file": "aiplatform_generated_aiplatform_v1beta1_featurestore_service_import_feature_values_sync.py",
+ "regionTag": "aiplatform_generated_aiplatform_v1beta1_FeaturestoreService_ImportFeatureValues_sync",
+ "segments": [
+ {
+ "end": 57,
+ "start": 27,
+ "type": "FULL"
+ },
+ {
+ "end": 57,
+ "start": 27,
+ "type": "SHORT"
+ },
+ {
+ "end": 33,
+ "start": 31,
+ "type": "CLIENT_INITIALIZATION"
+ },
+ {
+ "end": 47,
+ "start": 34,
+ "type": "REQUEST_INITIALIZATION"
+ },
+ {
+ "end": 54,
+ "start": 48,
+ "type": "REQUEST_EXECUTION"
+ },
+ {
+ "end": 58,
+ "start": 55,
+ "type": "RESPONSE_HANDLING"
+ }
+ ]
+ },
+ {
+ "clientMethod": {
+ "async": true,
+ "method": {
+ "service": {
+ "shortName": "FeaturestoreService"
+ },
+ "shortName": "ListEntityTypes"
+ }
+ },
+ "file": "aiplatform_generated_aiplatform_v1beta1_featurestore_service_list_entity_types_async.py",
+ "regionTag": "aiplatform_generated_aiplatform_v1beta1_FeaturestoreService_ListEntityTypes_async",
+ "segments": [
+ {
+ "end": 45,
+ "start": 27,
+ "type": "FULL"
+ },
+ {
+ "end": 45,
+ "start": 27,
+ "type": "SHORT"
+ },
+ {
+ "end": 33,
+ "start": 31,
+ "type": "CLIENT_INITIALIZATION"
+ },
+ {
+ "end": 38,
+ "start": 34,
+ "type": "REQUEST_INITIALIZATION"
+ },
+ {
+ "end": 41,
+ "start": 39,
+ "type": "REQUEST_EXECUTION"
+ },
+ {
+ "end": 46,
+ "start": 42,
+ "type": "RESPONSE_HANDLING"
+ }
+ ]
+ },
+ {
+ "clientMethod": {
+ "method": {
+ "service": {
+ "shortName": "FeaturestoreService"
+ },
+ "shortName": "ListEntityTypes"
+ }
+ },
+ "file": "aiplatform_generated_aiplatform_v1beta1_featurestore_service_list_entity_types_sync.py",
+ "regionTag": "aiplatform_generated_aiplatform_v1beta1_FeaturestoreService_ListEntityTypes_sync",
+ "segments": [
+ {
+ "end": 45,
+ "start": 27,
+ "type": "FULL"
+ },
+ {
+ "end": 45,
+ "start": 27,
+ "type": "SHORT"
+ },
+ {
+ "end": 33,
+ "start": 31,
+ "type": "CLIENT_INITIALIZATION"
+ },
+ {
+ "end": 38,
+ "start": 34,
+ "type": "REQUEST_INITIALIZATION"
+ },
+ {
+ "end": 41,
+ "start": 39,
+ "type": "REQUEST_EXECUTION"
+ },
+ {
+ "end": 46,
+ "start": 42,
+ "type": "RESPONSE_HANDLING"
+ }
+ ]
+ },
+ {
+ "clientMethod": {
+ "async": true,
+ "method": {
+ "service": {
+ "shortName": "FeaturestoreService"
+ },
+ "shortName": "ListFeatures"
+ }
+ },
+ "file": "aiplatform_generated_aiplatform_v1beta1_featurestore_service_list_features_async.py",
+ "regionTag": "aiplatform_generated_aiplatform_v1beta1_FeaturestoreService_ListFeatures_async",
+ "segments": [
+ {
+ "end": 45,
+ "start": 27,
+ "type": "FULL"
+ },
+ {
+ "end": 45,
+ "start": 27,
+ "type": "SHORT"
+ },
+ {
+ "end": 33,
+ "start": 31,
+ "type": "CLIENT_INITIALIZATION"
+ },
+ {
+ "end": 38,
+ "start": 34,
+ "type": "REQUEST_INITIALIZATION"
+ },
+ {
+ "end": 41,
+ "start": 39,
+ "type": "REQUEST_EXECUTION"
+ },
+ {
+ "end": 46,
+ "start": 42,
+ "type": "RESPONSE_HANDLING"
+ }
+ ]
+ },
+ {
+ "clientMethod": {
+ "method": {
+ "service": {
+ "shortName": "FeaturestoreService"
+ },
+ "shortName": "ListFeatures"
+ }
+ },
+ "file": "aiplatform_generated_aiplatform_v1beta1_featurestore_service_list_features_sync.py",
+ "regionTag": "aiplatform_generated_aiplatform_v1beta1_FeaturestoreService_ListFeatures_sync",
+ "segments": [
+ {
+ "end": 45,
+ "start": 27,
+ "type": "FULL"
+ },
+ {
+ "end": 45,
+ "start": 27,
+ "type": "SHORT"
+ },
+ {
+ "end": 33,
+ "start": 31,
+ "type": "CLIENT_INITIALIZATION"
+ },
+ {
+ "end": 38,
+ "start": 34,
+ "type": "REQUEST_INITIALIZATION"
+ },
+ {
+ "end": 41,
+ "start": 39,
+ "type": "REQUEST_EXECUTION"
+ },
+ {
+ "end": 46,
+ "start": 42,
+ "type": "RESPONSE_HANDLING"
+ }
+ ]
+ },
+ {
+ "clientMethod": {
+ "async": true,
+ "method": {
+ "service": {
+ "shortName": "FeaturestoreService"
+ },
+ "shortName": "ListFeaturestores"
+ }
+ },
+ "file": "aiplatform_generated_aiplatform_v1beta1_featurestore_service_list_featurestores_async.py",
+ "regionTag": "aiplatform_generated_aiplatform_v1beta1_FeaturestoreService_ListFeaturestores_async",
+ "segments": [
+ {
+ "end": 45,
+ "start": 27,
+ "type": "FULL"
+ },
+ {
+ "end": 45,
+ "start": 27,
+ "type": "SHORT"
+ },
+ {
+ "end": 33,
+ "start": 31,
+ "type": "CLIENT_INITIALIZATION"
+ },
+ {
+ "end": 38,
+ "start": 34,
+ "type": "REQUEST_INITIALIZATION"
+ },
+ {
+ "end": 41,
+ "start": 39,
+ "type": "REQUEST_EXECUTION"
+ },
+ {
+ "end": 46,
+ "start": 42,
+ "type": "RESPONSE_HANDLING"
+ }
+ ]
+ },
+ {
+ "clientMethod": {
+ "method": {
+ "service": {
+ "shortName": "FeaturestoreService"
+ },
+ "shortName": "ListFeaturestores"
+ }
+ },
+ "file": "aiplatform_generated_aiplatform_v1beta1_featurestore_service_list_featurestores_sync.py",
+ "regionTag": "aiplatform_generated_aiplatform_v1beta1_FeaturestoreService_ListFeaturestores_sync",
+ "segments": [
+ {
+ "end": 45,
+ "start": 27,
+ "type": "FULL"
+ },
+ {
+ "end": 45,
+ "start": 27,
+ "type": "SHORT"
+ },
+ {
+ "end": 33,
+ "start": 31,
+ "type": "CLIENT_INITIALIZATION"
+ },
+ {
+ "end": 38,
+ "start": 34,
+ "type": "REQUEST_INITIALIZATION"
+ },
+ {
+ "end": 41,
+ "start": 39,
+ "type": "REQUEST_EXECUTION"
+ },
+ {
+ "end": 46,
+ "start": 42,
+ "type": "RESPONSE_HANDLING"
+ }
+ ]
+ },
+ {
+ "clientMethod": {
+ "async": true,
+ "method": {
+ "service": {
+ "shortName": "FeaturestoreService"
+ },
+ "shortName": "SearchFeatures"
+ }
+ },
+ "file": "aiplatform_generated_aiplatform_v1beta1_featurestore_service_search_features_async.py",
+ "regionTag": "aiplatform_generated_aiplatform_v1beta1_FeaturestoreService_SearchFeatures_async",
+ "segments": [
+ {
+ "end": 45,
+ "start": 27,
+ "type": "FULL"
+ },
+ {
+ "end": 45,
+ "start": 27,
+ "type": "SHORT"
+ },
+ {
+ "end": 33,
+ "start": 31,
+ "type": "CLIENT_INITIALIZATION"
+ },
+ {
+ "end": 38,
+ "start": 34,
+ "type": "REQUEST_INITIALIZATION"
+ },
+ {
+ "end": 41,
+ "start": 39,
+ "type": "REQUEST_EXECUTION"
+ },
+ {
+ "end": 46,
+ "start": 42,
+ "type": "RESPONSE_HANDLING"
+ }
+ ]
+ },
+ {
+ "clientMethod": {
+ "method": {
+ "service": {
+ "shortName": "FeaturestoreService"
+ },
+ "shortName": "SearchFeatures"
+ }
+ },
+ "file": "aiplatform_generated_aiplatform_v1beta1_featurestore_service_search_features_sync.py",
+ "regionTag": "aiplatform_generated_aiplatform_v1beta1_FeaturestoreService_SearchFeatures_sync",
+ "segments": [
+ {
+ "end": 45,
+ "start": 27,
+ "type": "FULL"
+ },
+ {
+ "end": 45,
+ "start": 27,
+ "type": "SHORT"
+ },
+ {
+ "end": 33,
+ "start": 31,
+ "type": "CLIENT_INITIALIZATION"
+ },
+ {
+ "end": 38,
+ "start": 34,
+ "type": "REQUEST_INITIALIZATION"
+ },
+ {
+ "end": 41,
+ "start": 39,
+ "type": "REQUEST_EXECUTION"
+ },
+ {
+ "end": 46,
+ "start": 42,
+ "type": "RESPONSE_HANDLING"
+ }
+ ]
+ },
+ {
+ "clientMethod": {
+ "async": true,
+ "method": {
+ "service": {
+ "shortName": "FeaturestoreService"
+ },
+ "shortName": "UpdateEntityType"
+ }
+ },
+ "file": "aiplatform_generated_aiplatform_v1beta1_featurestore_service_update_entity_type_async.py",
+ "regionTag": "aiplatform_generated_aiplatform_v1beta1_FeaturestoreService_UpdateEntityType_async",
+ "segments": [
+ {
+ "end": 43,
+ "start": 27,
+ "type": "FULL"
+ },
+ {
+ "end": 43,
+ "start": 27,
+ "type": "SHORT"
+ },
+ {
+ "end": 33,
+ "start": 31,
+ "type": "CLIENT_INITIALIZATION"
+ },
+ {
+ "end": 37,
+ "start": 34,
+ "type": "REQUEST_INITIALIZATION"
+ },
+ {
+ "end": 40,
+ "start": 38,
+ "type": "REQUEST_EXECUTION"
+ },
+ {
+ "end": 44,
+ "start": 41,
+ "type": "RESPONSE_HANDLING"
+ }
+ ]
+ },
+ {
+ "clientMethod": {
+ "method": {
+ "service": {
+ "shortName": "FeaturestoreService"
+ },
+ "shortName": "UpdateEntityType"
+ }
+ },
+ "file": "aiplatform_generated_aiplatform_v1beta1_featurestore_service_update_entity_type_sync.py",
+ "regionTag": "aiplatform_generated_aiplatform_v1beta1_FeaturestoreService_UpdateEntityType_sync",
+ "segments": [
+ {
+ "end": 43,
+ "start": 27,
+ "type": "FULL"
+ },
+ {
+ "end": 43,
+ "start": 27,
+ "type": "SHORT"
+ },
+ {
+ "end": 33,
+ "start": 31,
+ "type": "CLIENT_INITIALIZATION"
+ },
+ {
+ "end": 37,
+ "start": 34,
+ "type": "REQUEST_INITIALIZATION"
+ },
+ {
+ "end": 40,
+ "start": 38,
+ "type": "REQUEST_EXECUTION"
+ },
+ {
+ "end": 44,
+ "start": 41,
+ "type": "RESPONSE_HANDLING"
+ }
+ ]
+ },
+ {
+ "clientMethod": {
+ "async": true,
+ "method": {
+ "service": {
+ "shortName": "FeaturestoreService"
+ },
+ "shortName": "UpdateFeature"
+ }
+ },
+ "file": "aiplatform_generated_aiplatform_v1beta1_featurestore_service_update_feature_async.py",
+ "regionTag": "aiplatform_generated_aiplatform_v1beta1_FeaturestoreService_UpdateFeature_async",
+ "segments": [
+ {
+ "end": 47,
+ "start": 27,
+ "type": "FULL"
+ },
+ {
+ "end": 47,
+ "start": 27,
+ "type": "SHORT"
+ },
+ {
+ "end": 33,
+ "start": 31,
+ "type": "CLIENT_INITIALIZATION"
+ },
+ {
+ "end": 41,
+ "start": 34,
+ "type": "REQUEST_INITIALIZATION"
+ },
+ {
+ "end": 44,
+ "start": 42,
+ "type": "REQUEST_EXECUTION"
+ },
+ {
+ "end": 48,
+ "start": 45,
+ "type": "RESPONSE_HANDLING"
+ }
+ ]
+ },
+ {
+ "clientMethod": {
+ "method": {
+ "service": {
+ "shortName": "FeaturestoreService"
+ },
+ "shortName": "UpdateFeature"
+ }
+ },
+ "file": "aiplatform_generated_aiplatform_v1beta1_featurestore_service_update_feature_sync.py",
+ "regionTag": "aiplatform_generated_aiplatform_v1beta1_FeaturestoreService_UpdateFeature_sync",
+ "segments": [
+ {
+ "end": 47,
+ "start": 27,
+ "type": "FULL"
+ },
+ {
+ "end": 47,
+ "start": 27,
+ "type": "SHORT"
+ },
+ {
+ "end": 33,
+ "start": 31,
+ "type": "CLIENT_INITIALIZATION"
+ },
+ {
+ "end": 41,
+ "start": 34,
+ "type": "REQUEST_INITIALIZATION"
+ },
+ {
+ "end": 44,
+ "start": 42,
+ "type": "REQUEST_EXECUTION"
+ },
+ {
+ "end": 48,
+ "start": 45,
+ "type": "RESPONSE_HANDLING"
+ }
+ ]
+ },
+ {
+ "clientMethod": {
+ "async": true,
+ "method": {
+ "service": {
+ "shortName": "FeaturestoreService"
+ },
+ "shortName": "UpdateFeaturestore"
+ }
+ },
+ "file": "aiplatform_generated_aiplatform_v1beta1_featurestore_service_update_featurestore_async.py",
+ "regionTag": "aiplatform_generated_aiplatform_v1beta1_FeaturestoreService_UpdateFeaturestore_async",
+ "segments": [
+ {
+ "end": 47,
+ "start": 27,
+ "type": "FULL"
+ },
+ {
+ "end": 47,
+ "start": 27,
+ "type": "SHORT"
+ },
+ {
+ "end": 33,
+ "start": 31,
+ "type": "CLIENT_INITIALIZATION"
+ },
+ {
+ "end": 37,
+ "start": 34,
+ "type": "REQUEST_INITIALIZATION"
+ },
+ {
+ "end": 44,
+ "start": 38,
+ "type": "REQUEST_EXECUTION"
+ },
+ {
+ "end": 48,
+ "start": 45,
+ "type": "RESPONSE_HANDLING"
+ }
+ ]
+ },
+ {
+ "clientMethod": {
+ "method": {
+ "service": {
+ "shortName": "FeaturestoreService"
+ },
+ "shortName": "UpdateFeaturestore"
+ }
+ },
+ "file": "aiplatform_generated_aiplatform_v1beta1_featurestore_service_update_featurestore_sync.py",
+ "regionTag": "aiplatform_generated_aiplatform_v1beta1_FeaturestoreService_UpdateFeaturestore_sync",
+ "segments": [
+ {
+ "end": 47,
+ "start": 27,
+ "type": "FULL"
+ },
+ {
+ "end": 47,
+ "start": 27,
+ "type": "SHORT"
+ },
+ {
+ "end": 33,
+ "start": 31,
+ "type": "CLIENT_INITIALIZATION"
+ },
+ {
+ "end": 37,
+ "start": 34,
+ "type": "REQUEST_INITIALIZATION"
+ },
+ {
+ "end": 44,
+ "start": 38,
+ "type": "REQUEST_EXECUTION"
+ },
+ {
+ "end": 48,
+ "start": 45,
+ "type": "RESPONSE_HANDLING"
+ }
+ ]
+ },
+ {
+ "clientMethod": {
+ "async": true,
+ "method": {
+ "service": {
+ "shortName": "IndexEndpointService"
+ },
+ "shortName": "CreateIndexEndpoint"
+ }
+ },
+ "file": "aiplatform_generated_aiplatform_v1beta1_index_endpoint_service_create_index_endpoint_async.py",
+ "regionTag": "aiplatform_generated_aiplatform_v1beta1_IndexEndpointService_CreateIndexEndpoint_async",
+ "segments": [
+ {
+ "end": 52,
+ "start": 27,
+ "type": "FULL"
+ },
+ {
+ "end": 52,
+ "start": 27,
+ "type": "SHORT"
+ },
+ {
+ "end": 33,
+ "start": 31,
+ "type": "CLIENT_INITIALIZATION"
+ },
+ {
+ "end": 42,
+ "start": 34,
+ "type": "REQUEST_INITIALIZATION"
+ },
+ {
+ "end": 49,
+ "start": 43,
+ "type": "REQUEST_EXECUTION"
+ },
+ {
+ "end": 53,
+ "start": 50,
+ "type": "RESPONSE_HANDLING"
+ }
+ ]
+ },
+ {
+ "clientMethod": {
+ "method": {
+ "service": {
+ "shortName": "IndexEndpointService"
+ },
+ "shortName": "CreateIndexEndpoint"
+ }
+ },
+ "file": "aiplatform_generated_aiplatform_v1beta1_index_endpoint_service_create_index_endpoint_sync.py",
+ "regionTag": "aiplatform_generated_aiplatform_v1beta1_IndexEndpointService_CreateIndexEndpoint_sync",
+ "segments": [
+ {
+ "end": 52,
+ "start": 27,
+ "type": "FULL"
+ },
+ {
+ "end": 52,
+ "start": 27,
+ "type": "SHORT"
+ },
+ {
+ "end": 33,
+ "start": 31,
+ "type": "CLIENT_INITIALIZATION"
+ },
+ {
+ "end": 42,
+ "start": 34,
+ "type": "REQUEST_INITIALIZATION"
+ },
+ {
+ "end": 49,
+ "start": 43,
+ "type": "REQUEST_EXECUTION"
+ },
+ {
+ "end": 53,
+ "start": 50,
+ "type": "RESPONSE_HANDLING"
+ }
+ ]
+ },
+ {
+ "clientMethod": {
+ "async": true,
+ "method": {
+ "service": {
+ "shortName": "IndexEndpointService"
+ },
+ "shortName": "DeleteIndexEndpoint"
+ }
+ },
+ "file": "aiplatform_generated_aiplatform_v1beta1_index_endpoint_service_delete_index_endpoint_async.py",
+ "regionTag": "aiplatform_generated_aiplatform_v1beta1_IndexEndpointService_DeleteIndexEndpoint_async",
+ "segments": [
+ {
+ "end": 48,
+ "start": 27,
+ "type": "FULL"
+ },
+ {
+ "end": 48,
+ "start": 27,
+ "type": "SHORT"
+ },
+ {
+ "end": 33,
+ "start": 31,
+ "type": "CLIENT_INITIALIZATION"
+ },
+ {
+ "end": 38,
+ "start": 34,
+ "type": "REQUEST_INITIALIZATION"
+ },
+ {
+ "end": 45,
+ "start": 39,
+ "type": "REQUEST_EXECUTION"
+ },
+ {
+ "end": 49,
+ "start": 46,
+ "type": "RESPONSE_HANDLING"
+ }
+ ]
+ },
+ {
+ "clientMethod": {
+ "method": {
+ "service": {
+ "shortName": "IndexEndpointService"
+ },
+ "shortName": "DeleteIndexEndpoint"
+ }
+ },
+ "file": "aiplatform_generated_aiplatform_v1beta1_index_endpoint_service_delete_index_endpoint_sync.py",
+ "regionTag": "aiplatform_generated_aiplatform_v1beta1_IndexEndpointService_DeleteIndexEndpoint_sync",
+ "segments": [
+ {
+ "end": 48,
+ "start": 27,
+ "type": "FULL"
+ },
+ {
+ "end": 48,
+ "start": 27,
+ "type": "SHORT"
+ },
+ {
+ "end": 33,
+ "start": 31,
+ "type": "CLIENT_INITIALIZATION"
+ },
+ {
+ "end": 38,
+ "start": 34,
+ "type": "REQUEST_INITIALIZATION"
+ },
+ {
+ "end": 45,
+ "start": 39,
+ "type": "REQUEST_EXECUTION"
+ },
+ {
+ "end": 49,
+ "start": 46,
+ "type": "RESPONSE_HANDLING"
+ }
+ ]
+ },
+ {
+ "clientMethod": {
+ "async": true,
+ "method": {
+ "service": {
+ "shortName": "IndexEndpointService"
+ },
+ "shortName": "DeployIndex"
+ }
+ },
+ "file": "aiplatform_generated_aiplatform_v1beta1_index_endpoint_service_deploy_index_async.py",
+ "regionTag": "aiplatform_generated_aiplatform_v1beta1_IndexEndpointService_DeployIndex_async",
+ "segments": [
+ {
+ "end": 53,
+ "start": 27,
+ "type": "FULL"
+ },
+ {
+ "end": 53,
+ "start": 27,
+ "type": "SHORT"
+ },
+ {
+ "end": 33,
+ "start": 31,
+ "type": "CLIENT_INITIALIZATION"
+ },
+ {
+ "end": 43,
+ "start": 34,
+ "type": "REQUEST_INITIALIZATION"
+ },
+ {
+ "end": 50,
+ "start": 44,
+ "type": "REQUEST_EXECUTION"
+ },
+ {
+ "end": 54,
+ "start": 51,
+ "type": "RESPONSE_HANDLING"
+ }
+ ]
+ },
+ {
+ "clientMethod": {
+ "method": {
+ "service": {
+ "shortName": "IndexEndpointService"
+ },
+ "shortName": "DeployIndex"
+ }
+ },
+ "file": "aiplatform_generated_aiplatform_v1beta1_index_endpoint_service_deploy_index_sync.py",
+ "regionTag": "aiplatform_generated_aiplatform_v1beta1_IndexEndpointService_DeployIndex_sync",
+ "segments": [
+ {
+ "end": 53,
+ "start": 27,
+ "type": "FULL"
+ },
+ {
+ "end": 53,
+ "start": 27,
+ "type": "SHORT"
+ },
+ {
+ "end": 33,
+ "start": 31,
+ "type": "CLIENT_INITIALIZATION"
+ },
+ {
+ "end": 43,
+ "start": 34,
+ "type": "REQUEST_INITIALIZATION"
+ },
+ {
+ "end": 50,
+ "start": 44,
+ "type": "REQUEST_EXECUTION"
+ },
+ {
+ "end": 54,
+ "start": 51,
+ "type": "RESPONSE_HANDLING"
+ }
+ ]
+ },
+ {
+ "clientMethod": {
+ "async": true,
+ "method": {
+ "service": {
+ "shortName": "IndexEndpointService"
+ },
+ "shortName": "GetIndexEndpoint"
+ }
+ },
+ "file": "aiplatform_generated_aiplatform_v1beta1_index_endpoint_service_get_index_endpoint_async.py",
+ "regionTag": "aiplatform_generated_aiplatform_v1beta1_IndexEndpointService_GetIndexEndpoint_async",
+ "segments": [
+ {
+ "end": 44,
+ "start": 27,
+ "type": "FULL"
+ },
+ {
+ "end": 44,
+ "start": 27,
+ "type": "SHORT"
+ },
+ {
+ "end": 33,
+ "start": 31,
+ "type": "CLIENT_INITIALIZATION"
+ },
+ {
+ "end": 38,
+ "start": 34,
+ "type": "REQUEST_INITIALIZATION"
+ },
+ {
+ "end": 41,
+ "start": 39,
+ "type": "REQUEST_EXECUTION"
+ },
+ {
+ "end": 45,
+ "start": 42,
+ "type": "RESPONSE_HANDLING"
+ }
+ ]
+ },
+ {
+ "clientMethod": {
+ "method": {
+ "service": {
+ "shortName": "IndexEndpointService"
+ },
+ "shortName": "GetIndexEndpoint"
+ }
+ },
+ "file": "aiplatform_generated_aiplatform_v1beta1_index_endpoint_service_get_index_endpoint_sync.py",
+ "regionTag": "aiplatform_generated_aiplatform_v1beta1_IndexEndpointService_GetIndexEndpoint_sync",
+ "segments": [
+ {
+ "end": 44,
+ "start": 27,
+ "type": "FULL"
+ },
+ {
+ "end": 44,
+ "start": 27,
+ "type": "SHORT"
+ },
+ {
+ "end": 33,
+ "start": 31,
+ "type": "CLIENT_INITIALIZATION"
+ },
+ {
+ "end": 38,
+ "start": 34,
+ "type": "REQUEST_INITIALIZATION"
+ },
+ {
+ "end": 41,
+ "start": 39,
+ "type": "REQUEST_EXECUTION"
+ },
+ {
+ "end": 45,
+ "start": 42,
+ "type": "RESPONSE_HANDLING"
+ }
+ ]
+ },
+ {
+ "clientMethod": {
+ "async": true,
+ "method": {
+ "service": {
+ "shortName": "IndexEndpointService"
+ },
+ "shortName": "ListIndexEndpoints"
+ }
+ },
+ "file": "aiplatform_generated_aiplatform_v1beta1_index_endpoint_service_list_index_endpoints_async.py",
+ "regionTag": "aiplatform_generated_aiplatform_v1beta1_IndexEndpointService_ListIndexEndpoints_async",
+ "segments": [
+ {
+ "end": 45,
+ "start": 27,
+ "type": "FULL"
+ },
+ {
+ "end": 45,
+ "start": 27,
+ "type": "SHORT"
+ },
+ {
+ "end": 33,
+ "start": 31,
+ "type": "CLIENT_INITIALIZATION"
+ },
+ {
+ "end": 38,
+ "start": 34,
+ "type": "REQUEST_INITIALIZATION"
+ },
+ {
+ "end": 41,
+ "start": 39,
+ "type": "REQUEST_EXECUTION"
+ },
+ {
+ "end": 46,
+ "start": 42,
+ "type": "RESPONSE_HANDLING"
+ }
+ ]
+ },
+ {
+ "clientMethod": {
+ "method": {
+ "service": {
+ "shortName": "IndexEndpointService"
+ },
+ "shortName": "ListIndexEndpoints"
+ }
+ },
+ "file": "aiplatform_generated_aiplatform_v1beta1_index_endpoint_service_list_index_endpoints_sync.py",
+ "regionTag": "aiplatform_generated_aiplatform_v1beta1_IndexEndpointService_ListIndexEndpoints_sync",
+ "segments": [
+ {
+ "end": 45,
+ "start": 27,
+ "type": "FULL"
+ },
+ {
+ "end": 45,
+ "start": 27,
+ "type": "SHORT"
+ },
+ {
+ "end": 33,
+ "start": 31,
+ "type": "CLIENT_INITIALIZATION"
+ },
+ {
+ "end": 38,
+ "start": 34,
+ "type": "REQUEST_INITIALIZATION"
+ },
+ {
+ "end": 41,
+ "start": 39,
+ "type": "REQUEST_EXECUTION"
+ },
+ {
+ "end": 46,
+ "start": 42,
+ "type": "RESPONSE_HANDLING"
+ }
+ ]
+ },
+ {
+ "clientMethod": {
+ "async": true,
+ "method": {
+ "service": {
+ "shortName": "IndexEndpointService"
+ },
+ "shortName": "MutateDeployedIndex"
+ }
+ },
+ "file": "aiplatform_generated_aiplatform_v1beta1_index_endpoint_service_mutate_deployed_index_async.py",
+ "regionTag": "aiplatform_generated_aiplatform_v1beta1_IndexEndpointService_MutateDeployedIndex_async",
+ "segments": [
+ {
+ "end": 53,
+ "start": 27,
+ "type": "FULL"
+ },
+ {
+ "end": 53,
+ "start": 27,
+ "type": "SHORT"
+ },
+ {
+ "end": 33,
+ "start": 31,
+ "type": "CLIENT_INITIALIZATION"
+ },
+ {
+ "end": 43,
+ "start": 34,
+ "type": "REQUEST_INITIALIZATION"
+ },
+ {
+ "end": 50,
+ "start": 44,
+ "type": "REQUEST_EXECUTION"
+ },
+ {
+ "end": 54,
+ "start": 51,
+ "type": "RESPONSE_HANDLING"
+ }
+ ]
+ },
+ {
+ "clientMethod": {
+ "method": {
+ "service": {
+ "shortName": "IndexEndpointService"
+ },
+ "shortName": "MutateDeployedIndex"
+ }
+ },
+ "file": "aiplatform_generated_aiplatform_v1beta1_index_endpoint_service_mutate_deployed_index_sync.py",
+ "regionTag": "aiplatform_generated_aiplatform_v1beta1_IndexEndpointService_MutateDeployedIndex_sync",
+ "segments": [
+ {
+ "end": 53,
+ "start": 27,
+ "type": "FULL"
+ },
+ {
+ "end": 53,
+ "start": 27,
+ "type": "SHORT"
+ },
+ {
+ "end": 33,
+ "start": 31,
+ "type": "CLIENT_INITIALIZATION"
+ },
+ {
+ "end": 43,
+ "start": 34,
+ "type": "REQUEST_INITIALIZATION"
+ },
+ {
+ "end": 50,
+ "start": 44,
+ "type": "REQUEST_EXECUTION"
+ },
+ {
+ "end": 54,
+ "start": 51,
+ "type": "RESPONSE_HANDLING"
+ }
+ ]
+ },
+ {
+ "clientMethod": {
+ "async": true,
+ "method": {
+ "service": {
+ "shortName": "IndexEndpointService"
+ },
+ "shortName": "UndeployIndex"
+ }
+ },
+ "file": "aiplatform_generated_aiplatform_v1beta1_index_endpoint_service_undeploy_index_async.py",
+ "regionTag": "aiplatform_generated_aiplatform_v1beta1_IndexEndpointService_UndeployIndex_async",
+ "segments": [
+ {
+ "end": 49,
+ "start": 27,
+ "type": "FULL"
+ },
+ {
+ "end": 49,
+ "start": 27,
+ "type": "SHORT"
+ },
+ {
+ "end": 33,
+ "start": 31,
+ "type": "CLIENT_INITIALIZATION"
+ },
+ {
+ "end": 39,
+ "start": 34,
+ "type": "REQUEST_INITIALIZATION"
+ },
+ {
+ "end": 46,
+ "start": 40,
+ "type": "REQUEST_EXECUTION"
+ },
+ {
+ "end": 50,
+ "start": 47,
+ "type": "RESPONSE_HANDLING"
+ }
+ ]
+ },
+ {
+ "clientMethod": {
+ "method": {
+ "service": {
+ "shortName": "IndexEndpointService"
+ },
+ "shortName": "UndeployIndex"
+ }
+ },
+ "file": "aiplatform_generated_aiplatform_v1beta1_index_endpoint_service_undeploy_index_sync.py",
+ "regionTag": "aiplatform_generated_aiplatform_v1beta1_IndexEndpointService_UndeployIndex_sync",
+ "segments": [
+ {
+ "end": 49,
+ "start": 27,
+ "type": "FULL"
+ },
+ {
+ "end": 49,
+ "start": 27,
+ "type": "SHORT"
+ },
+ {
+ "end": 33,
+ "start": 31,
+ "type": "CLIENT_INITIALIZATION"
+ },
+ {
+ "end": 39,
+ "start": 34,
+ "type": "REQUEST_INITIALIZATION"
+ },
+ {
+ "end": 46,
+ "start": 40,
+ "type": "REQUEST_EXECUTION"
+ },
+ {
+ "end": 50,
+ "start": 47,
+ "type": "RESPONSE_HANDLING"
+ }
+ ]
+ },
+ {
+ "clientMethod": {
+ "async": true,
+ "method": {
+ "service": {
+ "shortName": "IndexEndpointService"
+ },
+ "shortName": "UpdateIndexEndpoint"
+ }
+ },
+ "file": "aiplatform_generated_aiplatform_v1beta1_index_endpoint_service_update_index_endpoint_async.py",
+ "regionTag": "aiplatform_generated_aiplatform_v1beta1_IndexEndpointService_UpdateIndexEndpoint_async",
+ "segments": [
+ {
+ "end": 47,
+ "start": 27,
+ "type": "FULL"
+ },
+ {
+ "end": 47,
+ "start": 27,
+ "type": "SHORT"
+ },
+ {
+ "end": 33,
+ "start": 31,
+ "type": "CLIENT_INITIALIZATION"
+ },
+ {
+ "end": 41,
+ "start": 34,
+ "type": "REQUEST_INITIALIZATION"
+ },
+ {
+ "end": 44,
+ "start": 42,
+ "type": "REQUEST_EXECUTION"
+ },
+ {
+ "end": 48,
+ "start": 45,
+ "type": "RESPONSE_HANDLING"
+ }
+ ]
+ },
+ {
+ "clientMethod": {
+ "method": {
+ "service": {
+ "shortName": "IndexEndpointService"
+ },
+ "shortName": "UpdateIndexEndpoint"
+ }
+ },
+ "file": "aiplatform_generated_aiplatform_v1beta1_index_endpoint_service_update_index_endpoint_sync.py",
+ "regionTag": "aiplatform_generated_aiplatform_v1beta1_IndexEndpointService_UpdateIndexEndpoint_sync",
+ "segments": [
+ {
+ "end": 47,
+ "start": 27,
+ "type": "FULL"
+ },
+ {
+ "end": 47,
+ "start": 27,
+ "type": "SHORT"
+ },
+ {
+ "end": 33,
+ "start": 31,
+ "type": "CLIENT_INITIALIZATION"
+ },
+ {
+ "end": 41,
+ "start": 34,
+ "type": "REQUEST_INITIALIZATION"
+ },
+ {
+ "end": 44,
+ "start": 42,
+ "type": "REQUEST_EXECUTION"
+ },
+ {
+ "end": 48,
+ "start": 45,
+ "type": "RESPONSE_HANDLING"
+ }
+ ]
+ },
+ {
+ "clientMethod": {
+ "async": true,
+ "method": {
+ "service": {
+ "shortName": "IndexService"
+ },
+ "shortName": "CreateIndex"
+ }
+ },
+ "file": "aiplatform_generated_aiplatform_v1beta1_index_service_create_index_async.py",
+ "regionTag": "aiplatform_generated_aiplatform_v1beta1_IndexService_CreateIndex_async",
+ "segments": [
+ {
+ "end": 52,
+ "start": 27,
+ "type": "FULL"
+ },
+ {
+ "end": 52,
+ "start": 27,
+ "type": "SHORT"
+ },
+ {
+ "end": 33,
+ "start": 31,
+ "type": "CLIENT_INITIALIZATION"
+ },
+ {
+ "end": 42,
+ "start": 34,
+ "type": "REQUEST_INITIALIZATION"
+ },
+ {
+ "end": 49,
+ "start": 43,
+ "type": "REQUEST_EXECUTION"
+ },
+ {
+ "end": 53,
+ "start": 50,
+ "type": "RESPONSE_HANDLING"
+ }
+ ]
+ },
+ {
+ "clientMethod": {
+ "method": {
+ "service": {
+ "shortName": "IndexService"
+ },
+ "shortName": "CreateIndex"
+ }
+ },
+ "file": "aiplatform_generated_aiplatform_v1beta1_index_service_create_index_sync.py",
+ "regionTag": "aiplatform_generated_aiplatform_v1beta1_IndexService_CreateIndex_sync",
+ "segments": [
+ {
+ "end": 52,
+ "start": 27,
+ "type": "FULL"
+ },
+ {
+ "end": 52,
+ "start": 27,
+ "type": "SHORT"
+ },
+ {
+ "end": 33,
+ "start": 31,
+ "type": "CLIENT_INITIALIZATION"
+ },
+ {
+ "end": 42,
+ "start": 34,
+ "type": "REQUEST_INITIALIZATION"
+ },
+ {
+ "end": 49,
+ "start": 43,
+ "type": "REQUEST_EXECUTION"
+ },
+ {
+ "end": 53,
+ "start": 50,
+ "type": "RESPONSE_HANDLING"
+ }
+ ]
+ },
+ {
+ "clientMethod": {
+ "async": true,
+ "method": {
+ "service": {
+ "shortName": "IndexService"
+ },
+ "shortName": "DeleteIndex"
+ }
+ },
+ "file": "aiplatform_generated_aiplatform_v1beta1_index_service_delete_index_async.py",
+ "regionTag": "aiplatform_generated_aiplatform_v1beta1_IndexService_DeleteIndex_async",
+ "segments": [
+ {
+ "end": 48,
+ "start": 27,
+ "type": "FULL"
+ },
+ {
+ "end": 48,
+ "start": 27,
+ "type": "SHORT"
+ },
+ {
+ "end": 33,
+ "start": 31,
+ "type": "CLIENT_INITIALIZATION"
+ },
+ {
+ "end": 38,
+ "start": 34,
+ "type": "REQUEST_INITIALIZATION"
+ },
+ {
+ "end": 45,
+ "start": 39,
+ "type": "REQUEST_EXECUTION"
+ },
+ {
+ "end": 49,
+ "start": 46,
+ "type": "RESPONSE_HANDLING"
+ }
+ ]
+ },
+ {
+ "clientMethod": {
+ "method": {
+ "service": {
+ "shortName": "IndexService"
+ },
+ "shortName": "DeleteIndex"
+ }
+ },
+ "file": "aiplatform_generated_aiplatform_v1beta1_index_service_delete_index_sync.py",
+ "regionTag": "aiplatform_generated_aiplatform_v1beta1_IndexService_DeleteIndex_sync",
+ "segments": [
+ {
+ "end": 48,
+ "start": 27,
+ "type": "FULL"
+ },
+ {
+ "end": 48,
+ "start": 27,
+ "type": "SHORT"
+ },
+ {
+ "end": 33,
+ "start": 31,
+ "type": "CLIENT_INITIALIZATION"
+ },
+ {
+ "end": 38,
+ "start": 34,
+ "type": "REQUEST_INITIALIZATION"
+ },
+ {
+ "end": 45,
+ "start": 39,
+ "type": "REQUEST_EXECUTION"
+ },
+ {
+ "end": 49,
+ "start": 46,
+ "type": "RESPONSE_HANDLING"
+ }
+ ]
+ },
+ {
+ "clientMethod": {
+ "async": true,
+ "method": {
+ "service": {
+ "shortName": "IndexService"
+ },
+ "shortName": "GetIndex"
+ }
+ },
+ "file": "aiplatform_generated_aiplatform_v1beta1_index_service_get_index_async.py",
+ "regionTag": "aiplatform_generated_aiplatform_v1beta1_IndexService_GetIndex_async",
+ "segments": [
+ {
+ "end": 44,
+ "start": 27,
+ "type": "FULL"
+ },
+ {
+ "end": 44,
+ "start": 27,
+ "type": "SHORT"
+ },
+ {
+ "end": 33,
+ "start": 31,
+ "type": "CLIENT_INITIALIZATION"
+ },
+ {
+ "end": 38,
+ "start": 34,
+ "type": "REQUEST_INITIALIZATION"
+ },
+ {
+ "end": 41,
+ "start": 39,
+ "type": "REQUEST_EXECUTION"
+ },
+ {
+ "end": 45,
+ "start": 42,
+ "type": "RESPONSE_HANDLING"
+ }
+ ]
+ },
+ {
+ "clientMethod": {
+ "method": {
+ "service": {
+ "shortName": "IndexService"
+ },
+ "shortName": "GetIndex"
+ }
+ },
+ "file": "aiplatform_generated_aiplatform_v1beta1_index_service_get_index_sync.py",
+ "regionTag": "aiplatform_generated_aiplatform_v1beta1_IndexService_GetIndex_sync",
+ "segments": [
+ {
+ "end": 44,
+ "start": 27,
+ "type": "FULL"
+ },
+ {
+ "end": 44,
+ "start": 27,
+ "type": "SHORT"
+ },
+ {
+ "end": 33,
+ "start": 31,
+ "type": "CLIENT_INITIALIZATION"
+ },
+ {
+ "end": 38,
+ "start": 34,
+ "type": "REQUEST_INITIALIZATION"
+ },
+ {
+ "end": 41,
+ "start": 39,
+ "type": "REQUEST_EXECUTION"
+ },
+ {
+ "end": 45,
+ "start": 42,
+ "type": "RESPONSE_HANDLING"
+ }
+ ]
+ },
+ {
+ "clientMethod": {
+ "async": true,
+ "method": {
+ "service": {
+ "shortName": "IndexService"
+ },
+ "shortName": "ListIndexes"
+ }
+ },
+ "file": "aiplatform_generated_aiplatform_v1beta1_index_service_list_indexes_async.py",
+ "regionTag": "aiplatform_generated_aiplatform_v1beta1_IndexService_ListIndexes_async",
+ "segments": [
+ {
+ "end": 45,
+ "start": 27,
+ "type": "FULL"
+ },
+ {
+ "end": 45,
+ "start": 27,
+ "type": "SHORT"
+ },
+ {
+ "end": 33,
+ "start": 31,
+ "type": "CLIENT_INITIALIZATION"
+ },
+ {
+ "end": 38,
+ "start": 34,
+ "type": "REQUEST_INITIALIZATION"
+ },
+ {
+ "end": 41,
+ "start": 39,
+ "type": "REQUEST_EXECUTION"
+ },
+ {
+ "end": 46,
+ "start": 42,
+ "type": "RESPONSE_HANDLING"
+ }
+ ]
+ },
+ {
+ "clientMethod": {
+ "method": {
+ "service": {
+ "shortName": "IndexService"
+ },
+ "shortName": "ListIndexes"
+ }
+ },
+ "file": "aiplatform_generated_aiplatform_v1beta1_index_service_list_indexes_sync.py",
+ "regionTag": "aiplatform_generated_aiplatform_v1beta1_IndexService_ListIndexes_sync",
+ "segments": [
+ {
+ "end": 45,
+ "start": 27,
+ "type": "FULL"
+ },
+ {
+ "end": 45,
+ "start": 27,
+ "type": "SHORT"
+ },
+ {
+ "end": 33,
+ "start": 31,
+ "type": "CLIENT_INITIALIZATION"
+ },
+ {
+ "end": 38,
+ "start": 34,
+ "type": "REQUEST_INITIALIZATION"
+ },
+ {
+ "end": 41,
+ "start": 39,
+ "type": "REQUEST_EXECUTION"
+ },
+ {
+ "end": 46,
+ "start": 42,
+ "type": "RESPONSE_HANDLING"
+ }
+ ]
+ },
+ {
+ "clientMethod": {
+ "async": true,
+ "method": {
+ "service": {
+ "shortName": "IndexService"
+ },
+ "shortName": "UpdateIndex"
+ }
+ },
+ "file": "aiplatform_generated_aiplatform_v1beta1_index_service_update_index_async.py",
+ "regionTag": "aiplatform_generated_aiplatform_v1beta1_IndexService_UpdateIndex_async",
+ "segments": [
+ {
+ "end": 51,
+ "start": 27,
+ "type": "FULL"
+ },
+ {
+ "end": 51,
+ "start": 27,
+ "type": "SHORT"
+ },
+ {
+ "end": 33,
+ "start": 31,
+ "type": "CLIENT_INITIALIZATION"
+ },
+ {
+ "end": 41,
+ "start": 34,
+ "type": "REQUEST_INITIALIZATION"
+ },
+ {
+ "end": 48,
+ "start": 42,
+ "type": "REQUEST_EXECUTION"
+ },
+ {
+ "end": 52,
+ "start": 49,
+ "type": "RESPONSE_HANDLING"
+ }
+ ]
+ },
+ {
+ "clientMethod": {
+ "method": {
+ "service": {
+ "shortName": "IndexService"
+ },
+ "shortName": "UpdateIndex"
+ }
+ },
+ "file": "aiplatform_generated_aiplatform_v1beta1_index_service_update_index_sync.py",
+ "regionTag": "aiplatform_generated_aiplatform_v1beta1_IndexService_UpdateIndex_sync",
+ "segments": [
+ {
+ "end": 51,
+ "start": 27,
+ "type": "FULL"
+ },
+ {
+ "end": 51,
+ "start": 27,
+ "type": "SHORT"
+ },
+ {
+ "end": 33,
+ "start": 31,
+ "type": "CLIENT_INITIALIZATION"
+ },
+ {
+ "end": 41,
+ "start": 34,
+ "type": "REQUEST_INITIALIZATION"
+ },
+ {
+ "end": 48,
+ "start": 42,
+ "type": "REQUEST_EXECUTION"
+ },
+ {
+ "end": 52,
+ "start": 49,
+ "type": "RESPONSE_HANDLING"
+ }
+ ]
+ },
+ {
+ "clientMethod": {
+ "async": true,
+ "method": {
+ "service": {
+ "shortName": "JobService"
+ },
+ "shortName": "CancelBatchPredictionJob"
+ }
+ },
+ "file": "aiplatform_generated_aiplatform_v1beta1_job_service_cancel_batch_prediction_job_async.py",
+ "regionTag": "aiplatform_generated_aiplatform_v1beta1_JobService_CancelBatchPredictionJob_async",
+ "segments": [
+ {
+ "end": 42,
+ "start": 27,
+ "type": "FULL"
+ },
+ {
+ "end": 42,
+ "start": 27,
+ "type": "SHORT"
+ },
+ {
+ "end": 33,
+ "start": 31,
+ "type": "CLIENT_INITIALIZATION"
+ },
+ {
+ "end": 38,
+ "start": 34,
+ "type": "REQUEST_INITIALIZATION"
+ },
+ {
+ "start": 39,
+ "type": "REQUEST_EXECUTION"
+ },
+ {
+ "end": 43,
+ "type": "RESPONSE_HANDLING"
+ }
+ ]
+ },
+ {
+ "clientMethod": {
+ "method": {
+ "service": {
+ "shortName": "JobService"
+ },
+ "shortName": "CancelBatchPredictionJob"
+ }
+ },
+ "file": "aiplatform_generated_aiplatform_v1beta1_job_service_cancel_batch_prediction_job_sync.py",
+ "regionTag": "aiplatform_generated_aiplatform_v1beta1_JobService_CancelBatchPredictionJob_sync",
+ "segments": [
+ {
+ "end": 42,
+ "start": 27,
+ "type": "FULL"
+ },
+ {
+ "end": 42,
+ "start": 27,
+ "type": "SHORT"
+ },
+ {
+ "end": 33,
+ "start": 31,
+ "type": "CLIENT_INITIALIZATION"
+ },
+ {
+ "end": 38,
+ "start": 34,
+ "type": "REQUEST_INITIALIZATION"
+ },
+ {
+ "start": 39,
+ "type": "REQUEST_EXECUTION"
+ },
+ {
+ "end": 43,
+ "type": "RESPONSE_HANDLING"
+ }
+ ]
+ },
+ {
+ "clientMethod": {
+ "async": true,
+ "method": {
+ "service": {
+ "shortName": "JobService"
+ },
+ "shortName": "CancelCustomJob"
+ }
+ },
+ "file": "aiplatform_generated_aiplatform_v1beta1_job_service_cancel_custom_job_async.py",
+ "regionTag": "aiplatform_generated_aiplatform_v1beta1_JobService_CancelCustomJob_async",
+ "segments": [
+ {
+ "end": 42,
+ "start": 27,
+ "type": "FULL"
+ },
+ {
+ "end": 42,
+ "start": 27,
+ "type": "SHORT"
+ },
+ {
+ "end": 33,
+ "start": 31,
+ "type": "CLIENT_INITIALIZATION"
+ },
+ {
+ "end": 38,
+ "start": 34,
+ "type": "REQUEST_INITIALIZATION"
+ },
+ {
+ "start": 39,
+ "type": "REQUEST_EXECUTION"
+ },
+ {
+ "end": 43,
+ "type": "RESPONSE_HANDLING"
+ }
+ ]
+ },
+ {
+ "clientMethod": {
+ "method": {
+ "service": {
+ "shortName": "JobService"
+ },
+ "shortName": "CancelCustomJob"
+ }
+ },
+ "file": "aiplatform_generated_aiplatform_v1beta1_job_service_cancel_custom_job_sync.py",
+ "regionTag": "aiplatform_generated_aiplatform_v1beta1_JobService_CancelCustomJob_sync",
+ "segments": [
+ {
+ "end": 42,
+ "start": 27,
+ "type": "FULL"
+ },
+ {
+ "end": 42,
+ "start": 27,
+ "type": "SHORT"
+ },
+ {
+ "end": 33,
+ "start": 31,
+ "type": "CLIENT_INITIALIZATION"
+ },
+ {
+ "end": 38,
+ "start": 34,
+ "type": "REQUEST_INITIALIZATION"
+ },
+ {
+ "start": 39,
+ "type": "REQUEST_EXECUTION"
+ },
+ {
+ "end": 43,
+ "type": "RESPONSE_HANDLING"
+ }
+ ]
+ },
+ {
+ "clientMethod": {
+ "async": true,
+ "method": {
+ "service": {
+ "shortName": "JobService"
+ },
+ "shortName": "CancelDataLabelingJob"
+ }
+ },
+ "file": "aiplatform_generated_aiplatform_v1beta1_job_service_cancel_data_labeling_job_async.py",
+ "regionTag": "aiplatform_generated_aiplatform_v1beta1_JobService_CancelDataLabelingJob_async",
+ "segments": [
+ {
+ "end": 42,
+ "start": 27,
+ "type": "FULL"
+ },
+ {
+ "end": 42,
+ "start": 27,
+ "type": "SHORT"
+ },
+ {
+ "end": 33,
+ "start": 31,
+ "type": "CLIENT_INITIALIZATION"
+ },
+ {
+ "end": 38,
+ "start": 34,
+ "type": "REQUEST_INITIALIZATION"
+ },
+ {
+ "start": 39,
+ "type": "REQUEST_EXECUTION"
+ },
+ {
+ "end": 43,
+ "type": "RESPONSE_HANDLING"
+ }
+ ]
+ },
+ {
+ "clientMethod": {
+ "method": {
+ "service": {
+ "shortName": "JobService"
+ },
+ "shortName": "CancelDataLabelingJob"
+ }
+ },
+ "file": "aiplatform_generated_aiplatform_v1beta1_job_service_cancel_data_labeling_job_sync.py",
+ "regionTag": "aiplatform_generated_aiplatform_v1beta1_JobService_CancelDataLabelingJob_sync",
+ "segments": [
+ {
+ "end": 42,
+ "start": 27,
+ "type": "FULL"
+ },
+ {
+ "end": 42,
+ "start": 27,
+ "type": "SHORT"
+ },
+ {
+ "end": 33,
+ "start": 31,
+ "type": "CLIENT_INITIALIZATION"
+ },
+ {
+ "end": 38,
+ "start": 34,
+ "type": "REQUEST_INITIALIZATION"
+ },
+ {
+ "start": 39,
+ "type": "REQUEST_EXECUTION"
+ },
+ {
+ "end": 43,
+ "type": "RESPONSE_HANDLING"
+ }
+ ]
+ },
+ {
+ "clientMethod": {
+ "async": true,
+ "method": {
+ "service": {
+ "shortName": "JobService"
+ },
+ "shortName": "CancelHyperparameterTuningJob"
+ }
+ },
+ "file": "aiplatform_generated_aiplatform_v1beta1_job_service_cancel_hyperparameter_tuning_job_async.py",
+ "regionTag": "aiplatform_generated_aiplatform_v1beta1_JobService_CancelHyperparameterTuningJob_async",
+ "segments": [
+ {
+ "end": 42,
+ "start": 27,
+ "type": "FULL"
+ },
+ {
+ "end": 42,
+ "start": 27,
+ "type": "SHORT"
+ },
+ {
+ "end": 33,
+ "start": 31,
+ "type": "CLIENT_INITIALIZATION"
+ },
+ {
+ "end": 38,
+ "start": 34,
+ "type": "REQUEST_INITIALIZATION"
+ },
+ {
+ "start": 39,
+ "type": "REQUEST_EXECUTION"
+ },
+ {
+ "end": 43,
+ "type": "RESPONSE_HANDLING"
+ }
+ ]
+ },
+ {
+ "clientMethod": {
+ "method": {
+ "service": {
+ "shortName": "JobService"
+ },
+ "shortName": "CancelHyperparameterTuningJob"
+ }
+ },
+ "file": "aiplatform_generated_aiplatform_v1beta1_job_service_cancel_hyperparameter_tuning_job_sync.py",
+ "regionTag": "aiplatform_generated_aiplatform_v1beta1_JobService_CancelHyperparameterTuningJob_sync",
+ "segments": [
+ {
+ "end": 42,
+ "start": 27,
+ "type": "FULL"
+ },
+ {
+ "end": 42,
+ "start": 27,
+ "type": "SHORT"
+ },
+ {
+ "end": 33,
+ "start": 31,
+ "type": "CLIENT_INITIALIZATION"
+ },
+ {
+ "end": 38,
+ "start": 34,
+ "type": "REQUEST_INITIALIZATION"
+ },
+ {
+ "start": 39,
+ "type": "REQUEST_EXECUTION"
+ },
+ {
+ "end": 43,
+ "type": "RESPONSE_HANDLING"
+ }
+ ]
+ },
+ {
+ "clientMethod": {
+ "async": true,
+ "method": {
+ "service": {
+ "shortName": "JobService"
+ },
+ "shortName": "CreateBatchPredictionJob"
+ }
+ },
+ "file": "aiplatform_generated_aiplatform_v1beta1_job_service_create_batch_prediction_job_async.py",
+ "regionTag": "aiplatform_generated_aiplatform_v1beta1_JobService_CreateBatchPredictionJob_async",
+ "segments": [
+ {
+ "end": 52,
+ "start": 27,
+ "type": "FULL"
+ },
+ {
+ "end": 52,
+ "start": 27,
+ "type": "SHORT"
+ },
+ {
+ "end": 33,
+ "start": 31,
+ "type": "CLIENT_INITIALIZATION"
+ },
+ {
+ "end": 46,
+ "start": 34,
+ "type": "REQUEST_INITIALIZATION"
+ },
+ {
+ "end": 49,
+ "start": 47,
+ "type": "REQUEST_EXECUTION"
+ },
+ {
+ "end": 53,
+ "start": 50,
+ "type": "RESPONSE_HANDLING"
+ }
+ ]
+ },
+ {
+ "clientMethod": {
+ "method": {
+ "service": {
+ "shortName": "JobService"
+ },
+ "shortName": "CreateBatchPredictionJob"
+ }
+ },
+ "file": "aiplatform_generated_aiplatform_v1beta1_job_service_create_batch_prediction_job_sync.py",
+ "regionTag": "aiplatform_generated_aiplatform_v1beta1_JobService_CreateBatchPredictionJob_sync",
+ "segments": [
+ {
+ "end": 52,
+ "start": 27,
+ "type": "FULL"
+ },
+ {
+ "end": 52,
+ "start": 27,
+ "type": "SHORT"
+ },
+ {
+ "end": 33,
+ "start": 31,
+ "type": "CLIENT_INITIALIZATION"
+ },
+ {
+ "end": 46,
+ "start": 34,
+ "type": "REQUEST_INITIALIZATION"
+ },
+ {
+ "end": 49,
+ "start": 47,
+ "type": "REQUEST_EXECUTION"
+ },
+ {
+ "end": 53,
+ "start": 50,
+ "type": "RESPONSE_HANDLING"
+ }
+ ]
+ },
+ {
+ "clientMethod": {
+ "async": true,
+ "method": {
+ "service": {
+ "shortName": "JobService"
+ },
+ "shortName": "CreateCustomJob"
+ }
+ },
+ "file": "aiplatform_generated_aiplatform_v1beta1_job_service_create_custom_job_async.py",
+ "regionTag": "aiplatform_generated_aiplatform_v1beta1_JobService_CreateCustomJob_async",
+ "segments": [
+ {
+ "end": 49,
+ "start": 27,
+ "type": "FULL"
+ },
+ {
+ "end": 49,
+ "start": 27,
+ "type": "SHORT"
+ },
+ {
+ "end": 33,
+ "start": 31,
+ "type": "CLIENT_INITIALIZATION"
+ },
+ {
+ "end": 43,
+ "start": 34,
+ "type": "REQUEST_INITIALIZATION"
+ },
+ {
+ "end": 46,
+ "start": 44,
+ "type": "REQUEST_EXECUTION"
+ },
+ {
+ "end": 50,
+ "start": 47,
+ "type": "RESPONSE_HANDLING"
+ }
+ ]
+ },
+ {
+ "clientMethod": {
+ "method": {
+ "service": {
+ "shortName": "JobService"
+ },
+ "shortName": "CreateCustomJob"
+ }
+ },
+ "file": "aiplatform_generated_aiplatform_v1beta1_job_service_create_custom_job_sync.py",
+ "regionTag": "aiplatform_generated_aiplatform_v1beta1_JobService_CreateCustomJob_sync",
+ "segments": [
+ {
+ "end": 49,
+ "start": 27,
+ "type": "FULL"
+ },
+ {
+ "end": 49,
+ "start": 27,
+ "type": "SHORT"
+ },
+ {
+ "end": 33,
+ "start": 31,
+ "type": "CLIENT_INITIALIZATION"
+ },
+ {
+ "end": 43,
+ "start": 34,
+ "type": "REQUEST_INITIALIZATION"
+ },
+ {
+ "end": 46,
+ "start": 44,
+ "type": "REQUEST_EXECUTION"
+ },
+ {
+ "end": 50,
+ "start": 47,
+ "type": "RESPONSE_HANDLING"
+ }
+ ]
+ },
+ {
+ "clientMethod": {
+ "async": true,
+ "method": {
+ "service": {
+ "shortName": "JobService"
+ },
+ "shortName": "CreateDataLabelingJob"
+ }
+ },
+ "file": "aiplatform_generated_aiplatform_v1beta1_job_service_create_data_labeling_job_async.py",
+ "regionTag": "aiplatform_generated_aiplatform_v1beta1_JobService_CreateDataLabelingJob_async",
+ "segments": [
+ {
+ "end": 53,
+ "start": 27,
+ "type": "FULL"
+ },
+ {
+ "end": 53,
+ "start": 27,
+ "type": "SHORT"
+ },
+ {
+ "end": 33,
+ "start": 31,
+ "type": "CLIENT_INITIALIZATION"
+ },
+ {
+ "end": 47,
+ "start": 34,
+ "type": "REQUEST_INITIALIZATION"
+ },
+ {
+ "end": 50,
+ "start": 48,
+ "type": "REQUEST_EXECUTION"
+ },
+ {
+ "end": 54,
+ "start": 51,
+ "type": "RESPONSE_HANDLING"
+ }
+ ]
+ },
+ {
+ "clientMethod": {
+ "method": {
+ "service": {
+ "shortName": "JobService"
+ },
+ "shortName": "CreateDataLabelingJob"
+ }
+ },
+ "file": "aiplatform_generated_aiplatform_v1beta1_job_service_create_data_labeling_job_sync.py",
+ "regionTag": "aiplatform_generated_aiplatform_v1beta1_JobService_CreateDataLabelingJob_sync",
+ "segments": [
+ {
+ "end": 53,
+ "start": 27,
+ "type": "FULL"
+ },
+ {
+ "end": 53,
+ "start": 27,
+ "type": "SHORT"
+ },
+ {
+ "end": 33,
+ "start": 31,
+ "type": "CLIENT_INITIALIZATION"
+ },
+ {
+ "end": 47,
+ "start": 34,
+ "type": "REQUEST_INITIALIZATION"
+ },
+ {
+ "end": 50,
+ "start": 48,
+ "type": "REQUEST_EXECUTION"
+ },
+ {
+ "end": 54,
+ "start": 51,
+ "type": "RESPONSE_HANDLING"
+ }
+ ]
+ },
+ {
+ "clientMethod": {
+ "async": true,
+ "method": {
+ "service": {
+ "shortName": "JobService"
+ },
+ "shortName": "CreateHyperparameterTuningJob"
+ }
+ },
+ "file": "aiplatform_generated_aiplatform_v1beta1_job_service_create_hyperparameter_tuning_job_async.py",
+ "regionTag": "aiplatform_generated_aiplatform_v1beta1_JobService_CreateHyperparameterTuningJob_async",
+ "segments": [
+ {
+ "end": 56,
+ "start": 27,
+ "type": "FULL"
+ },
+ {
+ "end": 56,
+ "start": 27,
+ "type": "SHORT"
+ },
+ {
+ "end": 33,
+ "start": 31,
+ "type": "CLIENT_INITIALIZATION"
+ },
+ {
+ "end": 50,
+ "start": 34,
+ "type": "REQUEST_INITIALIZATION"
+ },
+ {
+ "end": 53,
+ "start": 51,
+ "type": "REQUEST_EXECUTION"
+ },
+ {
+ "end": 57,
+ "start": 54,
+ "type": "RESPONSE_HANDLING"
+ }
+ ]
+ },
+ {
+ "clientMethod": {
+ "method": {
+ "service": {
+ "shortName": "JobService"
+ },
+ "shortName": "CreateHyperparameterTuningJob"
+ }
+ },
+ "file": "aiplatform_generated_aiplatform_v1beta1_job_service_create_hyperparameter_tuning_job_sync.py",
+ "regionTag": "aiplatform_generated_aiplatform_v1beta1_JobService_CreateHyperparameterTuningJob_sync",
+ "segments": [
+ {
+ "end": 56,
+ "start": 27,
+ "type": "FULL"
+ },
+ {
+ "end": 56,
+ "start": 27,
+ "type": "SHORT"
+ },
+ {
+ "end": 33,
+ "start": 31,
+ "type": "CLIENT_INITIALIZATION"
+ },
+ {
+ "end": 50,
+ "start": 34,
+ "type": "REQUEST_INITIALIZATION"
+ },
+ {
+ "end": 53,
+ "start": 51,
+ "type": "REQUEST_EXECUTION"
+ },
+ {
+ "end": 57,
+ "start": 54,
+ "type": "RESPONSE_HANDLING"
+ }
+ ]
+ },
+ {
+ "clientMethod": {
+ "async": true,
+ "method": {
+ "service": {
+ "shortName": "JobService"
+ },
+ "shortName": "CreateModelDeploymentMonitoringJob"
+ }
+ },
+ "file": "aiplatform_generated_aiplatform_v1beta1_job_service_create_model_deployment_monitoring_job_async.py",
+ "regionTag": "aiplatform_generated_aiplatform_v1beta1_JobService_CreateModelDeploymentMonitoringJob_async",
+ "segments": [
+ {
+ "end": 49,
+ "start": 27,
+ "type": "FULL"
+ },
+ {
+ "end": 49,
+ "start": 27,
+ "type": "SHORT"
+ },
+ {
+ "end": 33,
+ "start": 31,
+ "type": "CLIENT_INITIALIZATION"
+ },
+ {
+ "end": 43,
+ "start": 34,
+ "type": "REQUEST_INITIALIZATION"
+ },
+ {
+ "end": 46,
+ "start": 44,
+ "type": "REQUEST_EXECUTION"
+ },
+ {
+ "end": 50,
+ "start": 47,
+ "type": "RESPONSE_HANDLING"
+ }
+ ]
+ },
+ {
+ "clientMethod": {
+ "method": {
+ "service": {
+ "shortName": "JobService"
+ },
+ "shortName": "CreateModelDeploymentMonitoringJob"
+ }
+ },
+ "file": "aiplatform_generated_aiplatform_v1beta1_job_service_create_model_deployment_monitoring_job_sync.py",
+ "regionTag": "aiplatform_generated_aiplatform_v1beta1_JobService_CreateModelDeploymentMonitoringJob_sync",
+ "segments": [
+ {
+ "end": 49,
+ "start": 27,
+ "type": "FULL"
+ },
+ {
+ "end": 49,
+ "start": 27,
+ "type": "SHORT"
+ },
+ {
+ "end": 33,
+ "start": 31,
+ "type": "CLIENT_INITIALIZATION"
+ },
+ {
+ "end": 43,
+ "start": 34,
+ "type": "REQUEST_INITIALIZATION"
+ },
+ {
+ "end": 46,
+ "start": 44,
+ "type": "REQUEST_EXECUTION"
+ },
+ {
+ "end": 50,
+ "start": 47,
+ "type": "RESPONSE_HANDLING"
+ }
+ ]
+ },
+ {
+ "clientMethod": {
+ "async": true,
+ "method": {
+ "service": {
+ "shortName": "JobService"
+ },
+ "shortName": "DeleteBatchPredictionJob"
+ }
+ },
+ "file": "aiplatform_generated_aiplatform_v1beta1_job_service_delete_batch_prediction_job_async.py",
+ "regionTag": "aiplatform_generated_aiplatform_v1beta1_JobService_DeleteBatchPredictionJob_async",
+ "segments": [
+ {
+ "end": 48,
+ "start": 27,
+ "type": "FULL"
+ },
+ {
+ "end": 48,
+ "start": 27,
+ "type": "SHORT"
+ },
+ {
+ "end": 33,
+ "start": 31,
+ "type": "CLIENT_INITIALIZATION"
+ },
+ {
+ "end": 38,
+ "start": 34,
+ "type": "REQUEST_INITIALIZATION"
+ },
+ {
+ "end": 45,
+ "start": 39,
+ "type": "REQUEST_EXECUTION"
+ },
+ {
+ "end": 49,
+ "start": 46,
+ "type": "RESPONSE_HANDLING"
+ }
+ ]
+ },
+ {
+ "clientMethod": {
+ "method": {
+ "service": {
+ "shortName": "JobService"
+ },
+ "shortName": "DeleteBatchPredictionJob"
+ }
+ },
+ "file": "aiplatform_generated_aiplatform_v1beta1_job_service_delete_batch_prediction_job_sync.py",
+ "regionTag": "aiplatform_generated_aiplatform_v1beta1_JobService_DeleteBatchPredictionJob_sync",
+ "segments": [
+ {
+ "end": 48,
+ "start": 27,
+ "type": "FULL"
+ },
+ {
+ "end": 48,
+ "start": 27,
+ "type": "SHORT"
+ },
+ {
+ "end": 33,
+ "start": 31,
+ "type": "CLIENT_INITIALIZATION"
+ },
+ {
+ "end": 38,
+ "start": 34,
+ "type": "REQUEST_INITIALIZATION"
+ },
+ {
+ "end": 45,
+ "start": 39,
+ "type": "REQUEST_EXECUTION"
+ },
+ {
+ "end": 49,
+ "start": 46,
+ "type": "RESPONSE_HANDLING"
+ }
+ ]
+ },
+ {
+ "clientMethod": {
+ "async": true,
+ "method": {
+ "service": {
+ "shortName": "JobService"
+ },
+ "shortName": "DeleteCustomJob"
+ }
+ },
+ "file": "aiplatform_generated_aiplatform_v1beta1_job_service_delete_custom_job_async.py",
+ "regionTag": "aiplatform_generated_aiplatform_v1beta1_JobService_DeleteCustomJob_async",
+ "segments": [
+ {
+ "end": 48,
+ "start": 27,
+ "type": "FULL"
+ },
+ {
+ "end": 48,
+ "start": 27,
+ "type": "SHORT"
+ },
+ {
+ "end": 33,
+ "start": 31,
+ "type": "CLIENT_INITIALIZATION"
+ },
+ {
+ "end": 38,
+ "start": 34,
+ "type": "REQUEST_INITIALIZATION"
+ },
+ {
+ "end": 45,
+ "start": 39,
+ "type": "REQUEST_EXECUTION"
+ },
+ {
+ "end": 49,
+ "start": 46,
+ "type": "RESPONSE_HANDLING"
+ }
+ ]
+ },
+ {
+ "clientMethod": {
+ "method": {
+ "service": {
+ "shortName": "JobService"
+ },
+ "shortName": "DeleteCustomJob"
+ }
+ },
+ "file": "aiplatform_generated_aiplatform_v1beta1_job_service_delete_custom_job_sync.py",
+ "regionTag": "aiplatform_generated_aiplatform_v1beta1_JobService_DeleteCustomJob_sync",
+ "segments": [
+ {
+ "end": 48,
+ "start": 27,
+ "type": "FULL"
+ },
+ {
+ "end": 48,
+ "start": 27,
+ "type": "SHORT"
+ },
+ {
+ "end": 33,
+ "start": 31,
+ "type": "CLIENT_INITIALIZATION"
+ },
+ {
+ "end": 38,
+ "start": 34,
+ "type": "REQUEST_INITIALIZATION"
+ },
+ {
+ "end": 45,
+ "start": 39,
+ "type": "REQUEST_EXECUTION"
+ },
+ {
+ "end": 49,
+ "start": 46,
+ "type": "RESPONSE_HANDLING"
+ }
+ ]
+ },
+ {
+ "clientMethod": {
+ "async": true,
+ "method": {
+ "service": {
+ "shortName": "JobService"
+ },
+ "shortName": "DeleteDataLabelingJob"
+ }
+ },
+ "file": "aiplatform_generated_aiplatform_v1beta1_job_service_delete_data_labeling_job_async.py",
+ "regionTag": "aiplatform_generated_aiplatform_v1beta1_JobService_DeleteDataLabelingJob_async",
+ "segments": [
+ {
+ "end": 48,
+ "start": 27,
+ "type": "FULL"
+ },
+ {
+ "end": 48,
+ "start": 27,
+ "type": "SHORT"
+ },
+ {
+ "end": 33,
+ "start": 31,
+ "type": "CLIENT_INITIALIZATION"
+ },
+ {
+ "end": 38,
+ "start": 34,
+ "type": "REQUEST_INITIALIZATION"
+ },
+ {
+ "end": 45,
+ "start": 39,
+ "type": "REQUEST_EXECUTION"
+ },
+ {
+ "end": 49,
+ "start": 46,
+ "type": "RESPONSE_HANDLING"
+ }
+ ]
+ },
+ {
+ "clientMethod": {
+ "method": {
+ "service": {
+ "shortName": "JobService"
+ },
+ "shortName": "DeleteDataLabelingJob"
+ }
+ },
+ "file": "aiplatform_generated_aiplatform_v1beta1_job_service_delete_data_labeling_job_sync.py",
+ "regionTag": "aiplatform_generated_aiplatform_v1beta1_JobService_DeleteDataLabelingJob_sync",
+ "segments": [
+ {
+ "end": 48,
+ "start": 27,
+ "type": "FULL"
+ },
+ {
+ "end": 48,
+ "start": 27,
+ "type": "SHORT"
+ },
+ {
+ "end": 33,
+ "start": 31,
+ "type": "CLIENT_INITIALIZATION"
+ },
+ {
+ "end": 38,
+ "start": 34,
+ "type": "REQUEST_INITIALIZATION"
+ },
+ {
+ "end": 45,
+ "start": 39,
+ "type": "REQUEST_EXECUTION"
+ },
+ {
+ "end": 49,
+ "start": 46,
+ "type": "RESPONSE_HANDLING"
+ }
+ ]
+ },
+ {
+ "clientMethod": {
+ "async": true,
+ "method": {
+ "service": {
+ "shortName": "JobService"
+ },
+ "shortName": "DeleteHyperparameterTuningJob"
+ }
+ },
+ "file": "aiplatform_generated_aiplatform_v1beta1_job_service_delete_hyperparameter_tuning_job_async.py",
+ "regionTag": "aiplatform_generated_aiplatform_v1beta1_JobService_DeleteHyperparameterTuningJob_async",
+ "segments": [
+ {
+ "end": 48,
+ "start": 27,
+ "type": "FULL"
+ },
+ {
+ "end": 48,
+ "start": 27,
+ "type": "SHORT"
+ },
+ {
+ "end": 33,
+ "start": 31,
+ "type": "CLIENT_INITIALIZATION"
+ },
+ {
+ "end": 38,
+ "start": 34,
+ "type": "REQUEST_INITIALIZATION"
+ },
+ {
+ "end": 45,
+ "start": 39,
+ "type": "REQUEST_EXECUTION"
+ },
+ {
+ "end": 49,
+ "start": 46,
+ "type": "RESPONSE_HANDLING"
+ }
+ ]
+ },
+ {
+ "clientMethod": {
+ "method": {
+ "service": {
+ "shortName": "JobService"
+ },
+ "shortName": "DeleteHyperparameterTuningJob"
+ }
+ },
+ "file": "aiplatform_generated_aiplatform_v1beta1_job_service_delete_hyperparameter_tuning_job_sync.py",
+ "regionTag": "aiplatform_generated_aiplatform_v1beta1_JobService_DeleteHyperparameterTuningJob_sync",
+ "segments": [
+ {
+ "end": 48,
+ "start": 27,
+ "type": "FULL"
+ },
+ {
+ "end": 48,
+ "start": 27,
+ "type": "SHORT"
+ },
+ {
+ "end": 33,
+ "start": 31,
+ "type": "CLIENT_INITIALIZATION"
+ },
+ {
+ "end": 38,
+ "start": 34,
+ "type": "REQUEST_INITIALIZATION"
+ },
+ {
+ "end": 45,
+ "start": 39,
+ "type": "REQUEST_EXECUTION"
+ },
+ {
+ "end": 49,
+ "start": 46,
+ "type": "RESPONSE_HANDLING"
+ }
+ ]
+ },
+ {
+ "clientMethod": {
+ "async": true,
+ "method": {
+ "service": {
+ "shortName": "JobService"
+ },
+ "shortName": "DeleteModelDeploymentMonitoringJob"
+ }
+ },
+ "file": "aiplatform_generated_aiplatform_v1beta1_job_service_delete_model_deployment_monitoring_job_async.py",
+ "regionTag": "aiplatform_generated_aiplatform_v1beta1_JobService_DeleteModelDeploymentMonitoringJob_async",
+ "segments": [
+ {
+ "end": 48,
+ "start": 27,
+ "type": "FULL"
+ },
+ {
+ "end": 48,
+ "start": 27,
+ "type": "SHORT"
+ },
+ {
+ "end": 33,
+ "start": 31,
+ "type": "CLIENT_INITIALIZATION"
+ },
+ {
+ "end": 38,
+ "start": 34,
+ "type": "REQUEST_INITIALIZATION"
+ },
+ {
+ "end": 45,
+ "start": 39,
+ "type": "REQUEST_EXECUTION"
+ },
+ {
+ "end": 49,
+ "start": 46,
+ "type": "RESPONSE_HANDLING"
+ }
+ ]
+ },
+ {
+ "clientMethod": {
+ "method": {
+ "service": {
+ "shortName": "JobService"
+ },
+ "shortName": "DeleteModelDeploymentMonitoringJob"
+ }
+ },
+ "file": "aiplatform_generated_aiplatform_v1beta1_job_service_delete_model_deployment_monitoring_job_sync.py",
+ "regionTag": "aiplatform_generated_aiplatform_v1beta1_JobService_DeleteModelDeploymentMonitoringJob_sync",
+ "segments": [
+ {
+ "end": 48,
+ "start": 27,
+ "type": "FULL"
+ },
+ {
+ "end": 48,
+ "start": 27,
+ "type": "SHORT"
+ },
+ {
+ "end": 33,
+ "start": 31,
+ "type": "CLIENT_INITIALIZATION"
+ },
+ {
+ "end": 38,
+ "start": 34,
+ "type": "REQUEST_INITIALIZATION"
+ },
+ {
+ "end": 45,
+ "start": 39,
+ "type": "REQUEST_EXECUTION"
+ },
+ {
+ "end": 49,
+ "start": 46,
+ "type": "RESPONSE_HANDLING"
+ }
+ ]
+ },
+ {
+ "clientMethod": {
+ "async": true,
+ "method": {
+ "service": {
+ "shortName": "JobService"
+ },
+ "shortName": "GetBatchPredictionJob"
+ }
+ },
+ "file": "aiplatform_generated_aiplatform_v1beta1_job_service_get_batch_prediction_job_async.py",
+ "regionTag": "aiplatform_generated_aiplatform_v1beta1_JobService_GetBatchPredictionJob_async",
+ "segments": [
+ {
+ "end": 44,
+ "start": 27,
+ "type": "FULL"
+ },
+ {
+ "end": 44,
+ "start": 27,
+ "type": "SHORT"
+ },
+ {
+ "end": 33,
+ "start": 31,
+ "type": "CLIENT_INITIALIZATION"
+ },
+ {
+ "end": 38,
+ "start": 34,
+ "type": "REQUEST_INITIALIZATION"
+ },
+ {
+ "end": 41,
+ "start": 39,
+ "type": "REQUEST_EXECUTION"
+ },
+ {
+ "end": 45,
+ "start": 42,
+ "type": "RESPONSE_HANDLING"
+ }
+ ]
+ },
+ {
+ "clientMethod": {
+ "method": {
+ "service": {
+ "shortName": "JobService"
+ },
+ "shortName": "GetBatchPredictionJob"
+ }
+ },
+ "file": "aiplatform_generated_aiplatform_v1beta1_job_service_get_batch_prediction_job_sync.py",
+ "regionTag": "aiplatform_generated_aiplatform_v1beta1_JobService_GetBatchPredictionJob_sync",
+ "segments": [
+ {
+ "end": 44,
+ "start": 27,
+ "type": "FULL"
+ },
+ {
+ "end": 44,
+ "start": 27,
+ "type": "SHORT"
+ },
+ {
+ "end": 33,
+ "start": 31,
+ "type": "CLIENT_INITIALIZATION"
+ },
+ {
+ "end": 38,
+ "start": 34,
+ "type": "REQUEST_INITIALIZATION"
+ },
+ {
+ "end": 41,
+ "start": 39,
+ "type": "REQUEST_EXECUTION"
+ },
+ {
+ "end": 45,
+ "start": 42,
+ "type": "RESPONSE_HANDLING"
+ }
+ ]
+ },
+ {
+ "clientMethod": {
+ "async": true,
+ "method": {
+ "service": {
+ "shortName": "JobService"
+ },
+ "shortName": "GetCustomJob"
+ }
+ },
+ "file": "aiplatform_generated_aiplatform_v1beta1_job_service_get_custom_job_async.py",
+ "regionTag": "aiplatform_generated_aiplatform_v1beta1_JobService_GetCustomJob_async",
+ "segments": [
+ {
+ "end": 44,
+ "start": 27,
+ "type": "FULL"
+ },
+ {
+ "end": 44,
+ "start": 27,
+ "type": "SHORT"
+ },
+ {
+ "end": 33,
+ "start": 31,
+ "type": "CLIENT_INITIALIZATION"
+ },
+ {
+ "end": 38,
+ "start": 34,
+ "type": "REQUEST_INITIALIZATION"
+ },
+ {
+ "end": 41,
+ "start": 39,
+ "type": "REQUEST_EXECUTION"
+ },
+ {
+ "end": 45,
+ "start": 42,
+ "type": "RESPONSE_HANDLING"
+ }
+ ]
+ },
+ {
+ "clientMethod": {
+ "method": {
+ "service": {
+ "shortName": "JobService"
+ },
+ "shortName": "GetCustomJob"
+ }
+ },
+ "file": "aiplatform_generated_aiplatform_v1beta1_job_service_get_custom_job_sync.py",
+ "regionTag": "aiplatform_generated_aiplatform_v1beta1_JobService_GetCustomJob_sync",
+ "segments": [
+ {
+ "end": 44,
+ "start": 27,
+ "type": "FULL"
+ },
+ {
+ "end": 44,
+ "start": 27,
+ "type": "SHORT"
+ },
+ {
+ "end": 33,
+ "start": 31,
+ "type": "CLIENT_INITIALIZATION"
+ },
+ {
+ "end": 38,
+ "start": 34,
+ "type": "REQUEST_INITIALIZATION"
+ },
+ {
+ "end": 41,
+ "start": 39,
+ "type": "REQUEST_EXECUTION"
+ },
+ {
+ "end": 45,
+ "start": 42,
+ "type": "RESPONSE_HANDLING"
+ }
+ ]
+ },
+ {
+ "clientMethod": {
+ "async": true,
+ "method": {
+ "service": {
+ "shortName": "JobService"
+ },
+ "shortName": "GetDataLabelingJob"
+ }
+ },
+ "file": "aiplatform_generated_aiplatform_v1beta1_job_service_get_data_labeling_job_async.py",
+ "regionTag": "aiplatform_generated_aiplatform_v1beta1_JobService_GetDataLabelingJob_async",
+ "segments": [
+ {
+ "end": 44,
+ "start": 27,
+ "type": "FULL"
+ },
+ {
+ "end": 44,
+ "start": 27,
+ "type": "SHORT"
+ },
+ {
+ "end": 33,
+ "start": 31,
+ "type": "CLIENT_INITIALIZATION"
+ },
+ {
+ "end": 38,
+ "start": 34,
+ "type": "REQUEST_INITIALIZATION"
+ },
+ {
+ "end": 41,
+ "start": 39,
+ "type": "REQUEST_EXECUTION"
+ },
+ {
+ "end": 45,
+ "start": 42,
+ "type": "RESPONSE_HANDLING"
+ }
+ ]
+ },
+ {
+ "clientMethod": {
+ "method": {
+ "service": {
+ "shortName": "JobService"
+ },
+ "shortName": "GetDataLabelingJob"
+ }
+ },
+ "file": "aiplatform_generated_aiplatform_v1beta1_job_service_get_data_labeling_job_sync.py",
+ "regionTag": "aiplatform_generated_aiplatform_v1beta1_JobService_GetDataLabelingJob_sync",
+ "segments": [
+ {
+ "end": 44,
+ "start": 27,
+ "type": "FULL"
+ },
+ {
+ "end": 44,
+ "start": 27,
+ "type": "SHORT"
+ },
+ {
+ "end": 33,
+ "start": 31,
+ "type": "CLIENT_INITIALIZATION"
+ },
+ {
+ "end": 38,
+ "start": 34,
+ "type": "REQUEST_INITIALIZATION"
+ },
+ {
+ "end": 41,
+ "start": 39,
+ "type": "REQUEST_EXECUTION"
+ },
+ {
+ "end": 45,
+ "start": 42,
+ "type": "RESPONSE_HANDLING"
+ }
+ ]
+ },
+ {
+ "clientMethod": {
+ "async": true,
+ "method": {
+ "service": {
+ "shortName": "JobService"
+ },
+ "shortName": "GetHyperparameterTuningJob"
+ }
+ },
+ "file": "aiplatform_generated_aiplatform_v1beta1_job_service_get_hyperparameter_tuning_job_async.py",
+ "regionTag": "aiplatform_generated_aiplatform_v1beta1_JobService_GetHyperparameterTuningJob_async",
+ "segments": [
+ {
+ "end": 44,
+ "start": 27,
+ "type": "FULL"
+ },
+ {
+ "end": 44,
+ "start": 27,
+ "type": "SHORT"
+ },
+ {
+ "end": 33,
+ "start": 31,
+ "type": "CLIENT_INITIALIZATION"
+ },
+ {
+ "end": 38,
+ "start": 34,
+ "type": "REQUEST_INITIALIZATION"
+ },
+ {
+ "end": 41,
+ "start": 39,
+ "type": "REQUEST_EXECUTION"
+ },
+ {
+ "end": 45,
+ "start": 42,
+ "type": "RESPONSE_HANDLING"
+ }
+ ]
+ },
+ {
+ "clientMethod": {
+ "method": {
+ "service": {
+ "shortName": "JobService"
+ },
+ "shortName": "GetHyperparameterTuningJob"
+ }
+ },
+ "file": "aiplatform_generated_aiplatform_v1beta1_job_service_get_hyperparameter_tuning_job_sync.py",
+ "regionTag": "aiplatform_generated_aiplatform_v1beta1_JobService_GetHyperparameterTuningJob_sync",
+ "segments": [
+ {
+ "end": 44,
+ "start": 27,
+ "type": "FULL"
+ },
+ {
+ "end": 44,
+ "start": 27,
+ "type": "SHORT"
+ },
+ {
+ "end": 33,
+ "start": 31,
+ "type": "CLIENT_INITIALIZATION"
+ },
+ {
+ "end": 38,
+ "start": 34,
+ "type": "REQUEST_INITIALIZATION"
+ },
+ {
+ "end": 41,
+ "start": 39,
+ "type": "REQUEST_EXECUTION"
+ },
+ {
+ "end": 45,
+ "start": 42,
+ "type": "RESPONSE_HANDLING"
+ }
+ ]
+ },
+ {
+ "clientMethod": {
+ "async": true,
+ "method": {
+ "service": {
+ "shortName": "JobService"
+ },
+ "shortName": "GetModelDeploymentMonitoringJob"
+ }
+ },
+ "file": "aiplatform_generated_aiplatform_v1beta1_job_service_get_model_deployment_monitoring_job_async.py",
+ "regionTag": "aiplatform_generated_aiplatform_v1beta1_JobService_GetModelDeploymentMonitoringJob_async",
+ "segments": [
+ {
+ "end": 44,
+ "start": 27,
+ "type": "FULL"
+ },
+ {
+ "end": 44,
+ "start": 27,
+ "type": "SHORT"
+ },
+ {
+ "end": 33,
+ "start": 31,
+ "type": "CLIENT_INITIALIZATION"
+ },
+ {
+ "end": 38,
+ "start": 34,
+ "type": "REQUEST_INITIALIZATION"
+ },
+ {
+ "end": 41,
+ "start": 39,
+ "type": "REQUEST_EXECUTION"
+ },
+ {
+ "end": 45,
+ "start": 42,
+ "type": "RESPONSE_HANDLING"
+ }
+ ]
+ },
+ {
+ "clientMethod": {
+ "method": {
+ "service": {
+ "shortName": "JobService"
+ },
+ "shortName": "GetModelDeploymentMonitoringJob"
+ }
+ },
+ "file": "aiplatform_generated_aiplatform_v1beta1_job_service_get_model_deployment_monitoring_job_sync.py",
+ "regionTag": "aiplatform_generated_aiplatform_v1beta1_JobService_GetModelDeploymentMonitoringJob_sync",
+ "segments": [
+ {
+ "end": 44,
+ "start": 27,
+ "type": "FULL"
+ },
+ {
+ "end": 44,
+ "start": 27,
+ "type": "SHORT"
+ },
+ {
+ "end": 33,
+ "start": 31,
+ "type": "CLIENT_INITIALIZATION"
+ },
+ {
+ "end": 38,
+ "start": 34,
+ "type": "REQUEST_INITIALIZATION"
+ },
+ {
+ "end": 41,
+ "start": 39,
+ "type": "REQUEST_EXECUTION"
+ },
+ {
+ "end": 45,
+ "start": 42,
+ "type": "RESPONSE_HANDLING"
+ }
+ ]
+ },
+ {
+ "clientMethod": {
+ "async": true,
+ "method": {
+ "service": {
+ "shortName": "JobService"
+ },
+ "shortName": "ListBatchPredictionJobs"
+ }
+ },
+ "file": "aiplatform_generated_aiplatform_v1beta1_job_service_list_batch_prediction_jobs_async.py",
+ "regionTag": "aiplatform_generated_aiplatform_v1beta1_JobService_ListBatchPredictionJobs_async",
+ "segments": [
+ {
+ "end": 45,
+ "start": 27,
+ "type": "FULL"
+ },
+ {
+ "end": 45,
+ "start": 27,
+ "type": "SHORT"
+ },
+ {
+ "end": 33,
+ "start": 31,
+ "type": "CLIENT_INITIALIZATION"
+ },
+ {
+ "end": 38,
+ "start": 34,
+ "type": "REQUEST_INITIALIZATION"
+ },
+ {
+ "end": 41,
+ "start": 39,
+ "type": "REQUEST_EXECUTION"
+ },
+ {
+ "end": 46,
+ "start": 42,
+ "type": "RESPONSE_HANDLING"
+ }
+ ]
+ },
+ {
+ "clientMethod": {
+ "method": {
+ "service": {
+ "shortName": "JobService"
+ },
+ "shortName": "ListBatchPredictionJobs"
+ }
+ },
+ "file": "aiplatform_generated_aiplatform_v1beta1_job_service_list_batch_prediction_jobs_sync.py",
+ "regionTag": "aiplatform_generated_aiplatform_v1beta1_JobService_ListBatchPredictionJobs_sync",
+ "segments": [
+ {
+ "end": 45,
+ "start": 27,
+ "type": "FULL"
+ },
+ {
+ "end": 45,
+ "start": 27,
+ "type": "SHORT"
+ },
+ {
+ "end": 33,
+ "start": 31,
+ "type": "CLIENT_INITIALIZATION"
+ },
+ {
+ "end": 38,
+ "start": 34,
+ "type": "REQUEST_INITIALIZATION"
+ },
+ {
+ "end": 41,
+ "start": 39,
+ "type": "REQUEST_EXECUTION"
+ },
+ {
+ "end": 46,
+ "start": 42,
+ "type": "RESPONSE_HANDLING"
+ }
+ ]
+ },
+ {
+ "clientMethod": {
+ "async": true,
+ "method": {
+ "service": {
+ "shortName": "JobService"
+ },
+ "shortName": "ListCustomJobs"
+ }
+ },
+ "file": "aiplatform_generated_aiplatform_v1beta1_job_service_list_custom_jobs_async.py",
+ "regionTag": "aiplatform_generated_aiplatform_v1beta1_JobService_ListCustomJobs_async",
+ "segments": [
+ {
+ "end": 45,
+ "start": 27,
+ "type": "FULL"
+ },
+ {
+ "end": 45,
+ "start": 27,
+ "type": "SHORT"
+ },
+ {
+ "end": 33,
+ "start": 31,
+ "type": "CLIENT_INITIALIZATION"
+ },
+ {
+ "end": 38,
+ "start": 34,
+ "type": "REQUEST_INITIALIZATION"
+ },
+ {
+ "end": 41,
+ "start": 39,
+ "type": "REQUEST_EXECUTION"
+ },
+ {
+ "end": 46,
+ "start": 42,
+ "type": "RESPONSE_HANDLING"
+ }
+ ]
+ },
+ {
+ "clientMethod": {
+ "method": {
+ "service": {
+ "shortName": "JobService"
+ },
+ "shortName": "ListCustomJobs"
+ }
+ },
+ "file": "aiplatform_generated_aiplatform_v1beta1_job_service_list_custom_jobs_sync.py",
+ "regionTag": "aiplatform_generated_aiplatform_v1beta1_JobService_ListCustomJobs_sync",
+ "segments": [
+ {
+ "end": 45,
+ "start": 27,
+ "type": "FULL"
+ },
+ {
+ "end": 45,
+ "start": 27,
+ "type": "SHORT"
+ },
+ {
+ "end": 33,
+ "start": 31,
+ "type": "CLIENT_INITIALIZATION"
+ },
+ {
+ "end": 38,
+ "start": 34,
+ "type": "REQUEST_INITIALIZATION"
+ },
+ {
+ "end": 41,
+ "start": 39,
+ "type": "REQUEST_EXECUTION"
+ },
+ {
+ "end": 46,
+ "start": 42,
+ "type": "RESPONSE_HANDLING"
+ }
+ ]
+ },
+ {
+ "clientMethod": {
+ "async": true,
+ "method": {
+ "service": {
+ "shortName": "JobService"
+ },
+ "shortName": "ListDataLabelingJobs"
+ }
+ },
+ "file": "aiplatform_generated_aiplatform_v1beta1_job_service_list_data_labeling_jobs_async.py",
+ "regionTag": "aiplatform_generated_aiplatform_v1beta1_JobService_ListDataLabelingJobs_async",
+ "segments": [
+ {
+ "end": 45,
+ "start": 27,
+ "type": "FULL"
+ },
+ {
+ "end": 45,
+ "start": 27,
+ "type": "SHORT"
+ },
+ {
+ "end": 33,
+ "start": 31,
+ "type": "CLIENT_INITIALIZATION"
+ },
+ {
+ "end": 38,
+ "start": 34,
+ "type": "REQUEST_INITIALIZATION"
+ },
+ {
+ "end": 41,
+ "start": 39,
+ "type": "REQUEST_EXECUTION"
+ },
+ {
+ "end": 46,
+ "start": 42,
+ "type": "RESPONSE_HANDLING"
+ }
+ ]
+ },
+ {
+ "clientMethod": {
+ "method": {
+ "service": {
+ "shortName": "JobService"
+ },
+ "shortName": "ListDataLabelingJobs"
+ }
+ },
+ "file": "aiplatform_generated_aiplatform_v1beta1_job_service_list_data_labeling_jobs_sync.py",
+ "regionTag": "aiplatform_generated_aiplatform_v1beta1_JobService_ListDataLabelingJobs_sync",
+ "segments": [
+ {
+ "end": 45,
+ "start": 27,
+ "type": "FULL"
+ },
+ {
+ "end": 45,
+ "start": 27,
+ "type": "SHORT"
+ },
+ {
+ "end": 33,
+ "start": 31,
+ "type": "CLIENT_INITIALIZATION"
+ },
+ {
+ "end": 38,
+ "start": 34,
+ "type": "REQUEST_INITIALIZATION"
+ },
+ {
+ "end": 41,
+ "start": 39,
+ "type": "REQUEST_EXECUTION"
+ },
+ {
+ "end": 46,
+ "start": 42,
+ "type": "RESPONSE_HANDLING"
+ }
+ ]
+ },
+ {
+ "clientMethod": {
+ "async": true,
+ "method": {
+ "service": {
+ "shortName": "JobService"
+ },
+ "shortName": "ListHyperparameterTuningJobs"
+ }
+ },
+ "file": "aiplatform_generated_aiplatform_v1beta1_job_service_list_hyperparameter_tuning_jobs_async.py",
+ "regionTag": "aiplatform_generated_aiplatform_v1beta1_JobService_ListHyperparameterTuningJobs_async",
+ "segments": [
+ {
+ "end": 45,
+ "start": 27,
+ "type": "FULL"
+ },
+ {
+ "end": 45,
+ "start": 27,
+ "type": "SHORT"
+ },
+ {
+ "end": 33,
+ "start": 31,
+ "type": "CLIENT_INITIALIZATION"
+ },
+ {
+ "end": 38,
+ "start": 34,
+ "type": "REQUEST_INITIALIZATION"
+ },
+ {
+ "end": 41,
+ "start": 39,
+ "type": "REQUEST_EXECUTION"
+ },
+ {
+ "end": 46,
+ "start": 42,
+ "type": "RESPONSE_HANDLING"
+ }
+ ]
+ },
+ {
+ "clientMethod": {
+ "method": {
+ "service": {
+ "shortName": "JobService"
+ },
+ "shortName": "ListHyperparameterTuningJobs"
+ }
+ },
+ "file": "aiplatform_generated_aiplatform_v1beta1_job_service_list_hyperparameter_tuning_jobs_sync.py",
+ "regionTag": "aiplatform_generated_aiplatform_v1beta1_JobService_ListHyperparameterTuningJobs_sync",
+ "segments": [
+ {
+ "end": 45,
+ "start": 27,
+ "type": "FULL"
+ },
+ {
+ "end": 45,
+ "start": 27,
+ "type": "SHORT"
+ },
+ {
+ "end": 33,
+ "start": 31,
+ "type": "CLIENT_INITIALIZATION"
+ },
+ {
+ "end": 38,
+ "start": 34,
+ "type": "REQUEST_INITIALIZATION"
+ },
+ {
+ "end": 41,
+ "start": 39,
+ "type": "REQUEST_EXECUTION"
+ },
+ {
+ "end": 46,
+ "start": 42,
+ "type": "RESPONSE_HANDLING"
+ }
+ ]
+ },
+ {
+ "clientMethod": {
+ "async": true,
+ "method": {
+ "service": {
+ "shortName": "JobService"
+ },
+ "shortName": "ListModelDeploymentMonitoringJobs"
+ }
+ },
+ "file": "aiplatform_generated_aiplatform_v1beta1_job_service_list_model_deployment_monitoring_jobs_async.py",
+ "regionTag": "aiplatform_generated_aiplatform_v1beta1_JobService_ListModelDeploymentMonitoringJobs_async",
+ "segments": [
+ {
+ "end": 45,
+ "start": 27,
+ "type": "FULL"
+ },
+ {
+ "end": 45,
+ "start": 27,
+ "type": "SHORT"
+ },
+ {
+ "end": 33,
+ "start": 31,
+ "type": "CLIENT_INITIALIZATION"
+ },
+ {
+ "end": 38,
+ "start": 34,
+ "type": "REQUEST_INITIALIZATION"
+ },
+ {
+ "end": 41,
+ "start": 39,
+ "type": "REQUEST_EXECUTION"
+ },
+ {
+ "end": 46,
+ "start": 42,
+ "type": "RESPONSE_HANDLING"
+ }
+ ]
+ },
+ {
+ "clientMethod": {
+ "method": {
+ "service": {
+ "shortName": "JobService"
+ },
+ "shortName": "ListModelDeploymentMonitoringJobs"
+ }
+ },
+ "file": "aiplatform_generated_aiplatform_v1beta1_job_service_list_model_deployment_monitoring_jobs_sync.py",
+ "regionTag": "aiplatform_generated_aiplatform_v1beta1_JobService_ListModelDeploymentMonitoringJobs_sync",
+ "segments": [
+ {
+ "end": 45,
+ "start": 27,
+ "type": "FULL"
+ },
+ {
+ "end": 45,
+ "start": 27,
+ "type": "SHORT"
+ },
+ {
+ "end": 33,
+ "start": 31,
+ "type": "CLIENT_INITIALIZATION"
+ },
+ {
+ "end": 38,
+ "start": 34,
+ "type": "REQUEST_INITIALIZATION"
+ },
+ {
+ "end": 41,
+ "start": 39,
+ "type": "REQUEST_EXECUTION"
+ },
+ {
+ "end": 46,
+ "start": 42,
+ "type": "RESPONSE_HANDLING"
+ }
+ ]
+ },
+ {
+ "clientMethod": {
+ "async": true,
+ "method": {
+ "service": {
+ "shortName": "JobService"
+ },
+ "shortName": "PauseModelDeploymentMonitoringJob"
+ }
+ },
+ "file": "aiplatform_generated_aiplatform_v1beta1_job_service_pause_model_deployment_monitoring_job_async.py",
+ "regionTag": "aiplatform_generated_aiplatform_v1beta1_JobService_PauseModelDeploymentMonitoringJob_async",
+ "segments": [
+ {
+ "end": 42,
+ "start": 27,
+ "type": "FULL"
+ },
+ {
+ "end": 42,
+ "start": 27,
+ "type": "SHORT"
+ },
+ {
+ "end": 33,
+ "start": 31,
+ "type": "CLIENT_INITIALIZATION"
+ },
+ {
+ "end": 38,
+ "start": 34,
+ "type": "REQUEST_INITIALIZATION"
+ },
+ {
+ "start": 39,
+ "type": "REQUEST_EXECUTION"
+ },
+ {
+ "end": 43,
+ "type": "RESPONSE_HANDLING"
+ }
+ ]
+ },
+ {
+ "clientMethod": {
+ "method": {
+ "service": {
+ "shortName": "JobService"
+ },
+ "shortName": "PauseModelDeploymentMonitoringJob"
+ }
+ },
+ "file": "aiplatform_generated_aiplatform_v1beta1_job_service_pause_model_deployment_monitoring_job_sync.py",
+ "regionTag": "aiplatform_generated_aiplatform_v1beta1_JobService_PauseModelDeploymentMonitoringJob_sync",
+ "segments": [
+ {
+ "end": 42,
+ "start": 27,
+ "type": "FULL"
+ },
+ {
+ "end": 42,
+ "start": 27,
+ "type": "SHORT"
+ },
+ {
+ "end": 33,
+ "start": 31,
+ "type": "CLIENT_INITIALIZATION"
+ },
+ {
+ "end": 38,
+ "start": 34,
+ "type": "REQUEST_INITIALIZATION"
+ },
+ {
+ "start": 39,
+ "type": "REQUEST_EXECUTION"
+ },
+ {
+ "end": 43,
+ "type": "RESPONSE_HANDLING"
+ }
+ ]
+ },
+ {
+ "clientMethod": {
+ "async": true,
+ "method": {
+ "service": {
+ "shortName": "JobService"
+ },
+ "shortName": "ResumeModelDeploymentMonitoringJob"
+ }
+ },
+ "file": "aiplatform_generated_aiplatform_v1beta1_job_service_resume_model_deployment_monitoring_job_async.py",
+ "regionTag": "aiplatform_generated_aiplatform_v1beta1_JobService_ResumeModelDeploymentMonitoringJob_async",
+ "segments": [
+ {
+ "end": 42,
+ "start": 27,
+ "type": "FULL"
+ },
+ {
+ "end": 42,
+ "start": 27,
+ "type": "SHORT"
+ },
+ {
+ "end": 33,
+ "start": 31,
+ "type": "CLIENT_INITIALIZATION"
+ },
+ {
+ "end": 38,
+ "start": 34,
+ "type": "REQUEST_INITIALIZATION"
+ },
+ {
+ "start": 39,
+ "type": "REQUEST_EXECUTION"
+ },
+ {
+ "end": 43,
+ "type": "RESPONSE_HANDLING"
+ }
+ ]
+ },
+ {
+ "clientMethod": {
+ "method": {
+ "service": {
+ "shortName": "JobService"
+ },
+ "shortName": "ResumeModelDeploymentMonitoringJob"
+ }
+ },
+ "file": "aiplatform_generated_aiplatform_v1beta1_job_service_resume_model_deployment_monitoring_job_sync.py",
+ "regionTag": "aiplatform_generated_aiplatform_v1beta1_JobService_ResumeModelDeploymentMonitoringJob_sync",
+ "segments": [
+ {
+ "end": 42,
+ "start": 27,
+ "type": "FULL"
+ },
+ {
+ "end": 42,
+ "start": 27,
+ "type": "SHORT"
+ },
+ {
+ "end": 33,
+ "start": 31,
+ "type": "CLIENT_INITIALIZATION"
+ },
+ {
+ "end": 38,
+ "start": 34,
+ "type": "REQUEST_INITIALIZATION"
+ },
+ {
+ "start": 39,
+ "type": "REQUEST_EXECUTION"
+ },
+ {
+ "end": 43,
+ "type": "RESPONSE_HANDLING"
+ }
+ ]
+ },
+ {
+ "clientMethod": {
+ "async": true,
+ "method": {
+ "service": {
+ "shortName": "JobService"
+ },
+ "shortName": "SearchModelDeploymentMonitoringStatsAnomalies"
+ }
+ },
+ "file": "aiplatform_generated_aiplatform_v1beta1_job_service_search_model_deployment_monitoring_stats_anomalies_async.py",
+ "regionTag": "aiplatform_generated_aiplatform_v1beta1_JobService_SearchModelDeploymentMonitoringStatsAnomalies_async",
+ "segments": [
+ {
+ "end": 46,
+ "start": 27,
+ "type": "FULL"
+ },
+ {
+ "end": 46,
+ "start": 27,
+ "type": "SHORT"
+ },
+ {
+ "end": 33,
+ "start": 31,
+ "type": "CLIENT_INITIALIZATION"
+ },
+ {
+ "end": 39,
+ "start": 34,
+ "type": "REQUEST_INITIALIZATION"
+ },
+ {
+ "end": 42,
+ "start": 40,
+ "type": "REQUEST_EXECUTION"
+ },
+ {
+ "end": 47,
+ "start": 43,
+ "type": "RESPONSE_HANDLING"
+ }
+ ]
+ },
+ {
+ "clientMethod": {
+ "method": {
+ "service": {
+ "shortName": "JobService"
+ },
+ "shortName": "SearchModelDeploymentMonitoringStatsAnomalies"
+ }
+ },
+ "file": "aiplatform_generated_aiplatform_v1beta1_job_service_search_model_deployment_monitoring_stats_anomalies_sync.py",
+ "regionTag": "aiplatform_generated_aiplatform_v1beta1_JobService_SearchModelDeploymentMonitoringStatsAnomalies_sync",
+ "segments": [
+ {
+ "end": 46,
+ "start": 27,
+ "type": "FULL"
+ },
+ {
+ "end": 46,
+ "start": 27,
+ "type": "SHORT"
+ },
+ {
+ "end": 33,
+ "start": 31,
+ "type": "CLIENT_INITIALIZATION"
+ },
+ {
+ "end": 39,
+ "start": 34,
+ "type": "REQUEST_INITIALIZATION"
+ },
+ {
+ "end": 42,
+ "start": 40,
+ "type": "REQUEST_EXECUTION"
+ },
+ {
+ "end": 47,
+ "start": 43,
+ "type": "RESPONSE_HANDLING"
+ }
+ ]
+ },
+ {
+ "clientMethod": {
+ "async": true,
+ "method": {
+ "service": {
+ "shortName": "JobService"
+ },
+ "shortName": "UpdateModelDeploymentMonitoringJob"
+ }
+ },
+ "file": "aiplatform_generated_aiplatform_v1beta1_job_service_update_model_deployment_monitoring_job_async.py",
+ "regionTag": "aiplatform_generated_aiplatform_v1beta1_JobService_UpdateModelDeploymentMonitoringJob_async",
+ "segments": [
+ {
+ "end": 52,
+ "start": 27,
+ "type": "FULL"
+ },
+ {
+ "end": 52,
+ "start": 27,
+ "type": "SHORT"
+ },
+ {
+ "end": 33,
+ "start": 31,
+ "type": "CLIENT_INITIALIZATION"
+ },
+ {
+ "end": 42,
+ "start": 34,
+ "type": "REQUEST_INITIALIZATION"
+ },
+ {
+ "end": 49,
+ "start": 43,
+ "type": "REQUEST_EXECUTION"
+ },
+ {
+ "end": 53,
+ "start": 50,
+ "type": "RESPONSE_HANDLING"
+ }
+ ]
+ },
+ {
+ "clientMethod": {
+ "method": {
+ "service": {
+ "shortName": "JobService"
+ },
+ "shortName": "UpdateModelDeploymentMonitoringJob"
+ }
+ },
+ "file": "aiplatform_generated_aiplatform_v1beta1_job_service_update_model_deployment_monitoring_job_sync.py",
+ "regionTag": "aiplatform_generated_aiplatform_v1beta1_JobService_UpdateModelDeploymentMonitoringJob_sync",
+ "segments": [
+ {
+ "end": 52,
+ "start": 27,
+ "type": "FULL"
+ },
+ {
+ "end": 52,
+ "start": 27,
+ "type": "SHORT"
+ },
+ {
+ "end": 33,
+ "start": 31,
+ "type": "CLIENT_INITIALIZATION"
+ },
+ {
+ "end": 42,
+ "start": 34,
+ "type": "REQUEST_INITIALIZATION"
+ },
+ {
+ "end": 49,
+ "start": 43,
+ "type": "REQUEST_EXECUTION"
+ },
+ {
+ "end": 53,
+ "start": 50,
+ "type": "RESPONSE_HANDLING"
+ }
+ ]
+ },
+ {
+ "clientMethod": {
+ "async": true,
+ "method": {
+ "service": {
+ "shortName": "MetadataService"
+ },
+ "shortName": "AddContextArtifactsAndExecutions"
+ }
+ },
+ "file": "aiplatform_generated_aiplatform_v1beta1_metadata_service_add_context_artifacts_and_executions_async.py",
+ "regionTag": "aiplatform_generated_aiplatform_v1beta1_MetadataService_AddContextArtifactsAndExecutions_async",
+ "segments": [
+ {
+ "end": 44,
+ "start": 27,
+ "type": "FULL"
+ },
+ {
+ "end": 44,
+ "start": 27,
+ "type": "SHORT"
+ },
+ {
+ "end": 33,
+ "start": 31,
+ "type": "CLIENT_INITIALIZATION"
+ },
+ {
+ "end": 38,
+ "start": 34,
+ "type": "REQUEST_INITIALIZATION"
+ },
+ {
+ "end": 41,
+ "start": 39,
+ "type": "REQUEST_EXECUTION"
+ },
+ {
+ "end": 45,
+ "start": 42,
+ "type": "RESPONSE_HANDLING"
+ }
+ ]
+ },
+ {
+ "clientMethod": {
+ "method": {
+ "service": {
+ "shortName": "MetadataService"
+ },
+ "shortName": "AddContextArtifactsAndExecutions"
+ }
+ },
+ "file": "aiplatform_generated_aiplatform_v1beta1_metadata_service_add_context_artifacts_and_executions_sync.py",
+ "regionTag": "aiplatform_generated_aiplatform_v1beta1_MetadataService_AddContextArtifactsAndExecutions_sync",
+ "segments": [
+ {
+ "end": 44,
+ "start": 27,
+ "type": "FULL"
+ },
+ {
+ "end": 44,
+ "start": 27,
+ "type": "SHORT"
+ },
+ {
+ "end": 33,
+ "start": 31,
+ "type": "CLIENT_INITIALIZATION"
+ },
+ {
+ "end": 38,
+ "start": 34,
+ "type": "REQUEST_INITIALIZATION"
+ },
+ {
+ "end": 41,
+ "start": 39,
+ "type": "REQUEST_EXECUTION"
+ },
+ {
+ "end": 45,
+ "start": 42,
+ "type": "RESPONSE_HANDLING"
+ }
+ ]
+ },
+ {
+ "clientMethod": {
+ "async": true,
+ "method": {
+ "service": {
+ "shortName": "MetadataService"
+ },
+ "shortName": "AddContextChildren"
+ }
+ },
+ "file": "aiplatform_generated_aiplatform_v1beta1_metadata_service_add_context_children_async.py",
+ "regionTag": "aiplatform_generated_aiplatform_v1beta1_MetadataService_AddContextChildren_async",
+ "segments": [
+ {
+ "end": 44,
+ "start": 27,
+ "type": "FULL"
+ },
+ {
+ "end": 44,
+ "start": 27,
+ "type": "SHORT"
+ },
+ {
+ "end": 33,
+ "start": 31,
+ "type": "CLIENT_INITIALIZATION"
+ },
+ {
+ "end": 38,
+ "start": 34,
+ "type": "REQUEST_INITIALIZATION"
+ },
+ {
+ "end": 41,
+ "start": 39,
+ "type": "REQUEST_EXECUTION"
+ },
+ {
+ "end": 45,
+ "start": 42,
+ "type": "RESPONSE_HANDLING"
+ }
+ ]
+ },
+ {
+ "clientMethod": {
+ "method": {
+ "service": {
+ "shortName": "MetadataService"
+ },
+ "shortName": "AddContextChildren"
+ }
+ },
+ "file": "aiplatform_generated_aiplatform_v1beta1_metadata_service_add_context_children_sync.py",
+ "regionTag": "aiplatform_generated_aiplatform_v1beta1_MetadataService_AddContextChildren_sync",
+ "segments": [
+ {
+ "end": 44,
+ "start": 27,
+ "type": "FULL"
+ },
+ {
+ "end": 44,
+ "start": 27,
+ "type": "SHORT"
+ },
+ {
+ "end": 33,
+ "start": 31,
+ "type": "CLIENT_INITIALIZATION"
+ },
+ {
+ "end": 38,
+ "start": 34,
+ "type": "REQUEST_INITIALIZATION"
+ },
+ {
+ "end": 41,
+ "start": 39,
+ "type": "REQUEST_EXECUTION"
+ },
+ {
+ "end": 45,
+ "start": 42,
+ "type": "RESPONSE_HANDLING"
+ }
+ ]
+ },
+ {
+ "clientMethod": {
+ "async": true,
+ "method": {
+ "service": {
+ "shortName": "MetadataService"
+ },
+ "shortName": "AddExecutionEvents"
+ }
+ },
+ "file": "aiplatform_generated_aiplatform_v1beta1_metadata_service_add_execution_events_async.py",
+ "regionTag": "aiplatform_generated_aiplatform_v1beta1_MetadataService_AddExecutionEvents_async",
+ "segments": [
+ {
+ "end": 44,
+ "start": 27,
+ "type": "FULL"
+ },
+ {
+ "end": 44,
+ "start": 27,
+ "type": "SHORT"
+ },
+ {
+ "end": 33,
+ "start": 31,
+ "type": "CLIENT_INITIALIZATION"
+ },
+ {
+ "end": 38,
+ "start": 34,
+ "type": "REQUEST_INITIALIZATION"
+ },
+ {
+ "end": 41,
+ "start": 39,
+ "type": "REQUEST_EXECUTION"
+ },
+ {
+ "end": 45,
+ "start": 42,
+ "type": "RESPONSE_HANDLING"
+ }
+ ]
+ },
+ {
+ "clientMethod": {
+ "method": {
+ "service": {
+ "shortName": "MetadataService"
+ },
+ "shortName": "AddExecutionEvents"
+ }
+ },
+ "file": "aiplatform_generated_aiplatform_v1beta1_metadata_service_add_execution_events_sync.py",
+ "regionTag": "aiplatform_generated_aiplatform_v1beta1_MetadataService_AddExecutionEvents_sync",
+ "segments": [
+ {
+ "end": 44,
+ "start": 27,
+ "type": "FULL"
+ },
+ {
+ "end": 44,
+ "start": 27,
+ "type": "SHORT"
+ },
+ {
+ "end": 33,
+ "start": 31,
+ "type": "CLIENT_INITIALIZATION"
+ },
+ {
+ "end": 38,
+ "start": 34,
+ "type": "REQUEST_INITIALIZATION"
+ },
+ {
+ "end": 41,
+ "start": 39,
+ "type": "REQUEST_EXECUTION"
+ },
+ {
+ "end": 45,
+ "start": 42,
+ "type": "RESPONSE_HANDLING"
+ }
+ ]
+ },
+ {
+ "clientMethod": {
+ "async": true,
+ "method": {
+ "service": {
+ "shortName": "MetadataService"
+ },
+ "shortName": "CreateArtifact"
+ }
+ },
+ "file": "aiplatform_generated_aiplatform_v1beta1_metadata_service_create_artifact_async.py",
+ "regionTag": "aiplatform_generated_aiplatform_v1beta1_MetadataService_CreateArtifact_async",
+ "segments": [
+ {
+ "end": 44,
+ "start": 27,
+ "type": "FULL"
+ },
+ {
+ "end": 44,
+ "start": 27,
+ "type": "SHORT"
+ },
+ {
+ "end": 33,
+ "start": 31,
+ "type": "CLIENT_INITIALIZATION"
+ },
+ {
+ "end": 38,
+ "start": 34,
+ "type": "REQUEST_INITIALIZATION"
+ },
+ {
+ "end": 41,
+ "start": 39,
+ "type": "REQUEST_EXECUTION"
+ },
+ {
+ "end": 45,
+ "start": 42,
+ "type": "RESPONSE_HANDLING"
+ }
+ ]
+ },
+ {
+ "clientMethod": {
+ "method": {
+ "service": {
+ "shortName": "MetadataService"
+ },
+ "shortName": "CreateArtifact"
+ }
+ },
+ "file": "aiplatform_generated_aiplatform_v1beta1_metadata_service_create_artifact_sync.py",
+ "regionTag": "aiplatform_generated_aiplatform_v1beta1_MetadataService_CreateArtifact_sync",
+ "segments": [
+ {
+ "end": 44,
+ "start": 27,
+ "type": "FULL"
+ },
+ {
+ "end": 44,
+ "start": 27,
+ "type": "SHORT"
+ },
+ {
+ "end": 33,
+ "start": 31,
+ "type": "CLIENT_INITIALIZATION"
+ },
+ {
+ "end": 38,
+ "start": 34,
+ "type": "REQUEST_INITIALIZATION"
+ },
+ {
+ "end": 41,
+ "start": 39,
+ "type": "REQUEST_EXECUTION"
+ },
+ {
+ "end": 45,
+ "start": 42,
+ "type": "RESPONSE_HANDLING"
+ }
+ ]
+ },
+ {
+ "clientMethod": {
+ "async": true,
+ "method": {
+ "service": {
+ "shortName": "MetadataService"
+ },
+ "shortName": "CreateContext"
+ }
+ },
+ "file": "aiplatform_generated_aiplatform_v1beta1_metadata_service_create_context_async.py",
+ "regionTag": "aiplatform_generated_aiplatform_v1beta1_MetadataService_CreateContext_async",
+ "segments": [
+ {
+ "end": 44,
+ "start": 27,
+ "type": "FULL"
+ },
+ {
+ "end": 44,
+ "start": 27,
+ "type": "SHORT"
+ },
+ {
+ "end": 33,
+ "start": 31,
+ "type": "CLIENT_INITIALIZATION"
+ },
+ {
+ "end": 38,
+ "start": 34,
+ "type": "REQUEST_INITIALIZATION"
+ },
+ {
+ "end": 41,
+ "start": 39,
+ "type": "REQUEST_EXECUTION"
+ },
+ {
+ "end": 45,
+ "start": 42,
+ "type": "RESPONSE_HANDLING"
+ }
+ ]
+ },
+ {
+ "clientMethod": {
+ "method": {
+ "service": {
+ "shortName": "MetadataService"
+ },
+ "shortName": "CreateContext"
+ }
+ },
+ "file": "aiplatform_generated_aiplatform_v1beta1_metadata_service_create_context_sync.py",
+ "regionTag": "aiplatform_generated_aiplatform_v1beta1_MetadataService_CreateContext_sync",
+ "segments": [
+ {
+ "end": 44,
+ "start": 27,
+ "type": "FULL"
+ },
+ {
+ "end": 44,
+ "start": 27,
+ "type": "SHORT"
+ },
+ {
+ "end": 33,
+ "start": 31,
+ "type": "CLIENT_INITIALIZATION"
+ },
+ {
+ "end": 38,
+ "start": 34,
+ "type": "REQUEST_INITIALIZATION"
+ },
+ {
+ "end": 41,
+ "start": 39,
+ "type": "REQUEST_EXECUTION"
+ },
+ {
+ "end": 45,
+ "start": 42,
+ "type": "RESPONSE_HANDLING"
+ }
+ ]
+ },
+ {
+ "clientMethod": {
+ "async": true,
+ "method": {
+ "service": {
+ "shortName": "MetadataService"
+ },
+ "shortName": "CreateExecution"
+ }
+ },
+ "file": "aiplatform_generated_aiplatform_v1beta1_metadata_service_create_execution_async.py",
+ "regionTag": "aiplatform_generated_aiplatform_v1beta1_MetadataService_CreateExecution_async",
+ "segments": [
+ {
+ "end": 44,
+ "start": 27,
+ "type": "FULL"
+ },
+ {
+ "end": 44,
+ "start": 27,
+ "type": "SHORT"
+ },
+ {
+ "end": 33,
+ "start": 31,
+ "type": "CLIENT_INITIALIZATION"
+ },
+ {
+ "end": 38,
+ "start": 34,
+ "type": "REQUEST_INITIALIZATION"
+ },
+ {
+ "end": 41,
+ "start": 39,
+ "type": "REQUEST_EXECUTION"
+ },
+ {
+ "end": 45,
+ "start": 42,
+ "type": "RESPONSE_HANDLING"
+ }
+ ]
+ },
+ {
+ "clientMethod": {
+ "method": {
+ "service": {
+ "shortName": "MetadataService"
+ },
+ "shortName": "CreateExecution"
+ }
+ },
+ "file": "aiplatform_generated_aiplatform_v1beta1_metadata_service_create_execution_sync.py",
+ "regionTag": "aiplatform_generated_aiplatform_v1beta1_MetadataService_CreateExecution_sync",
+ "segments": [
+ {
+ "end": 44,
+ "start": 27,
+ "type": "FULL"
+ },
+ {
+ "end": 44,
+ "start": 27,
+ "type": "SHORT"
+ },
+ {
+ "end": 33,
+ "start": 31,
+ "type": "CLIENT_INITIALIZATION"
+ },
+ {
+ "end": 38,
+ "start": 34,
+ "type": "REQUEST_INITIALIZATION"
+ },
+ {
+ "end": 41,
+ "start": 39,
+ "type": "REQUEST_EXECUTION"
+ },
+ {
+ "end": 45,
+ "start": 42,
+ "type": "RESPONSE_HANDLING"
+ }
+ ]
+ },
+ {
+ "clientMethod": {
+ "async": true,
+ "method": {
+ "service": {
+ "shortName": "MetadataService"
+ },
+ "shortName": "CreateMetadataSchema"
+ }
+ },
+ "file": "aiplatform_generated_aiplatform_v1beta1_metadata_service_create_metadata_schema_async.py",
+ "regionTag": "aiplatform_generated_aiplatform_v1beta1_MetadataService_CreateMetadataSchema_async",
+ "segments": [
+ {
+ "end": 48,
+ "start": 27,
+ "type": "FULL"
+ },
+ {
+ "end": 48,
+ "start": 27,
+ "type": "SHORT"
+ },
+ {
+ "end": 33,
+ "start": 31,
+ "type": "CLIENT_INITIALIZATION"
+ },
+ {
+ "end": 42,
+ "start": 34,
+ "type": "REQUEST_INITIALIZATION"
+ },
+ {
+ "end": 45,
+ "start": 43,
+ "type": "REQUEST_EXECUTION"
+ },
+ {
+ "end": 49,
+ "start": 46,
+ "type": "RESPONSE_HANDLING"
+ }
+ ]
+ },
+ {
+ "clientMethod": {
+ "method": {
+ "service": {
+ "shortName": "MetadataService"
+ },
+ "shortName": "CreateMetadataSchema"
+ }
+ },
+ "file": "aiplatform_generated_aiplatform_v1beta1_metadata_service_create_metadata_schema_sync.py",
+ "regionTag": "aiplatform_generated_aiplatform_v1beta1_MetadataService_CreateMetadataSchema_sync",
+ "segments": [
+ {
+ "end": 48,
+ "start": 27,
+ "type": "FULL"
+ },
+ {
+ "end": 48,
+ "start": 27,
+ "type": "SHORT"
+ },
+ {
+ "end": 33,
+ "start": 31,
+ "type": "CLIENT_INITIALIZATION"
+ },
+ {
+ "end": 42,
+ "start": 34,
+ "type": "REQUEST_INITIALIZATION"
+ },
+ {
+ "end": 45,
+ "start": 43,
+ "type": "REQUEST_EXECUTION"
+ },
+ {
+ "end": 49,
+ "start": 46,
+ "type": "RESPONSE_HANDLING"
+ }
+ ]
+ },
+ {
+ "clientMethod": {
+ "async": true,
+ "method": {
+ "service": {
+ "shortName": "MetadataService"
+ },
+ "shortName": "CreateMetadataStore"
+ }
+ },
+ "file": "aiplatform_generated_aiplatform_v1beta1_metadata_service_create_metadata_store_async.py",
+ "regionTag": "aiplatform_generated_aiplatform_v1beta1_MetadataService_CreateMetadataStore_async",
+ "segments": [
+ {
+ "end": 48,
+ "start": 27,
+ "type": "FULL"
+ },
+ {
+ "end": 48,
+ "start": 27,
+ "type": "SHORT"
+ },
+ {
+ "end": 33,
+ "start": 31,
+ "type": "CLIENT_INITIALIZATION"
+ },
+ {
+ "end": 38,
+ "start": 34,
+ "type": "REQUEST_INITIALIZATION"
+ },
+ {
+ "end": 45,
+ "start": 39,
+ "type": "REQUEST_EXECUTION"
+ },
+ {
+ "end": 49,
+ "start": 46,
+ "type": "RESPONSE_HANDLING"
+ }
+ ]
+ },
+ {
+ "clientMethod": {
+ "method": {
+ "service": {
+ "shortName": "MetadataService"
+ },
+ "shortName": "CreateMetadataStore"
+ }
+ },
+ "file": "aiplatform_generated_aiplatform_v1beta1_metadata_service_create_metadata_store_sync.py",
+ "regionTag": "aiplatform_generated_aiplatform_v1beta1_MetadataService_CreateMetadataStore_sync",
+ "segments": [
+ {
+ "end": 48,
+ "start": 27,
+ "type": "FULL"
+ },
+ {
+ "end": 48,
+ "start": 27,
+ "type": "SHORT"
+ },
+ {
+ "end": 33,
+ "start": 31,
+ "type": "CLIENT_INITIALIZATION"
+ },
+ {
+ "end": 38,
+ "start": 34,
+ "type": "REQUEST_INITIALIZATION"
+ },
+ {
+ "end": 45,
+ "start": 39,
+ "type": "REQUEST_EXECUTION"
+ },
+ {
+ "end": 49,
+ "start": 46,
+ "type": "RESPONSE_HANDLING"
+ }
+ ]
+ },
+ {
+ "clientMethod": {
+ "async": true,
+ "method": {
+ "service": {
+ "shortName": "MetadataService"
+ },
+ "shortName": "DeleteArtifact"
+ }
+ },
+ "file": "aiplatform_generated_aiplatform_v1beta1_metadata_service_delete_artifact_async.py",
+ "regionTag": "aiplatform_generated_aiplatform_v1beta1_MetadataService_DeleteArtifact_async",
+ "segments": [
+ {
+ "end": 48,
+ "start": 27,
+ "type": "FULL"
+ },
+ {
+ "end": 48,
+ "start": 27,
+ "type": "SHORT"
+ },
+ {
+ "end": 33,
+ "start": 31,
+ "type": "CLIENT_INITIALIZATION"
+ },
+ {
+ "end": 38,
+ "start": 34,
+ "type": "REQUEST_INITIALIZATION"
+ },
+ {
+ "end": 45,
+ "start": 39,
+ "type": "REQUEST_EXECUTION"
+ },
+ {
+ "end": 49,
+ "start": 46,
+ "type": "RESPONSE_HANDLING"
+ }
+ ]
+ },
+ {
+ "clientMethod": {
+ "method": {
+ "service": {
+ "shortName": "MetadataService"
+ },
+ "shortName": "DeleteArtifact"
+ }
+ },
+ "file": "aiplatform_generated_aiplatform_v1beta1_metadata_service_delete_artifact_sync.py",
+ "regionTag": "aiplatform_generated_aiplatform_v1beta1_MetadataService_DeleteArtifact_sync",
+ "segments": [
+ {
+ "end": 48,
+ "start": 27,
+ "type": "FULL"
+ },
+ {
+ "end": 48,
+ "start": 27,
+ "type": "SHORT"
+ },
+ {
+ "end": 33,
+ "start": 31,
+ "type": "CLIENT_INITIALIZATION"
+ },
+ {
+ "end": 38,
+ "start": 34,
+ "type": "REQUEST_INITIALIZATION"
+ },
+ {
+ "end": 45,
+ "start": 39,
+ "type": "REQUEST_EXECUTION"
+ },
+ {
+ "end": 49,
+ "start": 46,
+ "type": "RESPONSE_HANDLING"
+ }
+ ]
+ },
+ {
+ "clientMethod": {
+ "async": true,
+ "method": {
+ "service": {
+ "shortName": "MetadataService"
+ },
+ "shortName": "DeleteContext"
+ }
+ },
+ "file": "aiplatform_generated_aiplatform_v1beta1_metadata_service_delete_context_async.py",
+ "regionTag": "aiplatform_generated_aiplatform_v1beta1_MetadataService_DeleteContext_async",
+ "segments": [
+ {
+ "end": 48,
+ "start": 27,
+ "type": "FULL"
+ },
+ {
+ "end": 48,
+ "start": 27,
+ "type": "SHORT"
+ },
+ {
+ "end": 33,
+ "start": 31,
+ "type": "CLIENT_INITIALIZATION"
+ },
+ {
+ "end": 38,
+ "start": 34,
+ "type": "REQUEST_INITIALIZATION"
+ },
+ {
+ "end": 45,
+ "start": 39,
+ "type": "REQUEST_EXECUTION"
+ },
+ {
+ "end": 49,
+ "start": 46,
+ "type": "RESPONSE_HANDLING"
+ }
+ ]
+ },
+ {
+ "clientMethod": {
+ "method": {
+ "service": {
+ "shortName": "MetadataService"
+ },
+ "shortName": "DeleteContext"
+ }
+ },
+ "file": "aiplatform_generated_aiplatform_v1beta1_metadata_service_delete_context_sync.py",
+ "regionTag": "aiplatform_generated_aiplatform_v1beta1_MetadataService_DeleteContext_sync",
+ "segments": [
+ {
+ "end": 48,
+ "start": 27,
+ "type": "FULL"
+ },
+ {
+ "end": 48,
+ "start": 27,
+ "type": "SHORT"
+ },
+ {
+ "end": 33,
+ "start": 31,
+ "type": "CLIENT_INITIALIZATION"
+ },
+ {
+ "end": 38,
+ "start": 34,
+ "type": "REQUEST_INITIALIZATION"
+ },
+ {
+ "end": 45,
+ "start": 39,
+ "type": "REQUEST_EXECUTION"
+ },
+ {
+ "end": 49,
+ "start": 46,
+ "type": "RESPONSE_HANDLING"
+ }
+ ]
+ },
+ {
+ "clientMethod": {
+ "async": true,
+ "method": {
+ "service": {
+ "shortName": "MetadataService"
+ },
+ "shortName": "DeleteExecution"
+ }
+ },
+ "file": "aiplatform_generated_aiplatform_v1beta1_metadata_service_delete_execution_async.py",
+ "regionTag": "aiplatform_generated_aiplatform_v1beta1_MetadataService_DeleteExecution_async",
+ "segments": [
+ {
+ "end": 48,
+ "start": 27,
+ "type": "FULL"
+ },
+ {
+ "end": 48,
+ "start": 27,
+ "type": "SHORT"
+ },
+ {
+ "end": 33,
+ "start": 31,
+ "type": "CLIENT_INITIALIZATION"
+ },
+ {
+ "end": 38,
+ "start": 34,
+ "type": "REQUEST_INITIALIZATION"
+ },
+ {
+ "end": 45,
+ "start": 39,
+ "type": "REQUEST_EXECUTION"
+ },
+ {
+ "end": 49,
+ "start": 46,
+ "type": "RESPONSE_HANDLING"
+ }
+ ]
+ },
+ {
+ "clientMethod": {
+ "method": {
+ "service": {
+ "shortName": "MetadataService"
+ },
+ "shortName": "DeleteExecution"
+ }
+ },
+ "file": "aiplatform_generated_aiplatform_v1beta1_metadata_service_delete_execution_sync.py",
+ "regionTag": "aiplatform_generated_aiplatform_v1beta1_MetadataService_DeleteExecution_sync",
+ "segments": [
+ {
+ "end": 48,
+ "start": 27,
+ "type": "FULL"
+ },
+ {
+ "end": 48,
+ "start": 27,
+ "type": "SHORT"
+ },
+ {
+ "end": 33,
+ "start": 31,
+ "type": "CLIENT_INITIALIZATION"
+ },
+ {
+ "end": 38,
+ "start": 34,
+ "type": "REQUEST_INITIALIZATION"
+ },
+ {
+ "end": 45,
+ "start": 39,
+ "type": "REQUEST_EXECUTION"
+ },
+ {
+ "end": 49,
+ "start": 46,
+ "type": "RESPONSE_HANDLING"
+ }
+ ]
+ },
+ {
+ "clientMethod": {
+ "async": true,
+ "method": {
+ "service": {
+ "shortName": "MetadataService"
+ },
+ "shortName": "DeleteMetadataStore"
+ }
+ },
+ "file": "aiplatform_generated_aiplatform_v1beta1_metadata_service_delete_metadata_store_async.py",
+ "regionTag": "aiplatform_generated_aiplatform_v1beta1_MetadataService_DeleteMetadataStore_async",
+ "segments": [
+ {
+ "end": 48,
+ "start": 27,
+ "type": "FULL"
+ },
+ {
+ "end": 48,
+ "start": 27,
+ "type": "SHORT"
+ },
+ {
+ "end": 33,
+ "start": 31,
+ "type": "CLIENT_INITIALIZATION"
+ },
+ {
+ "end": 38,
+ "start": 34,
+ "type": "REQUEST_INITIALIZATION"
+ },
+ {
+ "end": 45,
+ "start": 39,
+ "type": "REQUEST_EXECUTION"
+ },
+ {
+ "end": 49,
+ "start": 46,
+ "type": "RESPONSE_HANDLING"
+ }
+ ]
+ },
+ {
+ "clientMethod": {
+ "method": {
+ "service": {
+ "shortName": "MetadataService"
+ },
+ "shortName": "DeleteMetadataStore"
+ }
+ },
+ "file": "aiplatform_generated_aiplatform_v1beta1_metadata_service_delete_metadata_store_sync.py",
+ "regionTag": "aiplatform_generated_aiplatform_v1beta1_MetadataService_DeleteMetadataStore_sync",
+ "segments": [
+ {
+ "end": 48,
+ "start": 27,
+ "type": "FULL"
+ },
+ {
+ "end": 48,
+ "start": 27,
+ "type": "SHORT"
+ },
+ {
+ "end": 33,
+ "start": 31,
+ "type": "CLIENT_INITIALIZATION"
+ },
+ {
+ "end": 38,
+ "start": 34,
+ "type": "REQUEST_INITIALIZATION"
+ },
+ {
+ "end": 45,
+ "start": 39,
+ "type": "REQUEST_EXECUTION"
+ },
+ {
+ "end": 49,
+ "start": 46,
+ "type": "RESPONSE_HANDLING"
+ }
+ ]
+ },
+ {
+ "clientMethod": {
+ "async": true,
+ "method": {
+ "service": {
+ "shortName": "MetadataService"
+ },
+ "shortName": "GetArtifact"
+ }
+ },
+ "file": "aiplatform_generated_aiplatform_v1beta1_metadata_service_get_artifact_async.py",
+ "regionTag": "aiplatform_generated_aiplatform_v1beta1_MetadataService_GetArtifact_async",
+ "segments": [
+ {
+ "end": 44,
+ "start": 27,
+ "type": "FULL"
+ },
+ {
+ "end": 44,
+ "start": 27,
+ "type": "SHORT"
+ },
+ {
+ "end": 33,
+ "start": 31,
+ "type": "CLIENT_INITIALIZATION"
+ },
+ {
+ "end": 38,
+ "start": 34,
+ "type": "REQUEST_INITIALIZATION"
+ },
+ {
+ "end": 41,
+ "start": 39,
+ "type": "REQUEST_EXECUTION"
+ },
+ {
+ "end": 45,
+ "start": 42,
+ "type": "RESPONSE_HANDLING"
+ }
+ ]
+ },
+ {
+ "clientMethod": {
+ "method": {
+ "service": {
+ "shortName": "MetadataService"
+ },
+ "shortName": "GetArtifact"
+ }
+ },
+ "file": "aiplatform_generated_aiplatform_v1beta1_metadata_service_get_artifact_sync.py",
+ "regionTag": "aiplatform_generated_aiplatform_v1beta1_MetadataService_GetArtifact_sync",
+ "segments": [
+ {
+ "end": 44,
+ "start": 27,
+ "type": "FULL"
+ },
+ {
+ "end": 44,
+ "start": 27,
+ "type": "SHORT"
+ },
+ {
+ "end": 33,
+ "start": 31,
+ "type": "CLIENT_INITIALIZATION"
+ },
+ {
+ "end": 38,
+ "start": 34,
+ "type": "REQUEST_INITIALIZATION"
+ },
+ {
+ "end": 41,
+ "start": 39,
+ "type": "REQUEST_EXECUTION"
+ },
+ {
+ "end": 45,
+ "start": 42,
+ "type": "RESPONSE_HANDLING"
+ }
+ ]
+ },
+ {
+ "clientMethod": {
+ "async": true,
+ "method": {
+ "service": {
+ "shortName": "MetadataService"
+ },
+ "shortName": "GetContext"
+ }
+ },
+ "file": "aiplatform_generated_aiplatform_v1beta1_metadata_service_get_context_async.py",
+ "regionTag": "aiplatform_generated_aiplatform_v1beta1_MetadataService_GetContext_async",
+ "segments": [
+ {
+ "end": 44,
+ "start": 27,
+ "type": "FULL"
+ },
+ {
+ "end": 44,
+ "start": 27,
+ "type": "SHORT"
+ },
+ {
+ "end": 33,
+ "start": 31,
+ "type": "CLIENT_INITIALIZATION"
+ },
+ {
+ "end": 38,
+ "start": 34,
+ "type": "REQUEST_INITIALIZATION"
+ },
+ {
+ "end": 41,
+ "start": 39,
+ "type": "REQUEST_EXECUTION"
+ },
+ {
+ "end": 45,
+ "start": 42,
+ "type": "RESPONSE_HANDLING"
+ }
+ ]
+ },
+ {
+ "clientMethod": {
+ "method": {
+ "service": {
+ "shortName": "MetadataService"
+ },
+ "shortName": "GetContext"
+ }
+ },
+ "file": "aiplatform_generated_aiplatform_v1beta1_metadata_service_get_context_sync.py",
+ "regionTag": "aiplatform_generated_aiplatform_v1beta1_MetadataService_GetContext_sync",
+ "segments": [
+ {
+ "end": 44,
+ "start": 27,
+ "type": "FULL"
+ },
+ {
+ "end": 44,
+ "start": 27,
+ "type": "SHORT"
+ },
+ {
+ "end": 33,
+ "start": 31,
+ "type": "CLIENT_INITIALIZATION"
+ },
+ {
+ "end": 38,
+ "start": 34,
+ "type": "REQUEST_INITIALIZATION"
+ },
+ {
+ "end": 41,
+ "start": 39,
+ "type": "REQUEST_EXECUTION"
+ },
+ {
+ "end": 45,
+ "start": 42,
+ "type": "RESPONSE_HANDLING"
+ }
+ ]
+ },
+ {
+ "clientMethod": {
+ "async": true,
+ "method": {
+ "service": {
+ "shortName": "MetadataService"
+ },
+ "shortName": "GetExecution"
+ }
+ },
+ "file": "aiplatform_generated_aiplatform_v1beta1_metadata_service_get_execution_async.py",
+ "regionTag": "aiplatform_generated_aiplatform_v1beta1_MetadataService_GetExecution_async",
+ "segments": [
+ {
+ "end": 44,
+ "start": 27,
+ "type": "FULL"
+ },
+ {
+ "end": 44,
+ "start": 27,
+ "type": "SHORT"
+ },
+ {
+ "end": 33,
+ "start": 31,
+ "type": "CLIENT_INITIALIZATION"
+ },
+ {
+ "end": 38,
+ "start": 34,
+ "type": "REQUEST_INITIALIZATION"
+ },
+ {
+ "end": 41,
+ "start": 39,
+ "type": "REQUEST_EXECUTION"
+ },
+ {
+ "end": 45,
+ "start": 42,
+ "type": "RESPONSE_HANDLING"
+ }
+ ]
+ },
+ {
+ "clientMethod": {
+ "method": {
+ "service": {
+ "shortName": "MetadataService"
+ },
+ "shortName": "GetExecution"
+ }
+ },
+ "file": "aiplatform_generated_aiplatform_v1beta1_metadata_service_get_execution_sync.py",
+ "regionTag": "aiplatform_generated_aiplatform_v1beta1_MetadataService_GetExecution_sync",
+ "segments": [
+ {
+ "end": 44,
+ "start": 27,
+ "type": "FULL"
+ },
+ {
+ "end": 44,
+ "start": 27,
+ "type": "SHORT"
+ },
+ {
+ "end": 33,
+ "start": 31,
+ "type": "CLIENT_INITIALIZATION"
+ },
+ {
+ "end": 38,
+ "start": 34,
+ "type": "REQUEST_INITIALIZATION"
+ },
+ {
+ "end": 41,
+ "start": 39,
+ "type": "REQUEST_EXECUTION"
+ },
+ {
+ "end": 45,
+ "start": 42,
+ "type": "RESPONSE_HANDLING"
+ }
+ ]
+ },
+ {
+ "clientMethod": {
+ "async": true,
+ "method": {
+ "service": {
+ "shortName": "MetadataService"
+ },
+ "shortName": "GetMetadataSchema"
+ }
+ },
+ "file": "aiplatform_generated_aiplatform_v1beta1_metadata_service_get_metadata_schema_async.py",
+ "regionTag": "aiplatform_generated_aiplatform_v1beta1_MetadataService_GetMetadataSchema_async",
+ "segments": [
+ {
+ "end": 44,
+ "start": 27,
+ "type": "FULL"
+ },
+ {
+ "end": 44,
+ "start": 27,
+ "type": "SHORT"
+ },
+ {
+ "end": 33,
+ "start": 31,
+ "type": "CLIENT_INITIALIZATION"
+ },
+ {
+ "end": 38,
+ "start": 34,
+ "type": "REQUEST_INITIALIZATION"
+ },
+ {
+ "end": 41,
+ "start": 39,
+ "type": "REQUEST_EXECUTION"
+ },
+ {
+ "end": 45,
+ "start": 42,
+ "type": "RESPONSE_HANDLING"
+ }
+ ]
+ },
+ {
+ "clientMethod": {
+ "method": {
+ "service": {
+ "shortName": "MetadataService"
+ },
+ "shortName": "GetMetadataSchema"
+ }
+ },
+ "file": "aiplatform_generated_aiplatform_v1beta1_metadata_service_get_metadata_schema_sync.py",
+ "regionTag": "aiplatform_generated_aiplatform_v1beta1_MetadataService_GetMetadataSchema_sync",
+ "segments": [
+ {
+ "end": 44,
+ "start": 27,
+ "type": "FULL"
+ },
+ {
+ "end": 44,
+ "start": 27,
+ "type": "SHORT"
+ },
+ {
+ "end": 33,
+ "start": 31,
+ "type": "CLIENT_INITIALIZATION"
+ },
+ {
+ "end": 38,
+ "start": 34,
+ "type": "REQUEST_INITIALIZATION"
+ },
+ {
+ "end": 41,
+ "start": 39,
+ "type": "REQUEST_EXECUTION"
+ },
+ {
+ "end": 45,
+ "start": 42,
+ "type": "RESPONSE_HANDLING"
+ }
+ ]
+ },
+ {
+ "clientMethod": {
+ "async": true,
+ "method": {
+ "service": {
+ "shortName": "MetadataService"
+ },
+ "shortName": "GetMetadataStore"
+ }
+ },
+ "file": "aiplatform_generated_aiplatform_v1beta1_metadata_service_get_metadata_store_async.py",
+ "regionTag": "aiplatform_generated_aiplatform_v1beta1_MetadataService_GetMetadataStore_async",
+ "segments": [
+ {
+ "end": 44,
+ "start": 27,
+ "type": "FULL"
+ },
+ {
+ "end": 44,
+ "start": 27,
+ "type": "SHORT"
+ },
+ {
+ "end": 33,
+ "start": 31,
+ "type": "CLIENT_INITIALIZATION"
+ },
+ {
+ "end": 38,
+ "start": 34,
+ "type": "REQUEST_INITIALIZATION"
+ },
+ {
+ "end": 41,
+ "start": 39,
+ "type": "REQUEST_EXECUTION"
+ },
+ {
+ "end": 45,
+ "start": 42,
+ "type": "RESPONSE_HANDLING"
+ }
+ ]
+ },
+ {
+ "clientMethod": {
+ "method": {
+ "service": {
+ "shortName": "MetadataService"
+ },
+ "shortName": "GetMetadataStore"
+ }
+ },
+ "file": "aiplatform_generated_aiplatform_v1beta1_metadata_service_get_metadata_store_sync.py",
+ "regionTag": "aiplatform_generated_aiplatform_v1beta1_MetadataService_GetMetadataStore_sync",
+ "segments": [
+ {
+ "end": 44,
+ "start": 27,
+ "type": "FULL"
+ },
+ {
+ "end": 44,
+ "start": 27,
+ "type": "SHORT"
+ },
+ {
+ "end": 33,
+ "start": 31,
+ "type": "CLIENT_INITIALIZATION"
+ },
+ {
+ "end": 38,
+ "start": 34,
+ "type": "REQUEST_INITIALIZATION"
+ },
+ {
+ "end": 41,
+ "start": 39,
+ "type": "REQUEST_EXECUTION"
+ },
+ {
+ "end": 45,
+ "start": 42,
+ "type": "RESPONSE_HANDLING"
+ }
+ ]
+ },
+ {
+ "clientMethod": {
+ "async": true,
+ "method": {
+ "service": {
+ "shortName": "MetadataService"
+ },
+ "shortName": "ListArtifacts"
+ }
+ },
+ "file": "aiplatform_generated_aiplatform_v1beta1_metadata_service_list_artifacts_async.py",
+ "regionTag": "aiplatform_generated_aiplatform_v1beta1_MetadataService_ListArtifacts_async",
+ "segments": [
+ {
+ "end": 45,
+ "start": 27,
+ "type": "FULL"
+ },
+ {
+ "end": 45,
+ "start": 27,
+ "type": "SHORT"
+ },
+ {
+ "end": 33,
+ "start": 31,
+ "type": "CLIENT_INITIALIZATION"
+ },
+ {
+ "end": 38,
+ "start": 34,
+ "type": "REQUEST_INITIALIZATION"
+ },
+ {
+ "end": 41,
+ "start": 39,
+ "type": "REQUEST_EXECUTION"
+ },
+ {
+ "end": 46,
+ "start": 42,
+ "type": "RESPONSE_HANDLING"
+ }
+ ]
+ },
+ {
+ "clientMethod": {
+ "method": {
+ "service": {
+ "shortName": "MetadataService"
+ },
+ "shortName": "ListArtifacts"
+ }
+ },
+ "file": "aiplatform_generated_aiplatform_v1beta1_metadata_service_list_artifacts_sync.py",
+ "regionTag": "aiplatform_generated_aiplatform_v1beta1_MetadataService_ListArtifacts_sync",
+ "segments": [
+ {
+ "end": 45,
+ "start": 27,
+ "type": "FULL"
+ },
+ {
+ "end": 45,
+ "start": 27,
+ "type": "SHORT"
+ },
+ {
+ "end": 33,
+ "start": 31,
+ "type": "CLIENT_INITIALIZATION"
+ },
+ {
+ "end": 38,
+ "start": 34,
+ "type": "REQUEST_INITIALIZATION"
+ },
+ {
+ "end": 41,
+ "start": 39,
+ "type": "REQUEST_EXECUTION"
+ },
+ {
+ "end": 46,
+ "start": 42,
+ "type": "RESPONSE_HANDLING"
+ }
+ ]
+ },
+ {
+ "clientMethod": {
+ "async": true,
+ "method": {
+ "service": {
+ "shortName": "MetadataService"
+ },
+ "shortName": "ListContexts"
+ }
+ },
+ "file": "aiplatform_generated_aiplatform_v1beta1_metadata_service_list_contexts_async.py",
+ "regionTag": "aiplatform_generated_aiplatform_v1beta1_MetadataService_ListContexts_async",
+ "segments": [
+ {
+ "end": 45,
+ "start": 27,
+ "type": "FULL"
+ },
+ {
+ "end": 45,
+ "start": 27,
+ "type": "SHORT"
+ },
+ {
+ "end": 33,
+ "start": 31,
+ "type": "CLIENT_INITIALIZATION"
+ },
+ {
+ "end": 38,
+ "start": 34,
+ "type": "REQUEST_INITIALIZATION"
+ },
+ {
+ "end": 41,
+ "start": 39,
+ "type": "REQUEST_EXECUTION"
+ },
+ {
+ "end": 46,
+ "start": 42,
+ "type": "RESPONSE_HANDLING"
+ }
+ ]
+ },
+ {
+ "clientMethod": {
+ "method": {
+ "service": {
+ "shortName": "MetadataService"
+ },
+ "shortName": "ListContexts"
+ }
+ },
+ "file": "aiplatform_generated_aiplatform_v1beta1_metadata_service_list_contexts_sync.py",
+ "regionTag": "aiplatform_generated_aiplatform_v1beta1_MetadataService_ListContexts_sync",
+ "segments": [
+ {
+ "end": 45,
+ "start": 27,
+ "type": "FULL"
+ },
+ {
+ "end": 45,
+ "start": 27,
+ "type": "SHORT"
+ },
+ {
+ "end": 33,
+ "start": 31,
+ "type": "CLIENT_INITIALIZATION"
+ },
+ {
+ "end": 38,
+ "start": 34,
+ "type": "REQUEST_INITIALIZATION"
+ },
+ {
+ "end": 41,
+ "start": 39,
+ "type": "REQUEST_EXECUTION"
+ },
+ {
+ "end": 46,
+ "start": 42,
+ "type": "RESPONSE_HANDLING"
+ }
+ ]
+ },
+ {
+ "clientMethod": {
+ "async": true,
+ "method": {
+ "service": {
+ "shortName": "MetadataService"
+ },
+ "shortName": "ListExecutions"
+ }
+ },
+ "file": "aiplatform_generated_aiplatform_v1beta1_metadata_service_list_executions_async.py",
+ "regionTag": "aiplatform_generated_aiplatform_v1beta1_MetadataService_ListExecutions_async",
+ "segments": [
+ {
+ "end": 45,
+ "start": 27,
+ "type": "FULL"
+ },
+ {
+ "end": 45,
+ "start": 27,
+ "type": "SHORT"
+ },
+ {
+ "end": 33,
+ "start": 31,
+ "type": "CLIENT_INITIALIZATION"
+ },
+ {
+ "end": 38,
+ "start": 34,
+ "type": "REQUEST_INITIALIZATION"
+ },
+ {
+ "end": 41,
+ "start": 39,
+ "type": "REQUEST_EXECUTION"
+ },
+ {
+ "end": 46,
+ "start": 42,
+ "type": "RESPONSE_HANDLING"
+ }
+ ]
+ },
+ {
+ "clientMethod": {
+ "method": {
+ "service": {
+ "shortName": "MetadataService"
+ },
+ "shortName": "ListExecutions"
+ }
+ },
+ "file": "aiplatform_generated_aiplatform_v1beta1_metadata_service_list_executions_sync.py",
+ "regionTag": "aiplatform_generated_aiplatform_v1beta1_MetadataService_ListExecutions_sync",
+ "segments": [
+ {
+ "end": 45,
+ "start": 27,
+ "type": "FULL"
+ },
+ {
+ "end": 45,
+ "start": 27,
+ "type": "SHORT"
+ },
+ {
+ "end": 33,
+ "start": 31,
+ "type": "CLIENT_INITIALIZATION"
+ },
+ {
+ "end": 38,
+ "start": 34,
+ "type": "REQUEST_INITIALIZATION"
+ },
+ {
+ "end": 41,
+ "start": 39,
+ "type": "REQUEST_EXECUTION"
+ },
+ {
+ "end": 46,
+ "start": 42,
+ "type": "RESPONSE_HANDLING"
+ }
+ ]
+ },
+ {
+ "clientMethod": {
+ "async": true,
+ "method": {
+ "service": {
+ "shortName": "MetadataService"
+ },
+ "shortName": "ListMetadataSchemas"
+ }
+ },
+ "file": "aiplatform_generated_aiplatform_v1beta1_metadata_service_list_metadata_schemas_async.py",
+ "regionTag": "aiplatform_generated_aiplatform_v1beta1_MetadataService_ListMetadataSchemas_async",
+ "segments": [
+ {
+ "end": 45,
+ "start": 27,
+ "type": "FULL"
+ },
+ {
+ "end": 45,
+ "start": 27,
+ "type": "SHORT"
+ },
+ {
+ "end": 33,
+ "start": 31,
+ "type": "CLIENT_INITIALIZATION"
+ },
+ {
+ "end": 38,
+ "start": 34,
+ "type": "REQUEST_INITIALIZATION"
+ },
+ {
+ "end": 41,
+ "start": 39,
+ "type": "REQUEST_EXECUTION"
+ },
+ {
+ "end": 46,
+ "start": 42,
+ "type": "RESPONSE_HANDLING"
+ }
+ ]
+ },
+ {
+ "clientMethod": {
+ "method": {
+ "service": {
+ "shortName": "MetadataService"
+ },
+ "shortName": "ListMetadataSchemas"
+ }
+ },
+ "file": "aiplatform_generated_aiplatform_v1beta1_metadata_service_list_metadata_schemas_sync.py",
+ "regionTag": "aiplatform_generated_aiplatform_v1beta1_MetadataService_ListMetadataSchemas_sync",
+ "segments": [
+ {
+ "end": 45,
+ "start": 27,
+ "type": "FULL"
+ },
+ {
+ "end": 45,
+ "start": 27,
+ "type": "SHORT"
+ },
+ {
+ "end": 33,
+ "start": 31,
+ "type": "CLIENT_INITIALIZATION"
+ },
+ {
+ "end": 38,
+ "start": 34,
+ "type": "REQUEST_INITIALIZATION"
+ },
+ {
+ "end": 41,
+ "start": 39,
+ "type": "REQUEST_EXECUTION"
+ },
+ {
+ "end": 46,
+ "start": 42,
+ "type": "RESPONSE_HANDLING"
+ }
+ ]
+ },
+ {
+ "clientMethod": {
+ "async": true,
+ "method": {
+ "service": {
+ "shortName": "MetadataService"
+ },
+ "shortName": "ListMetadataStores"
+ }
+ },
+ "file": "aiplatform_generated_aiplatform_v1beta1_metadata_service_list_metadata_stores_async.py",
+ "regionTag": "aiplatform_generated_aiplatform_v1beta1_MetadataService_ListMetadataStores_async",
+ "segments": [
+ {
+ "end": 45,
+ "start": 27,
+ "type": "FULL"
+ },
+ {
+ "end": 45,
+ "start": 27,
+ "type": "SHORT"
+ },
+ {
+ "end": 33,
+ "start": 31,
+ "type": "CLIENT_INITIALIZATION"
+ },
+ {
+ "end": 38,
+ "start": 34,
+ "type": "REQUEST_INITIALIZATION"
+ },
+ {
+ "end": 41,
+ "start": 39,
+ "type": "REQUEST_EXECUTION"
+ },
+ {
+ "end": 46,
+ "start": 42,
+ "type": "RESPONSE_HANDLING"
+ }
+ ]
+ },
+ {
+ "clientMethod": {
+ "method": {
+ "service": {
+ "shortName": "MetadataService"
+ },
+ "shortName": "ListMetadataStores"
+ }
+ },
+ "file": "aiplatform_generated_aiplatform_v1beta1_metadata_service_list_metadata_stores_sync.py",
+ "regionTag": "aiplatform_generated_aiplatform_v1beta1_MetadataService_ListMetadataStores_sync",
+ "segments": [
+ {
+ "end": 45,
+ "start": 27,
+ "type": "FULL"
+ },
+ {
+ "end": 45,
+ "start": 27,
+ "type": "SHORT"
+ },
+ {
+ "end": 33,
+ "start": 31,
+ "type": "CLIENT_INITIALIZATION"
+ },
+ {
+ "end": 38,
+ "start": 34,
+ "type": "REQUEST_INITIALIZATION"
+ },
+ {
+ "end": 41,
+ "start": 39,
+ "type": "REQUEST_EXECUTION"
+ },
+ {
+ "end": 46,
+ "start": 42,
+ "type": "RESPONSE_HANDLING"
+ }
+ ]
+ },
+ {
+ "clientMethod": {
+ "async": true,
+ "method": {
+ "service": {
+ "shortName": "MetadataService"
+ },
+ "shortName": "PurgeArtifacts"
+ }
+ },
+ "file": "aiplatform_generated_aiplatform_v1beta1_metadata_service_purge_artifacts_async.py",
+ "regionTag": "aiplatform_generated_aiplatform_v1beta1_MetadataService_PurgeArtifacts_async",
+ "segments": [
+ {
+ "end": 49,
+ "start": 27,
+ "type": "FULL"
+ },
+ {
+ "end": 49,
+ "start": 27,
+ "type": "SHORT"
+ },
+ {
+ "end": 33,
+ "start": 31,
+ "type": "CLIENT_INITIALIZATION"
+ },
+ {
+ "end": 39,
+ "start": 34,
+ "type": "REQUEST_INITIALIZATION"
+ },
+ {
+ "end": 46,
+ "start": 40,
+ "type": "REQUEST_EXECUTION"
+ },
+ {
+ "end": 50,
+ "start": 47,
+ "type": "RESPONSE_HANDLING"
+ }
+ ]
+ },
+ {
+ "clientMethod": {
+ "method": {
+ "service": {
+ "shortName": "MetadataService"
+ },
+ "shortName": "PurgeArtifacts"
+ }
+ },
+ "file": "aiplatform_generated_aiplatform_v1beta1_metadata_service_purge_artifacts_sync.py",
+ "regionTag": "aiplatform_generated_aiplatform_v1beta1_MetadataService_PurgeArtifacts_sync",
+ "segments": [
+ {
+ "end": 49,
+ "start": 27,
+ "type": "FULL"
+ },
+ {
+ "end": 49,
+ "start": 27,
+ "type": "SHORT"
+ },
+ {
+ "end": 33,
+ "start": 31,
+ "type": "CLIENT_INITIALIZATION"
+ },
+ {
+ "end": 39,
+ "start": 34,
+ "type": "REQUEST_INITIALIZATION"
+ },
+ {
+ "end": 46,
+ "start": 40,
+ "type": "REQUEST_EXECUTION"
+ },
+ {
+ "end": 50,
+ "start": 47,
+ "type": "RESPONSE_HANDLING"
+ }
+ ]
+ },
+ {
+ "clientMethod": {
+ "async": true,
+ "method": {
+ "service": {
+ "shortName": "MetadataService"
+ },
+ "shortName": "PurgeContexts"
+ }
+ },
+ "file": "aiplatform_generated_aiplatform_v1beta1_metadata_service_purge_contexts_async.py",
+ "regionTag": "aiplatform_generated_aiplatform_v1beta1_MetadataService_PurgeContexts_async",
+ "segments": [
+ {
+ "end": 49,
+ "start": 27,
+ "type": "FULL"
+ },
+ {
+ "end": 49,
+ "start": 27,
+ "type": "SHORT"
+ },
+ {
+ "end": 33,
+ "start": 31,
+ "type": "CLIENT_INITIALIZATION"
+ },
+ {
+ "end": 39,
+ "start": 34,
+ "type": "REQUEST_INITIALIZATION"
+ },
+ {
+ "end": 46,
+ "start": 40,
+ "type": "REQUEST_EXECUTION"
+ },
+ {
+ "end": 50,
+ "start": 47,
+ "type": "RESPONSE_HANDLING"
+ }
+ ]
+ },
+ {
+ "clientMethod": {
+ "method": {
+ "service": {
+ "shortName": "MetadataService"
+ },
+ "shortName": "PurgeContexts"
+ }
+ },
+ "file": "aiplatform_generated_aiplatform_v1beta1_metadata_service_purge_contexts_sync.py",
+ "regionTag": "aiplatform_generated_aiplatform_v1beta1_MetadataService_PurgeContexts_sync",
+ "segments": [
+ {
+ "end": 49,
+ "start": 27,
+ "type": "FULL"
+ },
+ {
+ "end": 49,
+ "start": 27,
+ "type": "SHORT"
+ },
+ {
+ "end": 33,
+ "start": 31,
+ "type": "CLIENT_INITIALIZATION"
+ },
+ {
+ "end": 39,
+ "start": 34,
+ "type": "REQUEST_INITIALIZATION"
+ },
+ {
+ "end": 46,
+ "start": 40,
+ "type": "REQUEST_EXECUTION"
+ },
+ {
+ "end": 50,
+ "start": 47,
+ "type": "RESPONSE_HANDLING"
+ }
+ ]
+ },
+ {
+ "clientMethod": {
+ "async": true,
+ "method": {
+ "service": {
+ "shortName": "MetadataService"
+ },
+ "shortName": "PurgeExecutions"
+ }
+ },
+ "file": "aiplatform_generated_aiplatform_v1beta1_metadata_service_purge_executions_async.py",
+ "regionTag": "aiplatform_generated_aiplatform_v1beta1_MetadataService_PurgeExecutions_async",
+ "segments": [
+ {
+ "end": 49,
+ "start": 27,
+ "type": "FULL"
+ },
+ {
+ "end": 49,
+ "start": 27,
+ "type": "SHORT"
+ },
+ {
+ "end": 33,
+ "start": 31,
+ "type": "CLIENT_INITIALIZATION"
+ },
+ {
+ "end": 39,
+ "start": 34,
+ "type": "REQUEST_INITIALIZATION"
+ },
+ {
+ "end": 46,
+ "start": 40,
+ "type": "REQUEST_EXECUTION"
+ },
+ {
+ "end": 50,
+ "start": 47,
+ "type": "RESPONSE_HANDLING"
+ }
+ ]
+ },
+ {
+ "clientMethod": {
+ "method": {
+ "service": {
+ "shortName": "MetadataService"
+ },
+ "shortName": "PurgeExecutions"
+ }
+ },
+ "file": "aiplatform_generated_aiplatform_v1beta1_metadata_service_purge_executions_sync.py",
+ "regionTag": "aiplatform_generated_aiplatform_v1beta1_MetadataService_PurgeExecutions_sync",
+ "segments": [
+ {
+ "end": 49,
+ "start": 27,
+ "type": "FULL"
+ },
+ {
+ "end": 49,
+ "start": 27,
+ "type": "SHORT"
+ },
+ {
+ "end": 33,
+ "start": 31,
+ "type": "CLIENT_INITIALIZATION"
+ },
+ {
+ "end": 39,
+ "start": 34,
+ "type": "REQUEST_INITIALIZATION"
+ },
+ {
+ "end": 46,
+ "start": 40,
+ "type": "REQUEST_EXECUTION"
+ },
+ {
+ "end": 50,
+ "start": 47,
+ "type": "RESPONSE_HANDLING"
+ }
+ ]
+ },
+ {
+ "clientMethod": {
+ "async": true,
+ "method": {
+ "service": {
+ "shortName": "MetadataService"
+ },
+ "shortName": "QueryArtifactLineageSubgraph"
+ }
+ },
+ "file": "aiplatform_generated_aiplatform_v1beta1_metadata_service_query_artifact_lineage_subgraph_async.py",
+ "regionTag": "aiplatform_generated_aiplatform_v1beta1_MetadataService_QueryArtifactLineageSubgraph_async",
+ "segments": [
+ {
+ "end": 44,
+ "start": 27,
+ "type": "FULL"
+ },
+ {
+ "end": 44,
+ "start": 27,
+ "type": "SHORT"
+ },
+ {
+ "end": 33,
+ "start": 31,
+ "type": "CLIENT_INITIALIZATION"
+ },
+ {
+ "end": 38,
+ "start": 34,
+ "type": "REQUEST_INITIALIZATION"
+ },
+ {
+ "end": 41,
+ "start": 39,
+ "type": "REQUEST_EXECUTION"
+ },
+ {
+ "end": 45,
+ "start": 42,
+ "type": "RESPONSE_HANDLING"
+ }
+ ]
+ },
+ {
+ "clientMethod": {
+ "method": {
+ "service": {
+ "shortName": "MetadataService"
+ },
+ "shortName": "QueryArtifactLineageSubgraph"
+ }
+ },
+ "file": "aiplatform_generated_aiplatform_v1beta1_metadata_service_query_artifact_lineage_subgraph_sync.py",
+ "regionTag": "aiplatform_generated_aiplatform_v1beta1_MetadataService_QueryArtifactLineageSubgraph_sync",
+ "segments": [
+ {
+ "end": 44,
+ "start": 27,
+ "type": "FULL"
+ },
+ {
+ "end": 44,
+ "start": 27,
+ "type": "SHORT"
+ },
+ {
+ "end": 33,
+ "start": 31,
+ "type": "CLIENT_INITIALIZATION"
+ },
+ {
+ "end": 38,
+ "start": 34,
+ "type": "REQUEST_INITIALIZATION"
+ },
+ {
+ "end": 41,
+ "start": 39,
+ "type": "REQUEST_EXECUTION"
+ },
+ {
+ "end": 45,
+ "start": 42,
+ "type": "RESPONSE_HANDLING"
+ }
+ ]
+ },
+ {
+ "clientMethod": {
+ "async": true,
+ "method": {
+ "service": {
+ "shortName": "MetadataService"
+ },
+ "shortName": "QueryContextLineageSubgraph"
+ }
+ },
+ "file": "aiplatform_generated_aiplatform_v1beta1_metadata_service_query_context_lineage_subgraph_async.py",
+ "regionTag": "aiplatform_generated_aiplatform_v1beta1_MetadataService_QueryContextLineageSubgraph_async",
+ "segments": [
+ {
+ "end": 44,
+ "start": 27,
+ "type": "FULL"
+ },
+ {
+ "end": 44,
+ "start": 27,
+ "type": "SHORT"
+ },
+ {
+ "end": 33,
+ "start": 31,
+ "type": "CLIENT_INITIALIZATION"
+ },
+ {
+ "end": 38,
+ "start": 34,
+ "type": "REQUEST_INITIALIZATION"
+ },
+ {
+ "end": 41,
+ "start": 39,
+ "type": "REQUEST_EXECUTION"
+ },
+ {
+ "end": 45,
+ "start": 42,
+ "type": "RESPONSE_HANDLING"
+ }
+ ]
+ },
+ {
+ "clientMethod": {
+ "method": {
+ "service": {
+ "shortName": "MetadataService"
+ },
+ "shortName": "QueryContextLineageSubgraph"
+ }
+ },
+ "file": "aiplatform_generated_aiplatform_v1beta1_metadata_service_query_context_lineage_subgraph_sync.py",
+ "regionTag": "aiplatform_generated_aiplatform_v1beta1_MetadataService_QueryContextLineageSubgraph_sync",
+ "segments": [
+ {
+ "end": 44,
+ "start": 27,
+ "type": "FULL"
+ },
+ {
+ "end": 44,
+ "start": 27,
+ "type": "SHORT"
+ },
+ {
+ "end": 33,
+ "start": 31,
+ "type": "CLIENT_INITIALIZATION"
+ },
+ {
+ "end": 38,
+ "start": 34,
+ "type": "REQUEST_INITIALIZATION"
+ },
+ {
+ "end": 41,
+ "start": 39,
+ "type": "REQUEST_EXECUTION"
+ },
+ {
+ "end": 45,
+ "start": 42,
+ "type": "RESPONSE_HANDLING"
+ }
+ ]
+ },
+ {
+ "clientMethod": {
+ "async": true,
+ "method": {
+ "service": {
+ "shortName": "MetadataService"
+ },
+ "shortName": "QueryExecutionInputsAndOutputs"
+ }
+ },
+ "file": "aiplatform_generated_aiplatform_v1beta1_metadata_service_query_execution_inputs_and_outputs_async.py",
+ "regionTag": "aiplatform_generated_aiplatform_v1beta1_MetadataService_QueryExecutionInputsAndOutputs_async",
+ "segments": [
+ {
+ "end": 44,
+ "start": 27,
+ "type": "FULL"
+ },
+ {
+ "end": 44,
+ "start": 27,
+ "type": "SHORT"
+ },
+ {
+ "end": 33,
+ "start": 31,
+ "type": "CLIENT_INITIALIZATION"
+ },
+ {
+ "end": 38,
+ "start": 34,
+ "type": "REQUEST_INITIALIZATION"
+ },
+ {
+ "end": 41,
+ "start": 39,
+ "type": "REQUEST_EXECUTION"
+ },
+ {
+ "end": 45,
+ "start": 42,
+ "type": "RESPONSE_HANDLING"
+ }
+ ]
+ },
+ {
+ "clientMethod": {
+ "method": {
+ "service": {
+ "shortName": "MetadataService"
+ },
+ "shortName": "QueryExecutionInputsAndOutputs"
+ }
+ },
+ "file": "aiplatform_generated_aiplatform_v1beta1_metadata_service_query_execution_inputs_and_outputs_sync.py",
+ "regionTag": "aiplatform_generated_aiplatform_v1beta1_MetadataService_QueryExecutionInputsAndOutputs_sync",
+ "segments": [
+ {
+ "end": 44,
+ "start": 27,
+ "type": "FULL"
+ },
+ {
+ "end": 44,
+ "start": 27,
+ "type": "SHORT"
+ },
+ {
+ "end": 33,
+ "start": 31,
+ "type": "CLIENT_INITIALIZATION"
+ },
+ {
+ "end": 38,
+ "start": 34,
+ "type": "REQUEST_INITIALIZATION"
+ },
+ {
+ "end": 41,
+ "start": 39,
+ "type": "REQUEST_EXECUTION"
+ },
+ {
+ "end": 45,
+ "start": 42,
+ "type": "RESPONSE_HANDLING"
+ }
+ ]
+ },
+ {
+ "clientMethod": {
+ "async": true,
+ "method": {
+ "service": {
+ "shortName": "MetadataService"
+ },
+ "shortName": "UpdateArtifact"
+ }
+ },
+ "file": "aiplatform_generated_aiplatform_v1beta1_metadata_service_update_artifact_async.py",
+ "regionTag": "aiplatform_generated_aiplatform_v1beta1_MetadataService_UpdateArtifact_async",
+ "segments": [
+ {
+ "end": 43,
+ "start": 27,
+ "type": "FULL"
+ },
+ {
+ "end": 43,
+ "start": 27,
+ "type": "SHORT"
+ },
+ {
+ "end": 33,
+ "start": 31,
+ "type": "CLIENT_INITIALIZATION"
+ },
+ {
+ "end": 37,
+ "start": 34,
+ "type": "REQUEST_INITIALIZATION"
+ },
+ {
+ "end": 40,
+ "start": 38,
+ "type": "REQUEST_EXECUTION"
+ },
+ {
+ "end": 44,
+ "start": 41,
+ "type": "RESPONSE_HANDLING"
+ }
+ ]
+ },
+ {
+ "clientMethod": {
+ "method": {
+ "service": {
+ "shortName": "MetadataService"
+ },
+ "shortName": "UpdateArtifact"
+ }
+ },
+ "file": "aiplatform_generated_aiplatform_v1beta1_metadata_service_update_artifact_sync.py",
+ "regionTag": "aiplatform_generated_aiplatform_v1beta1_MetadataService_UpdateArtifact_sync",
+ "segments": [
+ {
+ "end": 43,
+ "start": 27,
+ "type": "FULL"
+ },
+ {
+ "end": 43,
+ "start": 27,
+ "type": "SHORT"
+ },
+ {
+ "end": 33,
+ "start": 31,
+ "type": "CLIENT_INITIALIZATION"
+ },
+ {
+ "end": 37,
+ "start": 34,
+ "type": "REQUEST_INITIALIZATION"
+ },
+ {
+ "end": 40,
+ "start": 38,
+ "type": "REQUEST_EXECUTION"
+ },
+ {
+ "end": 44,
+ "start": 41,
+ "type": "RESPONSE_HANDLING"
+ }
+ ]
+ },
+ {
+ "clientMethod": {
+ "async": true,
+ "method": {
+ "service": {
+ "shortName": "MetadataService"
+ },
+ "shortName": "UpdateContext"
+ }
+ },
+ "file": "aiplatform_generated_aiplatform_v1beta1_metadata_service_update_context_async.py",
+ "regionTag": "aiplatform_generated_aiplatform_v1beta1_MetadataService_UpdateContext_async",
+ "segments": [
+ {
+ "end": 43,
+ "start": 27,
+ "type": "FULL"
+ },
+ {
+ "end": 43,
+ "start": 27,
+ "type": "SHORT"
+ },
+ {
+ "end": 33,
+ "start": 31,
+ "type": "CLIENT_INITIALIZATION"
+ },
+ {
+ "end": 37,
+ "start": 34,
+ "type": "REQUEST_INITIALIZATION"
+ },
+ {
+ "end": 40,
+ "start": 38,
+ "type": "REQUEST_EXECUTION"
+ },
+ {
+ "end": 44,
+ "start": 41,
+ "type": "RESPONSE_HANDLING"
+ }
+ ]
+ },
+ {
+ "clientMethod": {
+ "method": {
+ "service": {
+ "shortName": "MetadataService"
+ },
+ "shortName": "UpdateContext"
+ }
+ },
+ "file": "aiplatform_generated_aiplatform_v1beta1_metadata_service_update_context_sync.py",
+ "regionTag": "aiplatform_generated_aiplatform_v1beta1_MetadataService_UpdateContext_sync",
+ "segments": [
+ {
+ "end": 43,
+ "start": 27,
+ "type": "FULL"
+ },
+ {
+ "end": 43,
+ "start": 27,
+ "type": "SHORT"
+ },
+ {
+ "end": 33,
+ "start": 31,
+ "type": "CLIENT_INITIALIZATION"
+ },
+ {
+ "end": 37,
+ "start": 34,
+ "type": "REQUEST_INITIALIZATION"
+ },
+ {
+ "end": 40,
+ "start": 38,
+ "type": "REQUEST_EXECUTION"
+ },
+ {
+ "end": 44,
+ "start": 41,
+ "type": "RESPONSE_HANDLING"
+ }
+ ]
+ },
+ {
+ "clientMethod": {
+ "async": true,
+ "method": {
+ "service": {
+ "shortName": "MetadataService"
+ },
+ "shortName": "UpdateExecution"
+ }
+ },
+ "file": "aiplatform_generated_aiplatform_v1beta1_metadata_service_update_execution_async.py",
+ "regionTag": "aiplatform_generated_aiplatform_v1beta1_MetadataService_UpdateExecution_async",
+ "segments": [
+ {
+ "end": 43,
+ "start": 27,
+ "type": "FULL"
+ },
+ {
+ "end": 43,
+ "start": 27,
+ "type": "SHORT"
+ },
+ {
+ "end": 33,
+ "start": 31,
+ "type": "CLIENT_INITIALIZATION"
+ },
+ {
+ "end": 37,
+ "start": 34,
+ "type": "REQUEST_INITIALIZATION"
+ },
+ {
+ "end": 40,
+ "start": 38,
+ "type": "REQUEST_EXECUTION"
+ },
+ {
+ "end": 44,
+ "start": 41,
+ "type": "RESPONSE_HANDLING"
+ }
+ ]
+ },
+ {
+ "clientMethod": {
+ "method": {
+ "service": {
+ "shortName": "MetadataService"
+ },
+ "shortName": "UpdateExecution"
+ }
+ },
+ "file": "aiplatform_generated_aiplatform_v1beta1_metadata_service_update_execution_sync.py",
+ "regionTag": "aiplatform_generated_aiplatform_v1beta1_MetadataService_UpdateExecution_sync",
+ "segments": [
+ {
+ "end": 43,
+ "start": 27,
+ "type": "FULL"
+ },
+ {
+ "end": 43,
+ "start": 27,
+ "type": "SHORT"
+ },
+ {
+ "end": 33,
+ "start": 31,
+ "type": "CLIENT_INITIALIZATION"
+ },
+ {
+ "end": 37,
+ "start": 34,
+ "type": "REQUEST_INITIALIZATION"
+ },
+ {
+ "end": 40,
+ "start": 38,
+ "type": "REQUEST_EXECUTION"
+ },
+ {
+ "end": 44,
+ "start": 41,
+ "type": "RESPONSE_HANDLING"
+ }
+ ]
+ },
+ {
+ "clientMethod": {
+ "async": true,
+ "method": {
+ "service": {
+ "shortName": "MigrationService"
+ },
+ "shortName": "BatchMigrateResources"
+ }
+ },
+ "file": "aiplatform_generated_aiplatform_v1beta1_migration_service_batch_migrate_resources_async.py",
+ "regionTag": "aiplatform_generated_aiplatform_v1beta1_MigrationService_BatchMigrateResources_async",
+ "segments": [
+ {
+ "end": 54,
+ "start": 27,
+ "type": "FULL"
+ },
+ {
+ "end": 54,
+ "start": 27,
+ "type": "SHORT"
+ },
+ {
+ "end": 33,
+ "start": 31,
+ "type": "CLIENT_INITIALIZATION"
+ },
+ {
+ "end": 44,
+ "start": 34,
+ "type": "REQUEST_INITIALIZATION"
+ },
+ {
+ "end": 51,
+ "start": 45,
+ "type": "REQUEST_EXECUTION"
+ },
+ {
+ "end": 55,
+ "start": 52,
+ "type": "RESPONSE_HANDLING"
+ }
+ ]
+ },
+ {
+ "clientMethod": {
+ "method": {
+ "service": {
+ "shortName": "MigrationService"
+ },
+ "shortName": "BatchMigrateResources"
+ }
+ },
+ "file": "aiplatform_generated_aiplatform_v1beta1_migration_service_batch_migrate_resources_sync.py",
+ "regionTag": "aiplatform_generated_aiplatform_v1beta1_MigrationService_BatchMigrateResources_sync",
+ "segments": [
+ {
+ "end": 54,
+ "start": 27,
+ "type": "FULL"
+ },
+ {
+ "end": 54,
+ "start": 27,
+ "type": "SHORT"
+ },
+ {
+ "end": 33,
+ "start": 31,
+ "type": "CLIENT_INITIALIZATION"
+ },
+ {
+ "end": 44,
+ "start": 34,
+ "type": "REQUEST_INITIALIZATION"
+ },
+ {
+ "end": 51,
+ "start": 45,
+ "type": "REQUEST_EXECUTION"
+ },
+ {
+ "end": 55,
+ "start": 52,
+ "type": "RESPONSE_HANDLING"
+ }
+ ]
+ },
+ {
+ "clientMethod": {
+ "async": true,
+ "method": {
+ "service": {
+ "shortName": "MigrationService"
+ },
+ "shortName": "SearchMigratableResources"
+ }
+ },
+ "file": "aiplatform_generated_aiplatform_v1beta1_migration_service_search_migratable_resources_async.py",
+ "regionTag": "aiplatform_generated_aiplatform_v1beta1_MigrationService_SearchMigratableResources_async",
+ "segments": [
+ {
+ "end": 45,
+ "start": 27,
+ "type": "FULL"
+ },
+ {
+ "end": 45,
+ "start": 27,
+ "type": "SHORT"
+ },
+ {
+ "end": 33,
+ "start": 31,
+ "type": "CLIENT_INITIALIZATION"
+ },
+ {
+ "end": 38,
+ "start": 34,
+ "type": "REQUEST_INITIALIZATION"
+ },
+ {
+ "end": 41,
+ "start": 39,
+ "type": "REQUEST_EXECUTION"
+ },
+ {
+ "end": 46,
+ "start": 42,
+ "type": "RESPONSE_HANDLING"
+ }
+ ]
+ },
+ {
+ "clientMethod": {
+ "method": {
+ "service": {
+ "shortName": "MigrationService"
+ },
+ "shortName": "SearchMigratableResources"
+ }
+ },
+ "file": "aiplatform_generated_aiplatform_v1beta1_migration_service_search_migratable_resources_sync.py",
+ "regionTag": "aiplatform_generated_aiplatform_v1beta1_MigrationService_SearchMigratableResources_sync",
+ "segments": [
+ {
+ "end": 45,
+ "start": 27,
+ "type": "FULL"
+ },
+ {
+ "end": 45,
+ "start": 27,
+ "type": "SHORT"
+ },
+ {
+ "end": 33,
+ "start": 31,
+ "type": "CLIENT_INITIALIZATION"
+ },
+ {
+ "end": 38,
+ "start": 34,
+ "type": "REQUEST_INITIALIZATION"
+ },
+ {
+ "end": 41,
+ "start": 39,
+ "type": "REQUEST_EXECUTION"
+ },
+ {
+ "end": 46,
+ "start": 42,
+ "type": "RESPONSE_HANDLING"
+ }
+ ]
+ },
+ {
+ "clientMethod": {
+ "async": true,
+ "method": {
+ "service": {
+ "shortName": "ModelService"
+ },
+ "shortName": "DeleteModel"
+ }
+ },
+ "file": "aiplatform_generated_aiplatform_v1beta1_model_service_delete_model_async.py",
+ "regionTag": "aiplatform_generated_aiplatform_v1beta1_ModelService_DeleteModel_async",
+ "segments": [
+ {
+ "end": 48,
+ "start": 27,
+ "type": "FULL"
+ },
+ {
+ "end": 48,
+ "start": 27,
+ "type": "SHORT"
+ },
+ {
+ "end": 33,
+ "start": 31,
+ "type": "CLIENT_INITIALIZATION"
+ },
+ {
+ "end": 38,
+ "start": 34,
+ "type": "REQUEST_INITIALIZATION"
+ },
+ {
+ "end": 45,
+ "start": 39,
+ "type": "REQUEST_EXECUTION"
+ },
+ {
+ "end": 49,
+ "start": 46,
+ "type": "RESPONSE_HANDLING"
+ }
+ ]
+ },
+ {
+ "clientMethod": {
+ "method": {
+ "service": {
+ "shortName": "ModelService"
+ },
+ "shortName": "DeleteModel"
+ }
+ },
+ "file": "aiplatform_generated_aiplatform_v1beta1_model_service_delete_model_sync.py",
+ "regionTag": "aiplatform_generated_aiplatform_v1beta1_ModelService_DeleteModel_sync",
+ "segments": [
+ {
+ "end": 48,
+ "start": 27,
+ "type": "FULL"
+ },
+ {
+ "end": 48,
+ "start": 27,
+ "type": "SHORT"
+ },
+ {
+ "end": 33,
+ "start": 31,
+ "type": "CLIENT_INITIALIZATION"
+ },
+ {
+ "end": 38,
+ "start": 34,
+ "type": "REQUEST_INITIALIZATION"
+ },
+ {
+ "end": 45,
+ "start": 39,
+ "type": "REQUEST_EXECUTION"
+ },
+ {
+ "end": 49,
+ "start": 46,
+ "type": "RESPONSE_HANDLING"
+ }
+ ]
+ },
+ {
+ "clientMethod": {
+ "async": true,
+ "method": {
+ "service": {
+ "shortName": "ModelService"
+ },
+ "shortName": "ExportModel"
+ }
+ },
+ "file": "aiplatform_generated_aiplatform_v1beta1_model_service_export_model_async.py",
+ "regionTag": "aiplatform_generated_aiplatform_v1beta1_ModelService_ExportModel_async",
+ "segments": [
+ {
+ "end": 48,
+ "start": 27,
+ "type": "FULL"
+ },
+ {
+ "end": 48,
+ "start": 27,
+ "type": "SHORT"
+ },
+ {
+ "end": 33,
+ "start": 31,
+ "type": "CLIENT_INITIALIZATION"
+ },
+ {
+ "end": 38,
+ "start": 34,
+ "type": "REQUEST_INITIALIZATION"
+ },
+ {
+ "end": 45,
+ "start": 39,
+ "type": "REQUEST_EXECUTION"
+ },
+ {
+ "end": 49,
+ "start": 46,
+ "type": "RESPONSE_HANDLING"
+ }
+ ]
+ },
+ {
+ "clientMethod": {
+ "method": {
+ "service": {
+ "shortName": "ModelService"
+ },
+ "shortName": "ExportModel"
+ }
+ },
+ "file": "aiplatform_generated_aiplatform_v1beta1_model_service_export_model_sync.py",
+ "regionTag": "aiplatform_generated_aiplatform_v1beta1_ModelService_ExportModel_sync",
+ "segments": [
+ {
+ "end": 48,
+ "start": 27,
+ "type": "FULL"
+ },
+ {
+ "end": 48,
+ "start": 27,
+ "type": "SHORT"
+ },
+ {
+ "end": 33,
+ "start": 31,
+ "type": "CLIENT_INITIALIZATION"
+ },
+ {
+ "end": 38,
+ "start": 34,
+ "type": "REQUEST_INITIALIZATION"
+ },
+ {
+ "end": 45,
+ "start": 39,
+ "type": "REQUEST_EXECUTION"
+ },
+ {
+ "end": 49,
+ "start": 46,
+ "type": "RESPONSE_HANDLING"
+ }
+ ]
+ },
+ {
+ "clientMethod": {
+ "async": true,
+ "method": {
+ "service": {
+ "shortName": "ModelService"
+ },
+ "shortName": "GetModelEvaluationSlice"
+ }
+ },
+ "file": "aiplatform_generated_aiplatform_v1beta1_model_service_get_model_evaluation_slice_async.py",
+ "regionTag": "aiplatform_generated_aiplatform_v1beta1_ModelService_GetModelEvaluationSlice_async",
+ "segments": [
+ {
+ "end": 44,
+ "start": 27,
+ "type": "FULL"
+ },
+ {
+ "end": 44,
+ "start": 27,
+ "type": "SHORT"
+ },
+ {
+ "end": 33,
+ "start": 31,
+ "type": "CLIENT_INITIALIZATION"
+ },
+ {
+ "end": 38,
+ "start": 34,
+ "type": "REQUEST_INITIALIZATION"
+ },
+ {
+ "end": 41,
+ "start": 39,
+ "type": "REQUEST_EXECUTION"
+ },
+ {
+ "end": 45,
+ "start": 42,
+ "type": "RESPONSE_HANDLING"
+ }
+ ]
+ },
+ {
+ "clientMethod": {
+ "method": {
+ "service": {
+ "shortName": "ModelService"
+ },
+ "shortName": "GetModelEvaluationSlice"
+ }
+ },
+ "file": "aiplatform_generated_aiplatform_v1beta1_model_service_get_model_evaluation_slice_sync.py",
+ "regionTag": "aiplatform_generated_aiplatform_v1beta1_ModelService_GetModelEvaluationSlice_sync",
+ "segments": [
+ {
+ "end": 44,
+ "start": 27,
+ "type": "FULL"
+ },
+ {
+ "end": 44,
+ "start": 27,
+ "type": "SHORT"
+ },
+ {
+ "end": 33,
+ "start": 31,
+ "type": "CLIENT_INITIALIZATION"
+ },
+ {
+ "end": 38,
+ "start": 34,
+ "type": "REQUEST_INITIALIZATION"
+ },
+ {
+ "end": 41,
+ "start": 39,
+ "type": "REQUEST_EXECUTION"
+ },
+ {
+ "end": 45,
+ "start": 42,
+ "type": "RESPONSE_HANDLING"
+ }
+ ]
+ },
+ {
+ "clientMethod": {
+ "async": true,
+ "method": {
+ "service": {
+ "shortName": "ModelService"
+ },
+ "shortName": "GetModelEvaluation"
+ }
+ },
+ "file": "aiplatform_generated_aiplatform_v1beta1_model_service_get_model_evaluation_async.py",
+ "regionTag": "aiplatform_generated_aiplatform_v1beta1_ModelService_GetModelEvaluation_async",
+ "segments": [
+ {
+ "end": 44,
+ "start": 27,
+ "type": "FULL"
+ },
+ {
+ "end": 44,
+ "start": 27,
+ "type": "SHORT"
+ },
+ {
+ "end": 33,
+ "start": 31,
+ "type": "CLIENT_INITIALIZATION"
+ },
+ {
+ "end": 38,
+ "start": 34,
+ "type": "REQUEST_INITIALIZATION"
+ },
+ {
+ "end": 41,
+ "start": 39,
+ "type": "REQUEST_EXECUTION"
+ },
+ {
+ "end": 45,
+ "start": 42,
+ "type": "RESPONSE_HANDLING"
+ }
+ ]
+ },
+ {
+ "clientMethod": {
+ "method": {
+ "service": {
+ "shortName": "ModelService"
+ },
+ "shortName": "GetModelEvaluation"
+ }
+ },
+ "file": "aiplatform_generated_aiplatform_v1beta1_model_service_get_model_evaluation_sync.py",
+ "regionTag": "aiplatform_generated_aiplatform_v1beta1_ModelService_GetModelEvaluation_sync",
+ "segments": [
+ {
+ "end": 44,
+ "start": 27,
+ "type": "FULL"
+ },
+ {
+ "end": 44,
+ "start": 27,
+ "type": "SHORT"
+ },
+ {
+ "end": 33,
+ "start": 31,
+ "type": "CLIENT_INITIALIZATION"
+ },
+ {
+ "end": 38,
+ "start": 34,
+ "type": "REQUEST_INITIALIZATION"
+ },
+ {
+ "end": 41,
+ "start": 39,
+ "type": "REQUEST_EXECUTION"
+ },
+ {
+ "end": 45,
+ "start": 42,
+ "type": "RESPONSE_HANDLING"
+ }
+ ]
+ },
+ {
+ "clientMethod": {
+ "async": true,
+ "method": {
+ "service": {
+ "shortName": "ModelService"
+ },
+ "shortName": "GetModel"
+ }
+ },
+ "file": "aiplatform_generated_aiplatform_v1beta1_model_service_get_model_async.py",
+ "regionTag": "aiplatform_generated_aiplatform_v1beta1_ModelService_GetModel_async",
+ "segments": [
+ {
+ "end": 44,
+ "start": 27,
+ "type": "FULL"
+ },
+ {
+ "end": 44,
+ "start": 27,
+ "type": "SHORT"
+ },
+ {
+ "end": 33,
+ "start": 31,
+ "type": "CLIENT_INITIALIZATION"
+ },
+ {
+ "end": 38,
+ "start": 34,
+ "type": "REQUEST_INITIALIZATION"
+ },
+ {
+ "end": 41,
+ "start": 39,
+ "type": "REQUEST_EXECUTION"
+ },
+ {
+ "end": 45,
+ "start": 42,
+ "type": "RESPONSE_HANDLING"
+ }
+ ]
+ },
+ {
+ "clientMethod": {
+ "method": {
+ "service": {
+ "shortName": "ModelService"
+ },
+ "shortName": "GetModel"
+ }
+ },
+ "file": "aiplatform_generated_aiplatform_v1beta1_model_service_get_model_sync.py",
+ "regionTag": "aiplatform_generated_aiplatform_v1beta1_ModelService_GetModel_sync",
+ "segments": [
+ {
+ "end": 44,
+ "start": 27,
+ "type": "FULL"
+ },
+ {
+ "end": 44,
+ "start": 27,
+ "type": "SHORT"
+ },
+ {
+ "end": 33,
+ "start": 31,
+ "type": "CLIENT_INITIALIZATION"
+ },
+ {
+ "end": 38,
+ "start": 34,
+ "type": "REQUEST_INITIALIZATION"
+ },
+ {
+ "end": 41,
+ "start": 39,
+ "type": "REQUEST_EXECUTION"
+ },
+ {
+ "end": 45,
+ "start": 42,
+ "type": "RESPONSE_HANDLING"
+ }
+ ]
+ },
+ {
+ "clientMethod": {
+ "async": true,
+ "method": {
+ "service": {
+ "shortName": "ModelService"
+ },
+ "shortName": "ListModelEvaluationSlices"
+ }
+ },
+ "file": "aiplatform_generated_aiplatform_v1beta1_model_service_list_model_evaluation_slices_async.py",
+ "regionTag": "aiplatform_generated_aiplatform_v1beta1_ModelService_ListModelEvaluationSlices_async",
+ "segments": [
+ {
+ "end": 45,
+ "start": 27,
+ "type": "FULL"
+ },
+ {
+ "end": 45,
+ "start": 27,
+ "type": "SHORT"
+ },
+ {
+ "end": 33,
+ "start": 31,
+ "type": "CLIENT_INITIALIZATION"
+ },
+ {
+ "end": 38,
+ "start": 34,
+ "type": "REQUEST_INITIALIZATION"
+ },
+ {
+ "end": 41,
+ "start": 39,
+ "type": "REQUEST_EXECUTION"
+ },
+ {
+ "end": 46,
+ "start": 42,
+ "type": "RESPONSE_HANDLING"
+ }
+ ]
+ },
+ {
+ "clientMethod": {
+ "method": {
+ "service": {
+ "shortName": "ModelService"
+ },
+ "shortName": "ListModelEvaluationSlices"
+ }
+ },
+ "file": "aiplatform_generated_aiplatform_v1beta1_model_service_list_model_evaluation_slices_sync.py",
+ "regionTag": "aiplatform_generated_aiplatform_v1beta1_ModelService_ListModelEvaluationSlices_sync",
+ "segments": [
+ {
+ "end": 45,
+ "start": 27,
+ "type": "FULL"
+ },
+ {
+ "end": 45,
+ "start": 27,
+ "type": "SHORT"
+ },
+ {
+ "end": 33,
+ "start": 31,
+ "type": "CLIENT_INITIALIZATION"
+ },
+ {
+ "end": 38,
+ "start": 34,
+ "type": "REQUEST_INITIALIZATION"
+ },
+ {
+ "end": 41,
+ "start": 39,
+ "type": "REQUEST_EXECUTION"
+ },
+ {
+ "end": 46,
+ "start": 42,
+ "type": "RESPONSE_HANDLING"
+ }
+ ]
+ },
+ {
+ "clientMethod": {
+ "async": true,
+ "method": {
+ "service": {
+ "shortName": "ModelService"
+ },
+ "shortName": "ListModelEvaluations"
+ }
+ },
+ "file": "aiplatform_generated_aiplatform_v1beta1_model_service_list_model_evaluations_async.py",
+ "regionTag": "aiplatform_generated_aiplatform_v1beta1_ModelService_ListModelEvaluations_async",
+ "segments": [
+ {
+ "end": 45,
+ "start": 27,
+ "type": "FULL"
+ },
+ {
+ "end": 45,
+ "start": 27,
+ "type": "SHORT"
+ },
+ {
+ "end": 33,
+ "start": 31,
+ "type": "CLIENT_INITIALIZATION"
+ },
+ {
+ "end": 38,
+ "start": 34,
+ "type": "REQUEST_INITIALIZATION"
+ },
+ {
+ "end": 41,
+ "start": 39,
+ "type": "REQUEST_EXECUTION"
+ },
+ {
+ "end": 46,
+ "start": 42,
+ "type": "RESPONSE_HANDLING"
+ }
+ ]
+ },
+ {
+ "clientMethod": {
+ "method": {
+ "service": {
+ "shortName": "ModelService"
+ },
+ "shortName": "ListModelEvaluations"
+ }
+ },
+ "file": "aiplatform_generated_aiplatform_v1beta1_model_service_list_model_evaluations_sync.py",
+ "regionTag": "aiplatform_generated_aiplatform_v1beta1_ModelService_ListModelEvaluations_sync",
+ "segments": [
+ {
+ "end": 45,
+ "start": 27,
+ "type": "FULL"
+ },
+ {
+ "end": 45,
+ "start": 27,
+ "type": "SHORT"
+ },
+ {
+ "end": 33,
+ "start": 31,
+ "type": "CLIENT_INITIALIZATION"
+ },
+ {
+ "end": 38,
+ "start": 34,
+ "type": "REQUEST_INITIALIZATION"
+ },
+ {
+ "end": 41,
+ "start": 39,
+ "type": "REQUEST_EXECUTION"
+ },
+ {
+ "end": 46,
+ "start": 42,
+ "type": "RESPONSE_HANDLING"
+ }
+ ]
+ },
+ {
+ "clientMethod": {
+ "async": true,
+ "method": {
+ "service": {
+ "shortName": "ModelService"
+ },
+ "shortName": "ListModels"
+ }
+ },
+ "file": "aiplatform_generated_aiplatform_v1beta1_model_service_list_models_async.py",
+ "regionTag": "aiplatform_generated_aiplatform_v1beta1_ModelService_ListModels_async",
+ "segments": [
+ {
+ "end": 45,
+ "start": 27,
+ "type": "FULL"
+ },
+ {
+ "end": 45,
+ "start": 27,
+ "type": "SHORT"
+ },
+ {
+ "end": 33,
+ "start": 31,
+ "type": "CLIENT_INITIALIZATION"
+ },
+ {
+ "end": 38,
+ "start": 34,
+ "type": "REQUEST_INITIALIZATION"
+ },
+ {
+ "end": 41,
+ "start": 39,
+ "type": "REQUEST_EXECUTION"
+ },
+ {
+ "end": 46,
+ "start": 42,
+ "type": "RESPONSE_HANDLING"
+ }
+ ]
+ },
+ {
+ "clientMethod": {
+ "method": {
+ "service": {
+ "shortName": "ModelService"
+ },
+ "shortName": "ListModels"
+ }
+ },
+ "file": "aiplatform_generated_aiplatform_v1beta1_model_service_list_models_sync.py",
+ "regionTag": "aiplatform_generated_aiplatform_v1beta1_ModelService_ListModels_sync",
+ "segments": [
+ {
+ "end": 45,
+ "start": 27,
+ "type": "FULL"
+ },
+ {
+ "end": 45,
+ "start": 27,
+ "type": "SHORT"
+ },
+ {
+ "end": 33,
+ "start": 31,
+ "type": "CLIENT_INITIALIZATION"
+ },
+ {
+ "end": 38,
+ "start": 34,
+ "type": "REQUEST_INITIALIZATION"
+ },
+ {
+ "end": 41,
+ "start": 39,
+ "type": "REQUEST_EXECUTION"
+ },
+ {
+ "end": 46,
+ "start": 42,
+ "type": "RESPONSE_HANDLING"
+ }
+ ]
+ },
+ {
+ "clientMethod": {
+ "async": true,
+ "method": {
+ "service": {
+ "shortName": "ModelService"
+ },
+ "shortName": "UpdateModel"
+ }
+ },
+ "file": "aiplatform_generated_aiplatform_v1beta1_model_service_update_model_async.py",
+ "regionTag": "aiplatform_generated_aiplatform_v1beta1_ModelService_UpdateModel_async",
+ "segments": [
+ {
+ "end": 47,
+ "start": 27,
+ "type": "FULL"
+ },
+ {
+ "end": 47,
+ "start": 27,
+ "type": "SHORT"
+ },
+ {
+ "end": 33,
+ "start": 31,
+ "type": "CLIENT_INITIALIZATION"
+ },
+ {
+ "end": 41,
+ "start": 34,
+ "type": "REQUEST_INITIALIZATION"
+ },
+ {
+ "end": 44,
+ "start": 42,
+ "type": "REQUEST_EXECUTION"
+ },
+ {
+ "end": 48,
+ "start": 45,
+ "type": "RESPONSE_HANDLING"
+ }
+ ]
+ },
+ {
+ "clientMethod": {
+ "method": {
+ "service": {
+ "shortName": "ModelService"
+ },
+ "shortName": "UpdateModel"
+ }
+ },
+ "file": "aiplatform_generated_aiplatform_v1beta1_model_service_update_model_sync.py",
+ "regionTag": "aiplatform_generated_aiplatform_v1beta1_ModelService_UpdateModel_sync",
+ "segments": [
+ {
+ "end": 47,
+ "start": 27,
+ "type": "FULL"
+ },
+ {
+ "end": 47,
+ "start": 27,
+ "type": "SHORT"
+ },
+ {
+ "end": 33,
+ "start": 31,
+ "type": "CLIENT_INITIALIZATION"
+ },
+ {
+ "end": 41,
+ "start": 34,
+ "type": "REQUEST_INITIALIZATION"
+ },
+ {
+ "end": 44,
+ "start": 42,
+ "type": "REQUEST_EXECUTION"
+ },
+ {
+ "end": 48,
+ "start": 45,
+ "type": "RESPONSE_HANDLING"
+ }
+ ]
+ },
+ {
+ "clientMethod": {
+ "async": true,
+ "method": {
+ "service": {
+ "shortName": "ModelService"
+ },
+ "shortName": "UploadModel"
+ }
+ },
+ "file": "aiplatform_generated_aiplatform_v1beta1_model_service_upload_model_async.py",
+ "regionTag": "aiplatform_generated_aiplatform_v1beta1_ModelService_UploadModel_async",
+ "segments": [
+ {
+ "end": 52,
+ "start": 27,
+ "type": "FULL"
+ },
+ {
+ "end": 52,
+ "start": 27,
+ "type": "SHORT"
+ },
+ {
+ "end": 33,
+ "start": 31,
+ "type": "CLIENT_INITIALIZATION"
+ },
+ {
+ "end": 42,
+ "start": 34,
+ "type": "REQUEST_INITIALIZATION"
+ },
+ {
+ "end": 49,
+ "start": 43,
+ "type": "REQUEST_EXECUTION"
+ },
+ {
+ "end": 53,
+ "start": 50,
+ "type": "RESPONSE_HANDLING"
+ }
+ ]
+ },
+ {
+ "clientMethod": {
+ "method": {
+ "service": {
+ "shortName": "ModelService"
+ },
+ "shortName": "UploadModel"
+ }
+ },
+ "file": "aiplatform_generated_aiplatform_v1beta1_model_service_upload_model_sync.py",
+ "regionTag": "aiplatform_generated_aiplatform_v1beta1_ModelService_UploadModel_sync",
+ "segments": [
+ {
+ "end": 52,
+ "start": 27,
+ "type": "FULL"
+ },
+ {
+ "end": 52,
+ "start": 27,
+ "type": "SHORT"
+ },
+ {
+ "end": 33,
+ "start": 31,
+ "type": "CLIENT_INITIALIZATION"
+ },
+ {
+ "end": 42,
+ "start": 34,
+ "type": "REQUEST_INITIALIZATION"
+ },
+ {
+ "end": 49,
+ "start": 43,
+ "type": "REQUEST_EXECUTION"
+ },
+ {
+ "end": 53,
+ "start": 50,
+ "type": "RESPONSE_HANDLING"
+ }
+ ]
+ },
+ {
+ "clientMethod": {
+ "async": true,
+ "method": {
+ "service": {
+ "shortName": "PipelineService"
+ },
+ "shortName": "CancelPipelineJob"
+ }
+ },
+ "file": "aiplatform_generated_aiplatform_v1beta1_pipeline_service_cancel_pipeline_job_async.py",
+ "regionTag": "aiplatform_generated_aiplatform_v1beta1_PipelineService_CancelPipelineJob_async",
+ "segments": [
+ {
+ "end": 42,
+ "start": 27,
+ "type": "FULL"
+ },
+ {
+ "end": 42,
+ "start": 27,
+ "type": "SHORT"
+ },
+ {
+ "end": 33,
+ "start": 31,
+ "type": "CLIENT_INITIALIZATION"
+ },
+ {
+ "end": 38,
+ "start": 34,
+ "type": "REQUEST_INITIALIZATION"
+ },
+ {
+ "start": 39,
+ "type": "REQUEST_EXECUTION"
+ },
+ {
+ "end": 43,
+ "type": "RESPONSE_HANDLING"
+ }
+ ]
+ },
+ {
+ "clientMethod": {
+ "method": {
+ "service": {
+ "shortName": "PipelineService"
+ },
+ "shortName": "CancelPipelineJob"
+ }
+ },
+ "file": "aiplatform_generated_aiplatform_v1beta1_pipeline_service_cancel_pipeline_job_sync.py",
+ "regionTag": "aiplatform_generated_aiplatform_v1beta1_PipelineService_CancelPipelineJob_sync",
+ "segments": [
+ {
+ "end": 42,
+ "start": 27,
+ "type": "FULL"
+ },
+ {
+ "end": 42,
+ "start": 27,
+ "type": "SHORT"
+ },
+ {
+ "end": 33,
+ "start": 31,
+ "type": "CLIENT_INITIALIZATION"
+ },
+ {
+ "end": 38,
+ "start": 34,
+ "type": "REQUEST_INITIALIZATION"
+ },
+ {
+ "start": 39,
+ "type": "REQUEST_EXECUTION"
+ },
+ {
+ "end": 43,
+ "type": "RESPONSE_HANDLING"
+ }
+ ]
+ },
+ {
+ "clientMethod": {
+ "async": true,
+ "method": {
+ "service": {
+ "shortName": "PipelineService"
+ },
+ "shortName": "CancelTrainingPipeline"
+ }
+ },
+ "file": "aiplatform_generated_aiplatform_v1beta1_pipeline_service_cancel_training_pipeline_async.py",
+ "regionTag": "aiplatform_generated_aiplatform_v1beta1_PipelineService_CancelTrainingPipeline_async",
+ "segments": [
+ {
+ "end": 42,
+ "start": 27,
+ "type": "FULL"
+ },
+ {
+ "end": 42,
+ "start": 27,
+ "type": "SHORT"
+ },
+ {
+ "end": 33,
+ "start": 31,
+ "type": "CLIENT_INITIALIZATION"
+ },
+ {
+ "end": 38,
+ "start": 34,
+ "type": "REQUEST_INITIALIZATION"
+ },
+ {
+ "start": 39,
+ "type": "REQUEST_EXECUTION"
+ },
+ {
+ "end": 43,
+ "type": "RESPONSE_HANDLING"
+ }
+ ]
+ },
+ {
+ "clientMethod": {
+ "method": {
+ "service": {
+ "shortName": "PipelineService"
+ },
+ "shortName": "CancelTrainingPipeline"
+ }
+ },
+ "file": "aiplatform_generated_aiplatform_v1beta1_pipeline_service_cancel_training_pipeline_sync.py",
+ "regionTag": "aiplatform_generated_aiplatform_v1beta1_PipelineService_CancelTrainingPipeline_sync",
+ "segments": [
+ {
+ "end": 42,
+ "start": 27,
+ "type": "FULL"
+ },
+ {
+ "end": 42,
+ "start": 27,
+ "type": "SHORT"
+ },
+ {
+ "end": 33,
+ "start": 31,
+ "type": "CLIENT_INITIALIZATION"
+ },
+ {
+ "end": 38,
+ "start": 34,
+ "type": "REQUEST_INITIALIZATION"
+ },
+ {
+ "start": 39,
+ "type": "REQUEST_EXECUTION"
+ },
+ {
+ "end": 43,
+ "type": "RESPONSE_HANDLING"
+ }
+ ]
+ },
+ {
+ "clientMethod": {
+ "async": true,
+ "method": {
+ "service": {
+ "shortName": "PipelineService"
+ },
+ "shortName": "CreatePipelineJob"
+ }
+ },
+ "file": "aiplatform_generated_aiplatform_v1beta1_pipeline_service_create_pipeline_job_async.py",
+ "regionTag": "aiplatform_generated_aiplatform_v1beta1_PipelineService_CreatePipelineJob_async",
+ "segments": [
+ {
+ "end": 44,
+ "start": 27,
+ "type": "FULL"
+ },
+ {
+ "end": 44,
+ "start": 27,
+ "type": "SHORT"
+ },
+ {
+ "end": 33,
+ "start": 31,
+ "type": "CLIENT_INITIALIZATION"
+ },
+ {
+ "end": 38,
+ "start": 34,
+ "type": "REQUEST_INITIALIZATION"
+ },
+ {
+ "end": 41,
+ "start": 39,
+ "type": "REQUEST_EXECUTION"
+ },
+ {
+ "end": 45,
+ "start": 42,
+ "type": "RESPONSE_HANDLING"
+ }
+ ]
+ },
+ {
+ "clientMethod": {
+ "method": {
+ "service": {
+ "shortName": "PipelineService"
+ },
+ "shortName": "CreatePipelineJob"
+ }
+ },
+ "file": "aiplatform_generated_aiplatform_v1beta1_pipeline_service_create_pipeline_job_sync.py",
+ "regionTag": "aiplatform_generated_aiplatform_v1beta1_PipelineService_CreatePipelineJob_sync",
+ "segments": [
+ {
+ "end": 44,
+ "start": 27,
+ "type": "FULL"
+ },
+ {
+ "end": 44,
+ "start": 27,
+ "type": "SHORT"
+ },
+ {
+ "end": 33,
+ "start": 31,
+ "type": "CLIENT_INITIALIZATION"
+ },
+ {
+ "end": 38,
+ "start": 34,
+ "type": "REQUEST_INITIALIZATION"
+ },
+ {
+ "end": 41,
+ "start": 39,
+ "type": "REQUEST_EXECUTION"
+ },
+ {
+ "end": 45,
+ "start": 42,
+ "type": "RESPONSE_HANDLING"
+ }
+ ]
+ },
+ {
+ "clientMethod": {
+ "async": true,
+ "method": {
+ "service": {
+ "shortName": "PipelineService"
+ },
+ "shortName": "CreateTrainingPipeline"
+ }
+ },
+ "file": "aiplatform_generated_aiplatform_v1beta1_pipeline_service_create_training_pipeline_async.py",
+ "regionTag": "aiplatform_generated_aiplatform_v1beta1_PipelineService_CreateTrainingPipeline_async",
+ "segments": [
+ {
+ "end": 50,
+ "start": 27,
+ "type": "FULL"
+ },
+ {
+ "end": 50,
+ "start": 27,
+ "type": "SHORT"
+ },
+ {
+ "end": 33,
+ "start": 31,
+ "type": "CLIENT_INITIALIZATION"
+ },
+ {
+ "end": 44,
+ "start": 34,
+ "type": "REQUEST_INITIALIZATION"
+ },
+ {
+ "end": 47,
+ "start": 45,
+ "type": "REQUEST_EXECUTION"
+ },
+ {
+ "end": 51,
+ "start": 48,
+ "type": "RESPONSE_HANDLING"
+ }
+ ]
+ },
+ {
+ "clientMethod": {
+ "method": {
+ "service": {
+ "shortName": "PipelineService"
+ },
+ "shortName": "CreateTrainingPipeline"
+ }
+ },
+ "file": "aiplatform_generated_aiplatform_v1beta1_pipeline_service_create_training_pipeline_sync.py",
+ "regionTag": "aiplatform_generated_aiplatform_v1beta1_PipelineService_CreateTrainingPipeline_sync",
+ "segments": [
+ {
+ "end": 50,
+ "start": 27,
+ "type": "FULL"
+ },
+ {
+ "end": 50,
+ "start": 27,
+ "type": "SHORT"
+ },
+ {
+ "end": 33,
+ "start": 31,
+ "type": "CLIENT_INITIALIZATION"
+ },
+ {
+ "end": 44,
+ "start": 34,
+ "type": "REQUEST_INITIALIZATION"
+ },
+ {
+ "end": 47,
+ "start": 45,
+ "type": "REQUEST_EXECUTION"
+ },
+ {
+ "end": 51,
+ "start": 48,
+ "type": "RESPONSE_HANDLING"
+ }
+ ]
+ },
+ {
+ "clientMethod": {
+ "async": true,
+ "method": {
+ "service": {
+ "shortName": "PipelineService"
+ },
+ "shortName": "DeletePipelineJob"
+ }
+ },
+ "file": "aiplatform_generated_aiplatform_v1beta1_pipeline_service_delete_pipeline_job_async.py",
+ "regionTag": "aiplatform_generated_aiplatform_v1beta1_PipelineService_DeletePipelineJob_async",
+ "segments": [
+ {
+ "end": 48,
+ "start": 27,
+ "type": "FULL"
+ },
+ {
+ "end": 48,
+ "start": 27,
+ "type": "SHORT"
+ },
+ {
+ "end": 33,
+ "start": 31,
+ "type": "CLIENT_INITIALIZATION"
+ },
+ {
+ "end": 38,
+ "start": 34,
+ "type": "REQUEST_INITIALIZATION"
+ },
+ {
+ "end": 45,
+ "start": 39,
+ "type": "REQUEST_EXECUTION"
+ },
+ {
+ "end": 49,
+ "start": 46,
+ "type": "RESPONSE_HANDLING"
+ }
+ ]
+ },
+ {
+ "clientMethod": {
+ "method": {
+ "service": {
+ "shortName": "PipelineService"
+ },
+ "shortName": "DeletePipelineJob"
+ }
+ },
+ "file": "aiplatform_generated_aiplatform_v1beta1_pipeline_service_delete_pipeline_job_sync.py",
+ "regionTag": "aiplatform_generated_aiplatform_v1beta1_PipelineService_DeletePipelineJob_sync",
+ "segments": [
+ {
+ "end": 48,
+ "start": 27,
+ "type": "FULL"
+ },
+ {
+ "end": 48,
+ "start": 27,
+ "type": "SHORT"
+ },
+ {
+ "end": 33,
+ "start": 31,
+ "type": "CLIENT_INITIALIZATION"
+ },
+ {
+ "end": 38,
+ "start": 34,
+ "type": "REQUEST_INITIALIZATION"
+ },
+ {
+ "end": 45,
+ "start": 39,
+ "type": "REQUEST_EXECUTION"
+ },
+ {
+ "end": 49,
+ "start": 46,
+ "type": "RESPONSE_HANDLING"
+ }
+ ]
+ },
+ {
+ "clientMethod": {
+ "async": true,
+ "method": {
+ "service": {
+ "shortName": "PipelineService"
+ },
+ "shortName": "DeleteTrainingPipeline"
+ }
+ },
+ "file": "aiplatform_generated_aiplatform_v1beta1_pipeline_service_delete_training_pipeline_async.py",
+ "regionTag": "aiplatform_generated_aiplatform_v1beta1_PipelineService_DeleteTrainingPipeline_async",
+ "segments": [
+ {
+ "end": 48,
+ "start": 27,
+ "type": "FULL"
+ },
+ {
+ "end": 48,
+ "start": 27,
+ "type": "SHORT"
+ },
+ {
+ "end": 33,
+ "start": 31,
+ "type": "CLIENT_INITIALIZATION"
+ },
+ {
+ "end": 38,
+ "start": 34,
+ "type": "REQUEST_INITIALIZATION"
+ },
+ {
+ "end": 45,
+ "start": 39,
+ "type": "REQUEST_EXECUTION"
+ },
+ {
+ "end": 49,
+ "start": 46,
+ "type": "RESPONSE_HANDLING"
+ }
+ ]
+ },
+ {
+ "clientMethod": {
+ "method": {
+ "service": {
+ "shortName": "PipelineService"
+ },
+ "shortName": "DeleteTrainingPipeline"
+ }
+ },
+ "file": "aiplatform_generated_aiplatform_v1beta1_pipeline_service_delete_training_pipeline_sync.py",
+ "regionTag": "aiplatform_generated_aiplatform_v1beta1_PipelineService_DeleteTrainingPipeline_sync",
+ "segments": [
+ {
+ "end": 48,
+ "start": 27,
+ "type": "FULL"
+ },
+ {
+ "end": 48,
+ "start": 27,
+ "type": "SHORT"
+ },
+ {
+ "end": 33,
+ "start": 31,
+ "type": "CLIENT_INITIALIZATION"
+ },
+ {
+ "end": 38,
+ "start": 34,
+ "type": "REQUEST_INITIALIZATION"
+ },
+ {
+ "end": 45,
+ "start": 39,
+ "type": "REQUEST_EXECUTION"
+ },
+ {
+ "end": 49,
+ "start": 46,
+ "type": "RESPONSE_HANDLING"
+ }
+ ]
+ },
+ {
+ "clientMethod": {
+ "async": true,
+ "method": {
+ "service": {
+ "shortName": "PipelineService"
+ },
+ "shortName": "GetPipelineJob"
+ }
+ },
+ "file": "aiplatform_generated_aiplatform_v1beta1_pipeline_service_get_pipeline_job_async.py",
+ "regionTag": "aiplatform_generated_aiplatform_v1beta1_PipelineService_GetPipelineJob_async",
+ "segments": [
+ {
+ "end": 44,
+ "start": 27,
+ "type": "FULL"
+ },
+ {
+ "end": 44,
+ "start": 27,
+ "type": "SHORT"
+ },
+ {
+ "end": 33,
+ "start": 31,
+ "type": "CLIENT_INITIALIZATION"
+ },
+ {
+ "end": 38,
+ "start": 34,
+ "type": "REQUEST_INITIALIZATION"
+ },
+ {
+ "end": 41,
+ "start": 39,
+ "type": "REQUEST_EXECUTION"
+ },
+ {
+ "end": 45,
+ "start": 42,
+ "type": "RESPONSE_HANDLING"
+ }
+ ]
+ },
+ {
+ "clientMethod": {
+ "method": {
+ "service": {
+ "shortName": "PipelineService"
+ },
+ "shortName": "GetPipelineJob"
+ }
+ },
+ "file": "aiplatform_generated_aiplatform_v1beta1_pipeline_service_get_pipeline_job_sync.py",
+ "regionTag": "aiplatform_generated_aiplatform_v1beta1_PipelineService_GetPipelineJob_sync",
+ "segments": [
+ {
+ "end": 44,
+ "start": 27,
+ "type": "FULL"
+ },
+ {
+ "end": 44,
+ "start": 27,
+ "type": "SHORT"
+ },
+ {
+ "end": 33,
+ "start": 31,
+ "type": "CLIENT_INITIALIZATION"
+ },
+ {
+ "end": 38,
+ "start": 34,
+ "type": "REQUEST_INITIALIZATION"
+ },
+ {
+ "end": 41,
+ "start": 39,
+ "type": "REQUEST_EXECUTION"
+ },
+ {
+ "end": 45,
+ "start": 42,
+ "type": "RESPONSE_HANDLING"
+ }
+ ]
+ },
+ {
+ "clientMethod": {
+ "async": true,
+ "method": {
+ "service": {
+ "shortName": "PipelineService"
+ },
+ "shortName": "GetTrainingPipeline"
+ }
+ },
+ "file": "aiplatform_generated_aiplatform_v1beta1_pipeline_service_get_training_pipeline_async.py",
+ "regionTag": "aiplatform_generated_aiplatform_v1beta1_PipelineService_GetTrainingPipeline_async",
+ "segments": [
+ {
+ "end": 44,
+ "start": 27,
+ "type": "FULL"
+ },
+ {
+ "end": 44,
+ "start": 27,
+ "type": "SHORT"
+ },
+ {
+ "end": 33,
+ "start": 31,
+ "type": "CLIENT_INITIALIZATION"
+ },
+ {
+ "end": 38,
+ "start": 34,
+ "type": "REQUEST_INITIALIZATION"
+ },
+ {
+ "end": 41,
+ "start": 39,
+ "type": "REQUEST_EXECUTION"
+ },
+ {
+ "end": 45,
+ "start": 42,
+ "type": "RESPONSE_HANDLING"
+ }
+ ]
+ },
+ {
+ "clientMethod": {
+ "method": {
+ "service": {
+ "shortName": "PipelineService"
+ },
+ "shortName": "GetTrainingPipeline"
+ }
+ },
+ "file": "aiplatform_generated_aiplatform_v1beta1_pipeline_service_get_training_pipeline_sync.py",
+ "regionTag": "aiplatform_generated_aiplatform_v1beta1_PipelineService_GetTrainingPipeline_sync",
+ "segments": [
+ {
+ "end": 44,
+ "start": 27,
+ "type": "FULL"
+ },
+ {
+ "end": 44,
+ "start": 27,
+ "type": "SHORT"
+ },
+ {
+ "end": 33,
+ "start": 31,
+ "type": "CLIENT_INITIALIZATION"
+ },
+ {
+ "end": 38,
+ "start": 34,
+ "type": "REQUEST_INITIALIZATION"
+ },
+ {
+ "end": 41,
+ "start": 39,
+ "type": "REQUEST_EXECUTION"
+ },
+ {
+ "end": 45,
+ "start": 42,
+ "type": "RESPONSE_HANDLING"
+ }
+ ]
+ },
+ {
+ "clientMethod": {
+ "async": true,
+ "method": {
+ "service": {
+ "shortName": "PipelineService"
+ },
+ "shortName": "ListPipelineJobs"
+ }
+ },
+ "file": "aiplatform_generated_aiplatform_v1beta1_pipeline_service_list_pipeline_jobs_async.py",
+ "regionTag": "aiplatform_generated_aiplatform_v1beta1_PipelineService_ListPipelineJobs_async",
+ "segments": [
+ {
+ "end": 45,
+ "start": 27,
+ "type": "FULL"
+ },
+ {
+ "end": 45,
+ "start": 27,
+ "type": "SHORT"
+ },
+ {
+ "end": 33,
+ "start": 31,
+ "type": "CLIENT_INITIALIZATION"
+ },
+ {
+ "end": 38,
+ "start": 34,
+ "type": "REQUEST_INITIALIZATION"
+ },
+ {
+ "end": 41,
+ "start": 39,
+ "type": "REQUEST_EXECUTION"
+ },
+ {
+ "end": 46,
+ "start": 42,
+ "type": "RESPONSE_HANDLING"
+ }
+ ]
+ },
+ {
+ "clientMethod": {
+ "method": {
+ "service": {
+ "shortName": "PipelineService"
+ },
+ "shortName": "ListPipelineJobs"
+ }
+ },
+ "file": "aiplatform_generated_aiplatform_v1beta1_pipeline_service_list_pipeline_jobs_sync.py",
+ "regionTag": "aiplatform_generated_aiplatform_v1beta1_PipelineService_ListPipelineJobs_sync",
+ "segments": [
+ {
+ "end": 45,
+ "start": 27,
+ "type": "FULL"
+ },
+ {
+ "end": 45,
+ "start": 27,
+ "type": "SHORT"
+ },
+ {
+ "end": 33,
+ "start": 31,
+ "type": "CLIENT_INITIALIZATION"
+ },
+ {
+ "end": 38,
+ "start": 34,
+ "type": "REQUEST_INITIALIZATION"
+ },
+ {
+ "end": 41,
+ "start": 39,
+ "type": "REQUEST_EXECUTION"
+ },
+ {
+ "end": 46,
+ "start": 42,
+ "type": "RESPONSE_HANDLING"
+ }
+ ]
+ },
+ {
+ "clientMethod": {
+ "async": true,
+ "method": {
+ "service": {
+ "shortName": "PipelineService"
+ },
+ "shortName": "ListTrainingPipelines"
+ }
+ },
+ "file": "aiplatform_generated_aiplatform_v1beta1_pipeline_service_list_training_pipelines_async.py",
+ "regionTag": "aiplatform_generated_aiplatform_v1beta1_PipelineService_ListTrainingPipelines_async",
+ "segments": [
+ {
+ "end": 45,
+ "start": 27,
+ "type": "FULL"
+ },
+ {
+ "end": 45,
+ "start": 27,
+ "type": "SHORT"
+ },
+ {
+ "end": 33,
+ "start": 31,
+ "type": "CLIENT_INITIALIZATION"
+ },
+ {
+ "end": 38,
+ "start": 34,
+ "type": "REQUEST_INITIALIZATION"
+ },
+ {
+ "end": 41,
+ "start": 39,
+ "type": "REQUEST_EXECUTION"
+ },
+ {
+ "end": 46,
+ "start": 42,
+ "type": "RESPONSE_HANDLING"
+ }
+ ]
+ },
+ {
+ "clientMethod": {
+ "method": {
+ "service": {
+ "shortName": "PipelineService"
+ },
+ "shortName": "ListTrainingPipelines"
+ }
+ },
+ "file": "aiplatform_generated_aiplatform_v1beta1_pipeline_service_list_training_pipelines_sync.py",
+ "regionTag": "aiplatform_generated_aiplatform_v1beta1_PipelineService_ListTrainingPipelines_sync",
+ "segments": [
+ {
+ "end": 45,
+ "start": 27,
+ "type": "FULL"
+ },
+ {
+ "end": 45,
+ "start": 27,
+ "type": "SHORT"
+ },
+ {
+ "end": 33,
+ "start": 31,
+ "type": "CLIENT_INITIALIZATION"
+ },
+ {
+ "end": 38,
+ "start": 34,
+ "type": "REQUEST_INITIALIZATION"
+ },
+ {
+ "end": 41,
+ "start": 39,
+ "type": "REQUEST_EXECUTION"
+ },
+ {
+ "end": 46,
+ "start": 42,
+ "type": "RESPONSE_HANDLING"
+ }
+ ]
+ },
+ {
+ "clientMethod": {
+ "async": true,
+ "method": {
+ "service": {
+ "shortName": "PredictionService"
+ },
+ "shortName": "Explain"
+ }
+ },
+ "file": "aiplatform_generated_aiplatform_v1beta1_prediction_service_explain_async.py",
+ "regionTag": "aiplatform_generated_aiplatform_v1beta1_PredictionService_Explain_async",
+ "segments": [
+ {
+ "end": 48,
+ "start": 27,
+ "type": "FULL"
+ },
+ {
+ "end": 48,
+ "start": 27,
+ "type": "SHORT"
+ },
+ {
+ "end": 33,
+ "start": 31,
+ "type": "CLIENT_INITIALIZATION"
+ },
+ {
+ "end": 42,
+ "start": 34,
+ "type": "REQUEST_INITIALIZATION"
+ },
+ {
+ "end": 45,
+ "start": 43,
+ "type": "REQUEST_EXECUTION"
+ },
+ {
+ "end": 49,
+ "start": 46,
+ "type": "RESPONSE_HANDLING"
+ }
+ ]
+ },
+ {
+ "clientMethod": {
+ "method": {
+ "service": {
+ "shortName": "PredictionService"
+ },
+ "shortName": "Explain"
+ }
+ },
+ "file": "aiplatform_generated_aiplatform_v1beta1_prediction_service_explain_sync.py",
+ "regionTag": "aiplatform_generated_aiplatform_v1beta1_PredictionService_Explain_sync",
+ "segments": [
+ {
+ "end": 48,
+ "start": 27,
+ "type": "FULL"
+ },
+ {
+ "end": 48,
+ "start": 27,
+ "type": "SHORT"
+ },
+ {
+ "end": 33,
+ "start": 31,
+ "type": "CLIENT_INITIALIZATION"
+ },
+ {
+ "end": 42,
+ "start": 34,
+ "type": "REQUEST_INITIALIZATION"
+ },
+ {
+ "end": 45,
+ "start": 43,
+ "type": "REQUEST_EXECUTION"
+ },
+ {
+ "end": 49,
+ "start": 46,
+ "type": "RESPONSE_HANDLING"
+ }
+ ]
+ },
+ {
+ "clientMethod": {
+ "async": true,
+ "method": {
+ "service": {
+ "shortName": "PredictionService"
+ },
+ "shortName": "Predict"
+ }
+ },
+ "file": "aiplatform_generated_aiplatform_v1beta1_prediction_service_predict_async.py",
+ "regionTag": "aiplatform_generated_aiplatform_v1beta1_PredictionService_Predict_async",
+ "segments": [
+ {
+ "end": 48,
+ "start": 27,
+ "type": "FULL"
+ },
+ {
+ "end": 48,
+ "start": 27,
+ "type": "SHORT"
+ },
+ {
+ "end": 33,
+ "start": 31,
+ "type": "CLIENT_INITIALIZATION"
+ },
+ {
+ "end": 42,
+ "start": 34,
+ "type": "REQUEST_INITIALIZATION"
+ },
+ {
+ "end": 45,
+ "start": 43,
+ "type": "REQUEST_EXECUTION"
+ },
+ {
+ "end": 49,
+ "start": 46,
+ "type": "RESPONSE_HANDLING"
+ }
+ ]
+ },
+ {
+ "clientMethod": {
+ "method": {
+ "service": {
+ "shortName": "PredictionService"
+ },
+ "shortName": "Predict"
+ }
+ },
+ "file": "aiplatform_generated_aiplatform_v1beta1_prediction_service_predict_sync.py",
+ "regionTag": "aiplatform_generated_aiplatform_v1beta1_PredictionService_Predict_sync",
+ "segments": [
+ {
+ "end": 48,
+ "start": 27,
+ "type": "FULL"
+ },
+ {
+ "end": 48,
+ "start": 27,
+ "type": "SHORT"
+ },
+ {
+ "end": 33,
+ "start": 31,
+ "type": "CLIENT_INITIALIZATION"
+ },
+ {
+ "end": 42,
+ "start": 34,
+ "type": "REQUEST_INITIALIZATION"
+ },
+ {
+ "end": 45,
+ "start": 43,
+ "type": "REQUEST_EXECUTION"
+ },
+ {
+ "end": 49,
+ "start": 46,
+ "type": "RESPONSE_HANDLING"
+ }
+ ]
+ },
+ {
+ "clientMethod": {
+ "async": true,
+ "method": {
+ "service": {
+ "shortName": "PredictionService"
+ },
+ "shortName": "RawPredict"
+ }
+ },
+ "file": "aiplatform_generated_aiplatform_v1beta1_prediction_service_raw_predict_async.py",
+ "regionTag": "aiplatform_generated_aiplatform_v1beta1_PredictionService_RawPredict_async",
+ "segments": [
+ {
+ "end": 44,
+ "start": 27,
+ "type": "FULL"
+ },
+ {
+ "end": 44,
+ "start": 27,
+ "type": "SHORT"
+ },
+ {
+ "end": 33,
+ "start": 31,
+ "type": "CLIENT_INITIALIZATION"
+ },
+ {
+ "end": 38,
+ "start": 34,
+ "type": "REQUEST_INITIALIZATION"
+ },
+ {
+ "end": 41,
+ "start": 39,
+ "type": "REQUEST_EXECUTION"
+ },
+ {
+ "end": 45,
+ "start": 42,
+ "type": "RESPONSE_HANDLING"
+ }
+ ]
+ },
+ {
+ "clientMethod": {
+ "method": {
+ "service": {
+ "shortName": "PredictionService"
+ },
+ "shortName": "RawPredict"
+ }
+ },
+ "file": "aiplatform_generated_aiplatform_v1beta1_prediction_service_raw_predict_sync.py",
+ "regionTag": "aiplatform_generated_aiplatform_v1beta1_PredictionService_RawPredict_sync",
+ "segments": [
+ {
+ "end": 44,
+ "start": 27,
+ "type": "FULL"
+ },
+ {
+ "end": 44,
+ "start": 27,
+ "type": "SHORT"
+ },
+ {
+ "end": 33,
+ "start": 31,
+ "type": "CLIENT_INITIALIZATION"
+ },
+ {
+ "end": 38,
+ "start": 34,
+ "type": "REQUEST_INITIALIZATION"
+ },
+ {
+ "end": 41,
+ "start": 39,
+ "type": "REQUEST_EXECUTION"
+ },
+ {
+ "end": 45,
+ "start": 42,
+ "type": "RESPONSE_HANDLING"
+ }
+ ]
+ },
+ {
+ "clientMethod": {
+ "async": true,
+ "method": {
+ "service": {
+ "shortName": "SpecialistPoolService"
+ },
+ "shortName": "CreateSpecialistPool"
+ }
+ },
+ "file": "aiplatform_generated_aiplatform_v1beta1_specialist_pool_service_create_specialist_pool_async.py",
+ "regionTag": "aiplatform_generated_aiplatform_v1beta1_SpecialistPoolService_CreateSpecialistPool_async",
+ "segments": [
+ {
+ "end": 53,
+ "start": 27,
+ "type": "FULL"
+ },
+ {
+ "end": 53,
+ "start": 27,
+ "type": "SHORT"
+ },
+ {
+ "end": 33,
+ "start": 31,
+ "type": "CLIENT_INITIALIZATION"
+ },
+ {
+ "end": 43,
+ "start": 34,
+ "type": "REQUEST_INITIALIZATION"
+ },
+ {
+ "end": 50,
+ "start": 44,
+ "type": "REQUEST_EXECUTION"
+ },
+ {
+ "end": 54,
+ "start": 51,
+ "type": "RESPONSE_HANDLING"
+ }
+ ]
+ },
+ {
+ "clientMethod": {
+ "method": {
+ "service": {
+ "shortName": "SpecialistPoolService"
+ },
+ "shortName": "CreateSpecialistPool"
+ }
+ },
+ "file": "aiplatform_generated_aiplatform_v1beta1_specialist_pool_service_create_specialist_pool_sync.py",
+ "regionTag": "aiplatform_generated_aiplatform_v1beta1_SpecialistPoolService_CreateSpecialistPool_sync",
+ "segments": [
+ {
+ "end": 53,
+ "start": 27,
+ "type": "FULL"
+ },
+ {
+ "end": 53,
+ "start": 27,
+ "type": "SHORT"
+ },
+ {
+ "end": 33,
+ "start": 31,
+ "type": "CLIENT_INITIALIZATION"
+ },
+ {
+ "end": 43,
+ "start": 34,
+ "type": "REQUEST_INITIALIZATION"
+ },
+ {
+ "end": 50,
+ "start": 44,
+ "type": "REQUEST_EXECUTION"
+ },
+ {
+ "end": 54,
+ "start": 51,
+ "type": "RESPONSE_HANDLING"
+ }
+ ]
+ },
+ {
+ "clientMethod": {
+ "async": true,
+ "method": {
+ "service": {
+ "shortName": "SpecialistPoolService"
+ },
+ "shortName": "DeleteSpecialistPool"
+ }
+ },
+ "file": "aiplatform_generated_aiplatform_v1beta1_specialist_pool_service_delete_specialist_pool_async.py",
+ "regionTag": "aiplatform_generated_aiplatform_v1beta1_SpecialistPoolService_DeleteSpecialistPool_async",
+ "segments": [
+ {
+ "end": 48,
+ "start": 27,
+ "type": "FULL"
+ },
+ {
+ "end": 48,
+ "start": 27,
+ "type": "SHORT"
+ },
+ {
+ "end": 33,
+ "start": 31,
+ "type": "CLIENT_INITIALIZATION"
+ },
+ {
+ "end": 38,
+ "start": 34,
+ "type": "REQUEST_INITIALIZATION"
+ },
+ {
+ "end": 45,
+ "start": 39,
+ "type": "REQUEST_EXECUTION"
+ },
+ {
+ "end": 49,
+ "start": 46,
+ "type": "RESPONSE_HANDLING"
+ }
+ ]
+ },
+ {
+ "clientMethod": {
+ "method": {
+ "service": {
+ "shortName": "SpecialistPoolService"
+ },
+ "shortName": "DeleteSpecialistPool"
+ }
+ },
+ "file": "aiplatform_generated_aiplatform_v1beta1_specialist_pool_service_delete_specialist_pool_sync.py",
+ "regionTag": "aiplatform_generated_aiplatform_v1beta1_SpecialistPoolService_DeleteSpecialistPool_sync",
+ "segments": [
+ {
+ "end": 48,
+ "start": 27,
+ "type": "FULL"
+ },
+ {
+ "end": 48,
+ "start": 27,
+ "type": "SHORT"
+ },
+ {
+ "end": 33,
+ "start": 31,
+ "type": "CLIENT_INITIALIZATION"
+ },
+ {
+ "end": 38,
+ "start": 34,
+ "type": "REQUEST_INITIALIZATION"
+ },
+ {
+ "end": 45,
+ "start": 39,
+ "type": "REQUEST_EXECUTION"
+ },
+ {
+ "end": 49,
+ "start": 46,
+ "type": "RESPONSE_HANDLING"
+ }
+ ]
+ },
+ {
+ "clientMethod": {
+ "async": true,
+ "method": {
+ "service": {
+ "shortName": "SpecialistPoolService"
+ },
+ "shortName": "GetSpecialistPool"
+ }
+ },
+ "file": "aiplatform_generated_aiplatform_v1beta1_specialist_pool_service_get_specialist_pool_async.py",
+ "regionTag": "aiplatform_generated_aiplatform_v1beta1_SpecialistPoolService_GetSpecialistPool_async",
+ "segments": [
+ {
+ "end": 44,
+ "start": 27,
+ "type": "FULL"
+ },
+ {
+ "end": 44,
+ "start": 27,
+ "type": "SHORT"
+ },
+ {
+ "end": 33,
+ "start": 31,
+ "type": "CLIENT_INITIALIZATION"
+ },
+ {
+ "end": 38,
+ "start": 34,
+ "type": "REQUEST_INITIALIZATION"
+ },
+ {
+ "end": 41,
+ "start": 39,
+ "type": "REQUEST_EXECUTION"
+ },
+ {
+ "end": 45,
+ "start": 42,
+ "type": "RESPONSE_HANDLING"
+ }
+ ]
+ },
+ {
+ "clientMethod": {
+ "method": {
+ "service": {
+ "shortName": "SpecialistPoolService"
+ },
+ "shortName": "GetSpecialistPool"
+ }
+ },
+ "file": "aiplatform_generated_aiplatform_v1beta1_specialist_pool_service_get_specialist_pool_sync.py",
+ "regionTag": "aiplatform_generated_aiplatform_v1beta1_SpecialistPoolService_GetSpecialistPool_sync",
+ "segments": [
+ {
+ "end": 44,
+ "start": 27,
+ "type": "FULL"
+ },
+ {
+ "end": 44,
+ "start": 27,
+ "type": "SHORT"
+ },
+ {
+ "end": 33,
+ "start": 31,
+ "type": "CLIENT_INITIALIZATION"
+ },
+ {
+ "end": 38,
+ "start": 34,
+ "type": "REQUEST_INITIALIZATION"
+ },
+ {
+ "end": 41,
+ "start": 39,
+ "type": "REQUEST_EXECUTION"
+ },
+ {
+ "end": 45,
+ "start": 42,
+ "type": "RESPONSE_HANDLING"
+ }
+ ]
+ },
+ {
+ "clientMethod": {
+ "async": true,
+ "method": {
+ "service": {
+ "shortName": "SpecialistPoolService"
+ },
+ "shortName": "ListSpecialistPools"
+ }
+ },
+ "file": "aiplatform_generated_aiplatform_v1beta1_specialist_pool_service_list_specialist_pools_async.py",
+ "regionTag": "aiplatform_generated_aiplatform_v1beta1_SpecialistPoolService_ListSpecialistPools_async",
+ "segments": [
+ {
+ "end": 45,
+ "start": 27,
+ "type": "FULL"
+ },
+ {
+ "end": 45,
+ "start": 27,
+ "type": "SHORT"
+ },
+ {
+ "end": 33,
+ "start": 31,
+ "type": "CLIENT_INITIALIZATION"
+ },
+ {
+ "end": 38,
+ "start": 34,
+ "type": "REQUEST_INITIALIZATION"
+ },
+ {
+ "end": 41,
+ "start": 39,
+ "type": "REQUEST_EXECUTION"
+ },
+ {
+ "end": 46,
+ "start": 42,
+ "type": "RESPONSE_HANDLING"
+ }
+ ]
+ },
+ {
+ "clientMethod": {
+ "method": {
+ "service": {
+ "shortName": "SpecialistPoolService"
+ },
+ "shortName": "ListSpecialistPools"
+ }
+ },
+ "file": "aiplatform_generated_aiplatform_v1beta1_specialist_pool_service_list_specialist_pools_sync.py",
+ "regionTag": "aiplatform_generated_aiplatform_v1beta1_SpecialistPoolService_ListSpecialistPools_sync",
+ "segments": [
+ {
+ "end": 45,
+ "start": 27,
+ "type": "FULL"
+ },
+ {
+ "end": 45,
+ "start": 27,
+ "type": "SHORT"
+ },
+ {
+ "end": 33,
+ "start": 31,
+ "type": "CLIENT_INITIALIZATION"
+ },
+ {
+ "end": 38,
+ "start": 34,
+ "type": "REQUEST_INITIALIZATION"
+ },
+ {
+ "end": 41,
+ "start": 39,
+ "type": "REQUEST_EXECUTION"
+ },
+ {
+ "end": 46,
+ "start": 42,
+ "type": "RESPONSE_HANDLING"
+ }
+ ]
+ },
+ {
+ "clientMethod": {
+ "async": true,
+ "method": {
+ "service": {
+ "shortName": "SpecialistPoolService"
+ },
+ "shortName": "UpdateSpecialistPool"
+ }
+ },
+ "file": "aiplatform_generated_aiplatform_v1beta1_specialist_pool_service_update_specialist_pool_async.py",
+ "regionTag": "aiplatform_generated_aiplatform_v1beta1_SpecialistPoolService_UpdateSpecialistPool_async",
+ "segments": [
+ {
+ "end": 52,
+ "start": 27,
+ "type": "FULL"
+ },
+ {
+ "end": 52,
+ "start": 27,
+ "type": "SHORT"
+ },
+ {
+ "end": 33,
+ "start": 31,
+ "type": "CLIENT_INITIALIZATION"
+ },
+ {
+ "end": 42,
+ "start": 34,
+ "type": "REQUEST_INITIALIZATION"
+ },
+ {
+ "end": 49,
+ "start": 43,
+ "type": "REQUEST_EXECUTION"
+ },
+ {
+ "end": 53,
+ "start": 50,
+ "type": "RESPONSE_HANDLING"
+ }
+ ]
+ },
+ {
+ "clientMethod": {
+ "method": {
+ "service": {
+ "shortName": "SpecialistPoolService"
+ },
+ "shortName": "UpdateSpecialistPool"
+ }
+ },
+ "file": "aiplatform_generated_aiplatform_v1beta1_specialist_pool_service_update_specialist_pool_sync.py",
+ "regionTag": "aiplatform_generated_aiplatform_v1beta1_SpecialistPoolService_UpdateSpecialistPool_sync",
+ "segments": [
+ {
+ "end": 52,
+ "start": 27,
+ "type": "FULL"
+ },
+ {
+ "end": 52,
+ "start": 27,
+ "type": "SHORT"
+ },
+ {
+ "end": 33,
+ "start": 31,
+ "type": "CLIENT_INITIALIZATION"
+ },
+ {
+ "end": 42,
+ "start": 34,
+ "type": "REQUEST_INITIALIZATION"
+ },
+ {
+ "end": 49,
+ "start": 43,
+ "type": "REQUEST_EXECUTION"
+ },
+ {
+ "end": 53,
+ "start": 50,
+ "type": "RESPONSE_HANDLING"
+ }
+ ]
+ },
+ {
+ "clientMethod": {
+ "async": true,
+ "method": {
+ "service": {
+ "shortName": "TensorboardService"
+ },
+ "shortName": "BatchCreateTensorboardRuns"
+ }
+ },
+ "file": "aiplatform_generated_aiplatform_v1beta1_tensorboard_service_batch_create_tensorboard_runs_async.py",
+ "regionTag": "aiplatform_generated_aiplatform_v1beta1_TensorboardService_BatchCreateTensorboardRuns_async",
+ "segments": [
+ {
+ "end": 50,
+ "start": 27,
+ "type": "FULL"
+ },
+ {
+ "end": 50,
+ "start": 27,
+ "type": "SHORT"
+ },
+ {
+ "end": 33,
+ "start": 31,
+ "type": "CLIENT_INITIALIZATION"
+ },
+ {
+ "end": 44,
+ "start": 34,
+ "type": "REQUEST_INITIALIZATION"
+ },
+ {
+ "end": 47,
+ "start": 45,
+ "type": "REQUEST_EXECUTION"
+ },
+ {
+ "end": 51,
+ "start": 48,
+ "type": "RESPONSE_HANDLING"
+ }
+ ]
+ },
+ {
+ "clientMethod": {
+ "method": {
+ "service": {
+ "shortName": "TensorboardService"
+ },
+ "shortName": "BatchCreateTensorboardRuns"
+ }
+ },
+ "file": "aiplatform_generated_aiplatform_v1beta1_tensorboard_service_batch_create_tensorboard_runs_sync.py",
+ "regionTag": "aiplatform_generated_aiplatform_v1beta1_TensorboardService_BatchCreateTensorboardRuns_sync",
+ "segments": [
+ {
+ "end": 50,
+ "start": 27,
+ "type": "FULL"
+ },
+ {
+ "end": 50,
+ "start": 27,
+ "type": "SHORT"
+ },
+ {
+ "end": 33,
+ "start": 31,
+ "type": "CLIENT_INITIALIZATION"
+ },
+ {
+ "end": 44,
+ "start": 34,
+ "type": "REQUEST_INITIALIZATION"
+ },
+ {
+ "end": 47,
+ "start": 45,
+ "type": "REQUEST_EXECUTION"
+ },
+ {
+ "end": 51,
+ "start": 48,
+ "type": "RESPONSE_HANDLING"
+ }
+ ]
+ },
+ {
+ "clientMethod": {
+ "async": true,
+ "method": {
+ "service": {
+ "shortName": "TensorboardService"
+ },
+ "shortName": "BatchCreateTensorboardTimeSeries"
+ }
+ },
+ "file": "aiplatform_generated_aiplatform_v1beta1_tensorboard_service_batch_create_tensorboard_time_series_async.py",
+ "regionTag": "aiplatform_generated_aiplatform_v1beta1_TensorboardService_BatchCreateTensorboardTimeSeries_async",
+ "segments": [
+ {
+ "end": 50,
+ "start": 27,
+ "type": "FULL"
+ },
+ {
+ "end": 50,
+ "start": 27,
+ "type": "SHORT"
+ },
+ {
+ "end": 33,
+ "start": 31,
+ "type": "CLIENT_INITIALIZATION"
+ },
+ {
+ "end": 44,
+ "start": 34,
+ "type": "REQUEST_INITIALIZATION"
+ },
+ {
+ "end": 47,
+ "start": 45,
+ "type": "REQUEST_EXECUTION"
+ },
+ {
+ "end": 51,
+ "start": 48,
+ "type": "RESPONSE_HANDLING"
+ }
+ ]
+ },
+ {
+ "clientMethod": {
+ "method": {
+ "service": {
+ "shortName": "TensorboardService"
+ },
+ "shortName": "BatchCreateTensorboardTimeSeries"
+ }
+ },
+ "file": "aiplatform_generated_aiplatform_v1beta1_tensorboard_service_batch_create_tensorboard_time_series_sync.py",
+ "regionTag": "aiplatform_generated_aiplatform_v1beta1_TensorboardService_BatchCreateTensorboardTimeSeries_sync",
+ "segments": [
+ {
+ "end": 50,
+ "start": 27,
+ "type": "FULL"
+ },
+ {
+ "end": 50,
+ "start": 27,
+ "type": "SHORT"
+ },
+ {
+ "end": 33,
+ "start": 31,
+ "type": "CLIENT_INITIALIZATION"
+ },
+ {
+ "end": 44,
+ "start": 34,
+ "type": "REQUEST_INITIALIZATION"
+ },
+ {
+ "end": 47,
+ "start": 45,
+ "type": "REQUEST_EXECUTION"
+ },
+ {
+ "end": 51,
+ "start": 48,
+ "type": "RESPONSE_HANDLING"
+ }
+ ]
+ },
+ {
+ "clientMethod": {
+ "async": true,
+ "method": {
+ "service": {
+ "shortName": "TensorboardService"
+ },
+ "shortName": "BatchReadTensorboardTimeSeriesData"
+ }
+ },
+ "file": "aiplatform_generated_aiplatform_v1beta1_tensorboard_service_batch_read_tensorboard_time_series_data_async.py",
+ "regionTag": "aiplatform_generated_aiplatform_v1beta1_TensorboardService_BatchReadTensorboardTimeSeriesData_async",
+ "segments": [
+ {
+ "end": 45,
+ "start": 27,
+ "type": "FULL"
+ },
+ {
+ "end": 45,
+ "start": 27,
+ "type": "SHORT"
+ },
+ {
+ "end": 33,
+ "start": 31,
+ "type": "CLIENT_INITIALIZATION"
+ },
+ {
+ "end": 39,
+ "start": 34,
+ "type": "REQUEST_INITIALIZATION"
+ },
+ {
+ "end": 42,
+ "start": 40,
+ "type": "REQUEST_EXECUTION"
+ },
+ {
+ "end": 46,
+ "start": 43,
+ "type": "RESPONSE_HANDLING"
+ }
+ ]
+ },
+ {
+ "clientMethod": {
+ "method": {
+ "service": {
+ "shortName": "TensorboardService"
+ },
+ "shortName": "BatchReadTensorboardTimeSeriesData"
+ }
+ },
+ "file": "aiplatform_generated_aiplatform_v1beta1_tensorboard_service_batch_read_tensorboard_time_series_data_sync.py",
+ "regionTag": "aiplatform_generated_aiplatform_v1beta1_TensorboardService_BatchReadTensorboardTimeSeriesData_sync",
+ "segments": [
+ {
+ "end": 45,
+ "start": 27,
+ "type": "FULL"
+ },
+ {
+ "end": 45,
+ "start": 27,
+ "type": "SHORT"
+ },
+ {
+ "end": 33,
+ "start": 31,
+ "type": "CLIENT_INITIALIZATION"
+ },
+ {
+ "end": 39,
+ "start": 34,
+ "type": "REQUEST_INITIALIZATION"
+ },
+ {
+ "end": 42,
+ "start": 40,
+ "type": "REQUEST_EXECUTION"
+ },
+ {
+ "end": 46,
+ "start": 43,
+ "type": "RESPONSE_HANDLING"
+ }
+ ]
+ },
+ {
+ "clientMethod": {
+ "async": true,
+ "method": {
+ "service": {
+ "shortName": "TensorboardService"
+ },
+ "shortName": "CreateTensorboardExperiment"
+ }
+ },
+ "file": "aiplatform_generated_aiplatform_v1beta1_tensorboard_service_create_tensorboard_experiment_async.py",
+ "regionTag": "aiplatform_generated_aiplatform_v1beta1_TensorboardService_CreateTensorboardExperiment_async",
+ "segments": [
+ {
+ "end": 45,
+ "start": 27,
+ "type": "FULL"
+ },
+ {
+ "end": 45,
+ "start": 27,
+ "type": "SHORT"
+ },
+ {
+ "end": 33,
+ "start": 31,
+ "type": "CLIENT_INITIALIZATION"
+ },
+ {
+ "end": 39,
+ "start": 34,
+ "type": "REQUEST_INITIALIZATION"
+ },
+ {
+ "end": 42,
+ "start": 40,
+ "type": "REQUEST_EXECUTION"
+ },
+ {
+ "end": 46,
+ "start": 43,
+ "type": "RESPONSE_HANDLING"
+ }
+ ]
+ },
+ {
+ "clientMethod": {
+ "method": {
+ "service": {
+ "shortName": "TensorboardService"
+ },
+ "shortName": "CreateTensorboardExperiment"
+ }
+ },
+ "file": "aiplatform_generated_aiplatform_v1beta1_tensorboard_service_create_tensorboard_experiment_sync.py",
+ "regionTag": "aiplatform_generated_aiplatform_v1beta1_TensorboardService_CreateTensorboardExperiment_sync",
+ "segments": [
+ {
+ "end": 45,
+ "start": 27,
+ "type": "FULL"
+ },
+ {
+ "end": 45,
+ "start": 27,
+ "type": "SHORT"
+ },
+ {
+ "end": 33,
+ "start": 31,
+ "type": "CLIENT_INITIALIZATION"
+ },
+ {
+ "end": 39,
+ "start": 34,
+ "type": "REQUEST_INITIALIZATION"
+ },
+ {
+ "end": 42,
+ "start": 40,
+ "type": "REQUEST_EXECUTION"
+ },
+ {
+ "end": 46,
+ "start": 43,
+ "type": "RESPONSE_HANDLING"
+ }
+ ]
+ },
+ {
+ "clientMethod": {
+ "async": true,
+ "method": {
+ "service": {
+ "shortName": "TensorboardService"
+ },
+ "shortName": "CreateTensorboardRun"
+ }
+ },
+ "file": "aiplatform_generated_aiplatform_v1beta1_tensorboard_service_create_tensorboard_run_async.py",
+ "regionTag": "aiplatform_generated_aiplatform_v1beta1_TensorboardService_CreateTensorboardRun_async",
+ "segments": [
+ {
+ "end": 49,
+ "start": 27,
+ "type": "FULL"
+ },
+ {
+ "end": 49,
+ "start": 27,
+ "type": "SHORT"
+ },
+ {
+ "end": 33,
+ "start": 31,
+ "type": "CLIENT_INITIALIZATION"
+ },
+ {
+ "end": 43,
+ "start": 34,
+ "type": "REQUEST_INITIALIZATION"
+ },
+ {
+ "end": 46,
+ "start": 44,
+ "type": "REQUEST_EXECUTION"
+ },
+ {
+ "end": 50,
+ "start": 47,
+ "type": "RESPONSE_HANDLING"
+ }
+ ]
+ },
+ {
+ "clientMethod": {
+ "method": {
+ "service": {
+ "shortName": "TensorboardService"
+ },
+ "shortName": "CreateTensorboardRun"
+ }
+ },
+ "file": "aiplatform_generated_aiplatform_v1beta1_tensorboard_service_create_tensorboard_run_sync.py",
+ "regionTag": "aiplatform_generated_aiplatform_v1beta1_TensorboardService_CreateTensorboardRun_sync",
+ "segments": [
+ {
+ "end": 49,
+ "start": 27,
+ "type": "FULL"
+ },
+ {
+ "end": 49,
+ "start": 27,
+ "type": "SHORT"
+ },
+ {
+ "end": 33,
+ "start": 31,
+ "type": "CLIENT_INITIALIZATION"
+ },
+ {
+ "end": 43,
+ "start": 34,
+ "type": "REQUEST_INITIALIZATION"
+ },
+ {
+ "end": 46,
+ "start": 44,
+ "type": "REQUEST_EXECUTION"
+ },
+ {
+ "end": 50,
+ "start": 47,
+ "type": "RESPONSE_HANDLING"
+ }
+ ]
+ },
+ {
+ "clientMethod": {
+ "async": true,
+ "method": {
+ "service": {
+ "shortName": "TensorboardService"
+ },
+ "shortName": "CreateTensorboardTimeSeries"
+ }
+ },
+ "file": "aiplatform_generated_aiplatform_v1beta1_tensorboard_service_create_tensorboard_time_series_async.py",
+ "regionTag": "aiplatform_generated_aiplatform_v1beta1_TensorboardService_CreateTensorboardTimeSeries_async",
+ "segments": [
+ {
+ "end": 49,
+ "start": 27,
+ "type": "FULL"
+ },
+ {
+ "end": 49,
+ "start": 27,
+ "type": "SHORT"
+ },
+ {
+ "end": 33,
+ "start": 31,
+ "type": "CLIENT_INITIALIZATION"
+ },
+ {
+ "end": 43,
+ "start": 34,
+ "type": "REQUEST_INITIALIZATION"
+ },
+ {
+ "end": 46,
+ "start": 44,
+ "type": "REQUEST_EXECUTION"
+ },
+ {
+ "end": 50,
+ "start": 47,
+ "type": "RESPONSE_HANDLING"
+ }
+ ]
+ },
+ {
+ "clientMethod": {
+ "method": {
+ "service": {
+ "shortName": "TensorboardService"
+ },
+ "shortName": "CreateTensorboardTimeSeries"
+ }
+ },
+ "file": "aiplatform_generated_aiplatform_v1beta1_tensorboard_service_create_tensorboard_time_series_sync.py",
+ "regionTag": "aiplatform_generated_aiplatform_v1beta1_TensorboardService_CreateTensorboardTimeSeries_sync",
+ "segments": [
+ {
+ "end": 49,
+ "start": 27,
+ "type": "FULL"
+ },
+ {
+ "end": 49,
+ "start": 27,
+ "type": "SHORT"
+ },
+ {
+ "end": 33,
+ "start": 31,
+ "type": "CLIENT_INITIALIZATION"
+ },
+ {
+ "end": 43,
+ "start": 34,
+ "type": "REQUEST_INITIALIZATION"
+ },
+ {
+ "end": 46,
+ "start": 44,
+ "type": "REQUEST_EXECUTION"
+ },
+ {
+ "end": 50,
+ "start": 47,
+ "type": "RESPONSE_HANDLING"
+ }
+ ]
+ },
+ {
+ "clientMethod": {
+ "async": true,
+ "method": {
+ "service": {
+ "shortName": "TensorboardService"
+ },
+ "shortName": "CreateTensorboard"
+ }
+ },
+ "file": "aiplatform_generated_aiplatform_v1beta1_tensorboard_service_create_tensorboard_async.py",
+ "regionTag": "aiplatform_generated_aiplatform_v1beta1_TensorboardService_CreateTensorboard_async",
+ "segments": [
+ {
+ "end": 52,
+ "start": 27,
+ "type": "FULL"
+ },
+ {
+ "end": 52,
+ "start": 27,
+ "type": "SHORT"
+ },
+ {
+ "end": 33,
+ "start": 31,
+ "type": "CLIENT_INITIALIZATION"
+ },
+ {
+ "end": 42,
+ "start": 34,
+ "type": "REQUEST_INITIALIZATION"
+ },
+ {
+ "end": 49,
+ "start": 43,
+ "type": "REQUEST_EXECUTION"
+ },
+ {
+ "end": 53,
+ "start": 50,
+ "type": "RESPONSE_HANDLING"
+ }
+ ]
+ },
+ {
+ "clientMethod": {
+ "method": {
+ "service": {
+ "shortName": "TensorboardService"
+ },
+ "shortName": "CreateTensorboard"
+ }
+ },
+ "file": "aiplatform_generated_aiplatform_v1beta1_tensorboard_service_create_tensorboard_sync.py",
+ "regionTag": "aiplatform_generated_aiplatform_v1beta1_TensorboardService_CreateTensorboard_sync",
+ "segments": [
+ {
+ "end": 52,
+ "start": 27,
+ "type": "FULL"
+ },
+ {
+ "end": 52,
+ "start": 27,
+ "type": "SHORT"
+ },
+ {
+ "end": 33,
+ "start": 31,
+ "type": "CLIENT_INITIALIZATION"
+ },
+ {
+ "end": 42,
+ "start": 34,
+ "type": "REQUEST_INITIALIZATION"
+ },
+ {
+ "end": 49,
+ "start": 43,
+ "type": "REQUEST_EXECUTION"
+ },
+ {
+ "end": 53,
+ "start": 50,
+ "type": "RESPONSE_HANDLING"
+ }
+ ]
+ },
+ {
+ "clientMethod": {
+ "async": true,
+ "method": {
+ "service": {
+ "shortName": "TensorboardService"
+ },
+ "shortName": "DeleteTensorboardExperiment"
+ }
+ },
+ "file": "aiplatform_generated_aiplatform_v1beta1_tensorboard_service_delete_tensorboard_experiment_async.py",
+ "regionTag": "aiplatform_generated_aiplatform_v1beta1_TensorboardService_DeleteTensorboardExperiment_async",
+ "segments": [
+ {
+ "end": 48,
+ "start": 27,
+ "type": "FULL"
+ },
+ {
+ "end": 48,
+ "start": 27,
+ "type": "SHORT"
+ },
+ {
+ "end": 33,
+ "start": 31,
+ "type": "CLIENT_INITIALIZATION"
+ },
+ {
+ "end": 38,
+ "start": 34,
+ "type": "REQUEST_INITIALIZATION"
+ },
+ {
+ "end": 45,
+ "start": 39,
+ "type": "REQUEST_EXECUTION"
+ },
+ {
+ "end": 49,
+ "start": 46,
+ "type": "RESPONSE_HANDLING"
+ }
+ ]
+ },
+ {
+ "clientMethod": {
+ "method": {
+ "service": {
+ "shortName": "TensorboardService"
+ },
+ "shortName": "DeleteTensorboardExperiment"
+ }
+ },
+ "file": "aiplatform_generated_aiplatform_v1beta1_tensorboard_service_delete_tensorboard_experiment_sync.py",
+ "regionTag": "aiplatform_generated_aiplatform_v1beta1_TensorboardService_DeleteTensorboardExperiment_sync",
+ "segments": [
+ {
+ "end": 48,
+ "start": 27,
+ "type": "FULL"
+ },
+ {
+ "end": 48,
+ "start": 27,
+ "type": "SHORT"
+ },
+ {
+ "end": 33,
+ "start": 31,
+ "type": "CLIENT_INITIALIZATION"
+ },
+ {
+ "end": 38,
+ "start": 34,
+ "type": "REQUEST_INITIALIZATION"
+ },
+ {
+ "end": 45,
+ "start": 39,
+ "type": "REQUEST_EXECUTION"
+ },
+ {
+ "end": 49,
+ "start": 46,
+ "type": "RESPONSE_HANDLING"
+ }
+ ]
+ },
+ {
+ "clientMethod": {
+ "async": true,
+ "method": {
+ "service": {
+ "shortName": "TensorboardService"
+ },
+ "shortName": "DeleteTensorboardRun"
+ }
+ },
+ "file": "aiplatform_generated_aiplatform_v1beta1_tensorboard_service_delete_tensorboard_run_async.py",
+ "regionTag": "aiplatform_generated_aiplatform_v1beta1_TensorboardService_DeleteTensorboardRun_async",
+ "segments": [
+ {
+ "end": 48,
+ "start": 27,
+ "type": "FULL"
+ },
+ {
+ "end": 48,
+ "start": 27,
+ "type": "SHORT"
+ },
+ {
+ "end": 33,
+ "start": 31,
+ "type": "CLIENT_INITIALIZATION"
+ },
+ {
+ "end": 38,
+ "start": 34,
+ "type": "REQUEST_INITIALIZATION"
+ },
+ {
+ "end": 45,
+ "start": 39,
+ "type": "REQUEST_EXECUTION"
+ },
+ {
+ "end": 49,
+ "start": 46,
+ "type": "RESPONSE_HANDLING"
+ }
+ ]
+ },
+ {
+ "clientMethod": {
+ "method": {
+ "service": {
+ "shortName": "TensorboardService"
+ },
+ "shortName": "DeleteTensorboardRun"
+ }
+ },
+ "file": "aiplatform_generated_aiplatform_v1beta1_tensorboard_service_delete_tensorboard_run_sync.py",
+ "regionTag": "aiplatform_generated_aiplatform_v1beta1_TensorboardService_DeleteTensorboardRun_sync",
+ "segments": [
+ {
+ "end": 48,
+ "start": 27,
+ "type": "FULL"
+ },
+ {
+ "end": 48,
+ "start": 27,
+ "type": "SHORT"
+ },
+ {
+ "end": 33,
+ "start": 31,
+ "type": "CLIENT_INITIALIZATION"
+ },
+ {
+ "end": 38,
+ "start": 34,
+ "type": "REQUEST_INITIALIZATION"
+ },
+ {
+ "end": 45,
+ "start": 39,
+ "type": "REQUEST_EXECUTION"
+ },
+ {
+ "end": 49,
+ "start": 46,
+ "type": "RESPONSE_HANDLING"
+ }
+ ]
+ },
+ {
+ "clientMethod": {
+ "async": true,
+ "method": {
+ "service": {
+ "shortName": "TensorboardService"
+ },
+ "shortName": "DeleteTensorboardTimeSeries"
+ }
+ },
+ "file": "aiplatform_generated_aiplatform_v1beta1_tensorboard_service_delete_tensorboard_time_series_async.py",
+ "regionTag": "aiplatform_generated_aiplatform_v1beta1_TensorboardService_DeleteTensorboardTimeSeries_async",
+ "segments": [
+ {
+ "end": 48,
+ "start": 27,
+ "type": "FULL"
+ },
+ {
+ "end": 48,
+ "start": 27,
+ "type": "SHORT"
+ },
+ {
+ "end": 33,
+ "start": 31,
+ "type": "CLIENT_INITIALIZATION"
+ },
+ {
+ "end": 38,
+ "start": 34,
+ "type": "REQUEST_INITIALIZATION"
+ },
+ {
+ "end": 45,
+ "start": 39,
+ "type": "REQUEST_EXECUTION"
+ },
+ {
+ "end": 49,
+ "start": 46,
+ "type": "RESPONSE_HANDLING"
+ }
+ ]
+ },
+ {
+ "clientMethod": {
+ "method": {
+ "service": {
+ "shortName": "TensorboardService"
+ },
+ "shortName": "DeleteTensorboardTimeSeries"
+ }
+ },
+ "file": "aiplatform_generated_aiplatform_v1beta1_tensorboard_service_delete_tensorboard_time_series_sync.py",
+ "regionTag": "aiplatform_generated_aiplatform_v1beta1_TensorboardService_DeleteTensorboardTimeSeries_sync",
+ "segments": [
+ {
+ "end": 48,
+ "start": 27,
+ "type": "FULL"
+ },
+ {
+ "end": 48,
+ "start": 27,
+ "type": "SHORT"
+ },
+ {
+ "end": 33,
+ "start": 31,
+ "type": "CLIENT_INITIALIZATION"
+ },
+ {
+ "end": 38,
+ "start": 34,
+ "type": "REQUEST_INITIALIZATION"
+ },
+ {
+ "end": 45,
+ "start": 39,
+ "type": "REQUEST_EXECUTION"
+ },
+ {
+ "end": 49,
+ "start": 46,
+ "type": "RESPONSE_HANDLING"
+ }
+ ]
+ },
+ {
+ "clientMethod": {
+ "async": true,
+ "method": {
+ "service": {
+ "shortName": "TensorboardService"
+ },
+ "shortName": "DeleteTensorboard"
+ }
+ },
+ "file": "aiplatform_generated_aiplatform_v1beta1_tensorboard_service_delete_tensorboard_async.py",
+ "regionTag": "aiplatform_generated_aiplatform_v1beta1_TensorboardService_DeleteTensorboard_async",
+ "segments": [
+ {
+ "end": 48,
+ "start": 27,
+ "type": "FULL"
+ },
+ {
+ "end": 48,
+ "start": 27,
+ "type": "SHORT"
+ },
+ {
+ "end": 33,
+ "start": 31,
+ "type": "CLIENT_INITIALIZATION"
+ },
+ {
+ "end": 38,
+ "start": 34,
+ "type": "REQUEST_INITIALIZATION"
+ },
+ {
+ "end": 45,
+ "start": 39,
+ "type": "REQUEST_EXECUTION"
+ },
+ {
+ "end": 49,
+ "start": 46,
+ "type": "RESPONSE_HANDLING"
+ }
+ ]
+ },
+ {
+ "clientMethod": {
+ "method": {
+ "service": {
+ "shortName": "TensorboardService"
+ },
+ "shortName": "DeleteTensorboard"
+ }
+ },
+ "file": "aiplatform_generated_aiplatform_v1beta1_tensorboard_service_delete_tensorboard_sync.py",
+ "regionTag": "aiplatform_generated_aiplatform_v1beta1_TensorboardService_DeleteTensorboard_sync",
+ "segments": [
+ {
+ "end": 48,
+ "start": 27,
+ "type": "FULL"
+ },
+ {
+ "end": 48,
+ "start": 27,
+ "type": "SHORT"
+ },
+ {
+ "end": 33,
+ "start": 31,
+ "type": "CLIENT_INITIALIZATION"
+ },
+ {
+ "end": 38,
+ "start": 34,
+ "type": "REQUEST_INITIALIZATION"
+ },
+ {
+ "end": 45,
+ "start": 39,
+ "type": "REQUEST_EXECUTION"
+ },
+ {
+ "end": 49,
+ "start": 46,
+ "type": "RESPONSE_HANDLING"
+ }
+ ]
+ },
+ {
+ "clientMethod": {
+ "async": true,
+ "method": {
+ "service": {
+ "shortName": "TensorboardService"
+ },
+ "shortName": "ExportTensorboardTimeSeriesData"
+ }
+ },
+ "file": "aiplatform_generated_aiplatform_v1beta1_tensorboard_service_export_tensorboard_time_series_data_async.py",
+ "regionTag": "aiplatform_generated_aiplatform_v1beta1_TensorboardService_ExportTensorboardTimeSeriesData_async",
+ "segments": [
+ {
+ "end": 45,
+ "start": 27,
+ "type": "FULL"
+ },
+ {
+ "end": 45,
+ "start": 27,
+ "type": "SHORT"
+ },
+ {
+ "end": 33,
+ "start": 31,
+ "type": "CLIENT_INITIALIZATION"
+ },
+ {
+ "end": 38,
+ "start": 34,
+ "type": "REQUEST_INITIALIZATION"
+ },
+ {
+ "end": 41,
+ "start": 39,
+ "type": "REQUEST_EXECUTION"
+ },
+ {
+ "end": 46,
+ "start": 42,
+ "type": "RESPONSE_HANDLING"
+ }
+ ]
+ },
+ {
+ "clientMethod": {
+ "method": {
+ "service": {
+ "shortName": "TensorboardService"
+ },
+ "shortName": "ExportTensorboardTimeSeriesData"
+ }
+ },
+ "file": "aiplatform_generated_aiplatform_v1beta1_tensorboard_service_export_tensorboard_time_series_data_sync.py",
+ "regionTag": "aiplatform_generated_aiplatform_v1beta1_TensorboardService_ExportTensorboardTimeSeriesData_sync",
+ "segments": [
+ {
+ "end": 45,
+ "start": 27,
+ "type": "FULL"
+ },
+ {
+ "end": 45,
+ "start": 27,
+ "type": "SHORT"
+ },
+ {
+ "end": 33,
+ "start": 31,
+ "type": "CLIENT_INITIALIZATION"
+ },
+ {
+ "end": 38,
+ "start": 34,
+ "type": "REQUEST_INITIALIZATION"
+ },
+ {
+ "end": 41,
+ "start": 39,
+ "type": "REQUEST_EXECUTION"
+ },
+ {
+ "end": 46,
+ "start": 42,
+ "type": "RESPONSE_HANDLING"
+ }
+ ]
+ },
+ {
+ "clientMethod": {
+ "async": true,
+ "method": {
+ "service": {
+ "shortName": "TensorboardService"
+ },
+ "shortName": "GetTensorboardExperiment"
+ }
+ },
+ "file": "aiplatform_generated_aiplatform_v1beta1_tensorboard_service_get_tensorboard_experiment_async.py",
+ "regionTag": "aiplatform_generated_aiplatform_v1beta1_TensorboardService_GetTensorboardExperiment_async",
+ "segments": [
+ {
+ "end": 44,
+ "start": 27,
+ "type": "FULL"
+ },
+ {
+ "end": 44,
+ "start": 27,
+ "type": "SHORT"
+ },
+ {
+ "end": 33,
+ "start": 31,
+ "type": "CLIENT_INITIALIZATION"
+ },
+ {
+ "end": 38,
+ "start": 34,
+ "type": "REQUEST_INITIALIZATION"
+ },
+ {
+ "end": 41,
+ "start": 39,
+ "type": "REQUEST_EXECUTION"
+ },
+ {
+ "end": 45,
+ "start": 42,
+ "type": "RESPONSE_HANDLING"
+ }
+ ]
+ },
+ {
+ "clientMethod": {
+ "method": {
+ "service": {
+ "shortName": "TensorboardService"
+ },
+ "shortName": "GetTensorboardExperiment"
+ }
+ },
+ "file": "aiplatform_generated_aiplatform_v1beta1_tensorboard_service_get_tensorboard_experiment_sync.py",
+ "regionTag": "aiplatform_generated_aiplatform_v1beta1_TensorboardService_GetTensorboardExperiment_sync",
+ "segments": [
+ {
+ "end": 44,
+ "start": 27,
+ "type": "FULL"
+ },
+ {
+ "end": 44,
+ "start": 27,
+ "type": "SHORT"
+ },
+ {
+ "end": 33,
+ "start": 31,
+ "type": "CLIENT_INITIALIZATION"
+ },
+ {
+ "end": 38,
+ "start": 34,
+ "type": "REQUEST_INITIALIZATION"
+ },
+ {
+ "end": 41,
+ "start": 39,
+ "type": "REQUEST_EXECUTION"
+ },
+ {
+ "end": 45,
+ "start": 42,
+ "type": "RESPONSE_HANDLING"
+ }
+ ]
+ },
+ {
+ "clientMethod": {
+ "async": true,
+ "method": {
+ "service": {
+ "shortName": "TensorboardService"
+ },
+ "shortName": "GetTensorboardRun"
+ }
+ },
+ "file": "aiplatform_generated_aiplatform_v1beta1_tensorboard_service_get_tensorboard_run_async.py",
+ "regionTag": "aiplatform_generated_aiplatform_v1beta1_TensorboardService_GetTensorboardRun_async",
+ "segments": [
+ {
+ "end": 44,
+ "start": 27,
+ "type": "FULL"
+ },
+ {
+ "end": 44,
+ "start": 27,
+ "type": "SHORT"
+ },
+ {
+ "end": 33,
+ "start": 31,
+ "type": "CLIENT_INITIALIZATION"
+ },
+ {
+ "end": 38,
+ "start": 34,
+ "type": "REQUEST_INITIALIZATION"
+ },
+ {
+ "end": 41,
+ "start": 39,
+ "type": "REQUEST_EXECUTION"
+ },
+ {
+ "end": 45,
+ "start": 42,
+ "type": "RESPONSE_HANDLING"
+ }
+ ]
+ },
+ {
+ "clientMethod": {
+ "method": {
+ "service": {
+ "shortName": "TensorboardService"
+ },
+ "shortName": "GetTensorboardRun"
+ }
+ },
+ "file": "aiplatform_generated_aiplatform_v1beta1_tensorboard_service_get_tensorboard_run_sync.py",
+ "regionTag": "aiplatform_generated_aiplatform_v1beta1_TensorboardService_GetTensorboardRun_sync",
+ "segments": [
+ {
+ "end": 44,
+ "start": 27,
+ "type": "FULL"
+ },
+ {
+ "end": 44,
+ "start": 27,
+ "type": "SHORT"
+ },
+ {
+ "end": 33,
+ "start": 31,
+ "type": "CLIENT_INITIALIZATION"
+ },
+ {
+ "end": 38,
+ "start": 34,
+ "type": "REQUEST_INITIALIZATION"
+ },
+ {
+ "end": 41,
+ "start": 39,
+ "type": "REQUEST_EXECUTION"
+ },
+ {
+ "end": 45,
+ "start": 42,
+ "type": "RESPONSE_HANDLING"
+ }
+ ]
+ },
+ {
+ "clientMethod": {
+ "async": true,
+ "method": {
+ "service": {
+ "shortName": "TensorboardService"
+ },
+ "shortName": "GetTensorboardTimeSeries"
+ }
+ },
+ "file": "aiplatform_generated_aiplatform_v1beta1_tensorboard_service_get_tensorboard_time_series_async.py",
+ "regionTag": "aiplatform_generated_aiplatform_v1beta1_TensorboardService_GetTensorboardTimeSeries_async",
+ "segments": [
+ {
+ "end": 44,
+ "start": 27,
+ "type": "FULL"
+ },
+ {
+ "end": 44,
+ "start": 27,
+ "type": "SHORT"
+ },
+ {
+ "end": 33,
+ "start": 31,
+ "type": "CLIENT_INITIALIZATION"
+ },
+ {
+ "end": 38,
+ "start": 34,
+ "type": "REQUEST_INITIALIZATION"
+ },
+ {
+ "end": 41,
+ "start": 39,
+ "type": "REQUEST_EXECUTION"
+ },
+ {
+ "end": 45,
+ "start": 42,
+ "type": "RESPONSE_HANDLING"
+ }
+ ]
+ },
+ {
+ "clientMethod": {
+ "method": {
+ "service": {
+ "shortName": "TensorboardService"
+ },
+ "shortName": "GetTensorboardTimeSeries"
+ }
+ },
+ "file": "aiplatform_generated_aiplatform_v1beta1_tensorboard_service_get_tensorboard_time_series_sync.py",
+ "regionTag": "aiplatform_generated_aiplatform_v1beta1_TensorboardService_GetTensorboardTimeSeries_sync",
+ "segments": [
+ {
+ "end": 44,
+ "start": 27,
+ "type": "FULL"
+ },
+ {
+ "end": 44,
+ "start": 27,
+ "type": "SHORT"
+ },
+ {
+ "end": 33,
+ "start": 31,
+ "type": "CLIENT_INITIALIZATION"
+ },
+ {
+ "end": 38,
+ "start": 34,
+ "type": "REQUEST_INITIALIZATION"
+ },
+ {
+ "end": 41,
+ "start": 39,
+ "type": "REQUEST_EXECUTION"
+ },
+ {
+ "end": 45,
+ "start": 42,
+ "type": "RESPONSE_HANDLING"
+ }
+ ]
+ },
+ {
+ "clientMethod": {
+ "async": true,
+ "method": {
+ "service": {
+ "shortName": "TensorboardService"
+ },
+ "shortName": "GetTensorboard"
+ }
+ },
+ "file": "aiplatform_generated_aiplatform_v1beta1_tensorboard_service_get_tensorboard_async.py",
+ "regionTag": "aiplatform_generated_aiplatform_v1beta1_TensorboardService_GetTensorboard_async",
+ "segments": [
+ {
+ "end": 44,
+ "start": 27,
+ "type": "FULL"
+ },
+ {
+ "end": 44,
+ "start": 27,
+ "type": "SHORT"
+ },
+ {
+ "end": 33,
+ "start": 31,
+ "type": "CLIENT_INITIALIZATION"
+ },
+ {
+ "end": 38,
+ "start": 34,
+ "type": "REQUEST_INITIALIZATION"
+ },
+ {
+ "end": 41,
+ "start": 39,
+ "type": "REQUEST_EXECUTION"
+ },
+ {
+ "end": 45,
+ "start": 42,
+ "type": "RESPONSE_HANDLING"
+ }
+ ]
+ },
+ {
+ "clientMethod": {
+ "method": {
+ "service": {
+ "shortName": "TensorboardService"
+ },
+ "shortName": "GetTensorboard"
+ }
+ },
+ "file": "aiplatform_generated_aiplatform_v1beta1_tensorboard_service_get_tensorboard_sync.py",
+ "regionTag": "aiplatform_generated_aiplatform_v1beta1_TensorboardService_GetTensorboard_sync",
+ "segments": [
+ {
+ "end": 44,
+ "start": 27,
+ "type": "FULL"
+ },
+ {
+ "end": 44,
+ "start": 27,
+ "type": "SHORT"
+ },
+ {
+ "end": 33,
+ "start": 31,
+ "type": "CLIENT_INITIALIZATION"
+ },
+ {
+ "end": 38,
+ "start": 34,
+ "type": "REQUEST_INITIALIZATION"
+ },
+ {
+ "end": 41,
+ "start": 39,
+ "type": "REQUEST_EXECUTION"
+ },
+ {
+ "end": 45,
+ "start": 42,
+ "type": "RESPONSE_HANDLING"
+ }
+ ]
+ },
+ {
+ "clientMethod": {
+ "async": true,
+ "method": {
+ "service": {
+ "shortName": "TensorboardService"
+ },
+ "shortName": "ListTensorboardExperiments"
+ }
+ },
+ "file": "aiplatform_generated_aiplatform_v1beta1_tensorboard_service_list_tensorboard_experiments_async.py",
+ "regionTag": "aiplatform_generated_aiplatform_v1beta1_TensorboardService_ListTensorboardExperiments_async",
+ "segments": [
+ {
+ "end": 45,
+ "start": 27,
+ "type": "FULL"
+ },
+ {
+ "end": 45,
+ "start": 27,
+ "type": "SHORT"
+ },
+ {
+ "end": 33,
+ "start": 31,
+ "type": "CLIENT_INITIALIZATION"
+ },
+ {
+ "end": 38,
+ "start": 34,
+ "type": "REQUEST_INITIALIZATION"
+ },
+ {
+ "end": 41,
+ "start": 39,
+ "type": "REQUEST_EXECUTION"
+ },
+ {
+ "end": 46,
+ "start": 42,
+ "type": "RESPONSE_HANDLING"
+ }
+ ]
+ },
+ {
+ "clientMethod": {
+ "method": {
+ "service": {
+ "shortName": "TensorboardService"
+ },
+ "shortName": "ListTensorboardExperiments"
+ }
+ },
+ "file": "aiplatform_generated_aiplatform_v1beta1_tensorboard_service_list_tensorboard_experiments_sync.py",
+ "regionTag": "aiplatform_generated_aiplatform_v1beta1_TensorboardService_ListTensorboardExperiments_sync",
+ "segments": [
+ {
+ "end": 45,
+ "start": 27,
+ "type": "FULL"
+ },
+ {
+ "end": 45,
+ "start": 27,
+ "type": "SHORT"
+ },
+ {
+ "end": 33,
+ "start": 31,
+ "type": "CLIENT_INITIALIZATION"
+ },
+ {
+ "end": 38,
+ "start": 34,
+ "type": "REQUEST_INITIALIZATION"
+ },
+ {
+ "end": 41,
+ "start": 39,
+ "type": "REQUEST_EXECUTION"
+ },
+ {
+ "end": 46,
+ "start": 42,
+ "type": "RESPONSE_HANDLING"
+ }
+ ]
+ },
+ {
+ "clientMethod": {
+ "async": true,
+ "method": {
+ "service": {
+ "shortName": "TensorboardService"
+ },
+ "shortName": "ListTensorboardRuns"
+ }
+ },
+ "file": "aiplatform_generated_aiplatform_v1beta1_tensorboard_service_list_tensorboard_runs_async.py",
+ "regionTag": "aiplatform_generated_aiplatform_v1beta1_TensorboardService_ListTensorboardRuns_async",
+ "segments": [
+ {
+ "end": 45,
+ "start": 27,
+ "type": "FULL"
+ },
+ {
+ "end": 45,
+ "start": 27,
+ "type": "SHORT"
+ },
+ {
+ "end": 33,
+ "start": 31,
+ "type": "CLIENT_INITIALIZATION"
+ },
+ {
+ "end": 38,
+ "start": 34,
+ "type": "REQUEST_INITIALIZATION"
+ },
+ {
+ "end": 41,
+ "start": 39,
+ "type": "REQUEST_EXECUTION"
+ },
+ {
+ "end": 46,
+ "start": 42,
+ "type": "RESPONSE_HANDLING"
+ }
+ ]
+ },
+ {
+ "clientMethod": {
+ "method": {
+ "service": {
+ "shortName": "TensorboardService"
+ },
+ "shortName": "ListTensorboardRuns"
+ }
+ },
+ "file": "aiplatform_generated_aiplatform_v1beta1_tensorboard_service_list_tensorboard_runs_sync.py",
+ "regionTag": "aiplatform_generated_aiplatform_v1beta1_TensorboardService_ListTensorboardRuns_sync",
+ "segments": [
+ {
+ "end": 45,
+ "start": 27,
+ "type": "FULL"
+ },
+ {
+ "end": 45,
+ "start": 27,
+ "type": "SHORT"
+ },
+ {
+ "end": 33,
+ "start": 31,
+ "type": "CLIENT_INITIALIZATION"
+ },
+ {
+ "end": 38,
+ "start": 34,
+ "type": "REQUEST_INITIALIZATION"
+ },
+ {
+ "end": 41,
+ "start": 39,
+ "type": "REQUEST_EXECUTION"
+ },
+ {
+ "end": 46,
+ "start": 42,
+ "type": "RESPONSE_HANDLING"
+ }
+ ]
+ },
+ {
+ "clientMethod": {
+ "async": true,
+ "method": {
+ "service": {
+ "shortName": "TensorboardService"
+ },
+ "shortName": "ListTensorboardTimeSeries"
+ }
+ },
+ "file": "aiplatform_generated_aiplatform_v1beta1_tensorboard_service_list_tensorboard_time_series_async.py",
+ "regionTag": "aiplatform_generated_aiplatform_v1beta1_TensorboardService_ListTensorboardTimeSeries_async",
+ "segments": [
+ {
+ "end": 45,
+ "start": 27,
+ "type": "FULL"
+ },
+ {
+ "end": 45,
+ "start": 27,
+ "type": "SHORT"
+ },
+ {
+ "end": 33,
+ "start": 31,
+ "type": "CLIENT_INITIALIZATION"
+ },
+ {
+ "end": 38,
+ "start": 34,
+ "type": "REQUEST_INITIALIZATION"
+ },
+ {
+ "end": 41,
+ "start": 39,
+ "type": "REQUEST_EXECUTION"
+ },
+ {
+ "end": 46,
+ "start": 42,
+ "type": "RESPONSE_HANDLING"
+ }
+ ]
+ },
+ {
+ "clientMethod": {
+ "method": {
+ "service": {
+ "shortName": "TensorboardService"
+ },
+ "shortName": "ListTensorboardTimeSeries"
+ }
+ },
+ "file": "aiplatform_generated_aiplatform_v1beta1_tensorboard_service_list_tensorboard_time_series_sync.py",
+ "regionTag": "aiplatform_generated_aiplatform_v1beta1_TensorboardService_ListTensorboardTimeSeries_sync",
+ "segments": [
+ {
+ "end": 45,
+ "start": 27,
+ "type": "FULL"
+ },
+ {
+ "end": 45,
+ "start": 27,
+ "type": "SHORT"
+ },
+ {
+ "end": 33,
+ "start": 31,
+ "type": "CLIENT_INITIALIZATION"
+ },
+ {
+ "end": 38,
+ "start": 34,
+ "type": "REQUEST_INITIALIZATION"
+ },
+ {
+ "end": 41,
+ "start": 39,
+ "type": "REQUEST_EXECUTION"
+ },
+ {
+ "end": 46,
+ "start": 42,
+ "type": "RESPONSE_HANDLING"
+ }
+ ]
+ },
+ {
+ "clientMethod": {
+ "async": true,
+ "method": {
+ "service": {
+ "shortName": "TensorboardService"
+ },
+ "shortName": "ListTensorboards"
+ }
+ },
+ "file": "aiplatform_generated_aiplatform_v1beta1_tensorboard_service_list_tensorboards_async.py",
+ "regionTag": "aiplatform_generated_aiplatform_v1beta1_TensorboardService_ListTensorboards_async",
+ "segments": [
+ {
+ "end": 45,
+ "start": 27,
+ "type": "FULL"
+ },
+ {
+ "end": 45,
+ "start": 27,
+ "type": "SHORT"
+ },
+ {
+ "end": 33,
+ "start": 31,
+ "type": "CLIENT_INITIALIZATION"
+ },
+ {
+ "end": 38,
+ "start": 34,
+ "type": "REQUEST_INITIALIZATION"
+ },
+ {
+ "end": 41,
+ "start": 39,
+ "type": "REQUEST_EXECUTION"
+ },
+ {
+ "end": 46,
+ "start": 42,
+ "type": "RESPONSE_HANDLING"
+ }
+ ]
+ },
+ {
+ "clientMethod": {
+ "method": {
+ "service": {
+ "shortName": "TensorboardService"
+ },
+ "shortName": "ListTensorboards"
+ }
+ },
+ "file": "aiplatform_generated_aiplatform_v1beta1_tensorboard_service_list_tensorboards_sync.py",
+ "regionTag": "aiplatform_generated_aiplatform_v1beta1_TensorboardService_ListTensorboards_sync",
+ "segments": [
+ {
+ "end": 45,
+ "start": 27,
+ "type": "FULL"
+ },
+ {
+ "end": 45,
+ "start": 27,
+ "type": "SHORT"
+ },
+ {
+ "end": 33,
+ "start": 31,
+ "type": "CLIENT_INITIALIZATION"
+ },
+ {
+ "end": 38,
+ "start": 34,
+ "type": "REQUEST_INITIALIZATION"
+ },
+ {
+ "end": 41,
+ "start": 39,
+ "type": "REQUEST_EXECUTION"
+ },
+ {
+ "end": 46,
+ "start": 42,
+ "type": "RESPONSE_HANDLING"
+ }
+ ]
+ },
+ {
+ "clientMethod": {
+ "async": true,
+ "method": {
+ "service": {
+ "shortName": "TensorboardService"
+ },
+ "shortName": "ReadTensorboardBlobData"
+ }
+ },
+ "file": "aiplatform_generated_aiplatform_v1beta1_tensorboard_service_read_tensorboard_blob_data_async.py",
+ "regionTag": "aiplatform_generated_aiplatform_v1beta1_TensorboardService_ReadTensorboardBlobData_async",
+ "segments": [
+ {
+ "end": 45,
+ "start": 27,
+ "type": "FULL"
+ },
+ {
+ "end": 45,
+ "start": 27,
+ "type": "SHORT"
+ },
+ {
+ "end": 33,
+ "start": 31,
+ "type": "CLIENT_INITIALIZATION"
+ },
+ {
+ "end": 38,
+ "start": 34,
+ "type": "REQUEST_INITIALIZATION"
+ },
+ {
+ "end": 41,
+ "start": 39,
+ "type": "REQUEST_EXECUTION"
+ },
+ {
+ "end": 46,
+ "start": 42,
+ "type": "RESPONSE_HANDLING"
+ }
+ ]
+ },
+ {
+ "clientMethod": {
+ "method": {
+ "service": {
+ "shortName": "TensorboardService"
+ },
+ "shortName": "ReadTensorboardBlobData"
+ }
+ },
+ "file": "aiplatform_generated_aiplatform_v1beta1_tensorboard_service_read_tensorboard_blob_data_sync.py",
+ "regionTag": "aiplatform_generated_aiplatform_v1beta1_TensorboardService_ReadTensorboardBlobData_sync",
+ "segments": [
+ {
+ "end": 45,
+ "start": 27,
+ "type": "FULL"
+ },
+ {
+ "end": 45,
+ "start": 27,
+ "type": "SHORT"
+ },
+ {
+ "end": 33,
+ "start": 31,
+ "type": "CLIENT_INITIALIZATION"
+ },
+ {
+ "end": 38,
+ "start": 34,
+ "type": "REQUEST_INITIALIZATION"
+ },
+ {
+ "end": 41,
+ "start": 39,
+ "type": "REQUEST_EXECUTION"
+ },
+ {
+ "end": 46,
+ "start": 42,
+ "type": "RESPONSE_HANDLING"
+ }
+ ]
+ },
+ {
+ "clientMethod": {
+ "async": true,
+ "method": {
+ "service": {
+ "shortName": "TensorboardService"
+ },
+ "shortName": "ReadTensorboardTimeSeriesData"
+ }
+ },
+ "file": "aiplatform_generated_aiplatform_v1beta1_tensorboard_service_read_tensorboard_time_series_data_async.py",
+ "regionTag": "aiplatform_generated_aiplatform_v1beta1_TensorboardService_ReadTensorboardTimeSeriesData_async",
+ "segments": [
+ {
+ "end": 44,
+ "start": 27,
+ "type": "FULL"
+ },
+ {
+ "end": 44,
+ "start": 27,
+ "type": "SHORT"
+ },
+ {
+ "end": 33,
+ "start": 31,
+ "type": "CLIENT_INITIALIZATION"
+ },
+ {
+ "end": 38,
+ "start": 34,
+ "type": "REQUEST_INITIALIZATION"
+ },
+ {
+ "end": 41,
+ "start": 39,
+ "type": "REQUEST_EXECUTION"
+ },
+ {
+ "end": 45,
+ "start": 42,
+ "type": "RESPONSE_HANDLING"
+ }
+ ]
+ },
+ {
+ "clientMethod": {
+ "method": {
+ "service": {
+ "shortName": "TensorboardService"
+ },
+ "shortName": "ReadTensorboardTimeSeriesData"
+ }
+ },
+ "file": "aiplatform_generated_aiplatform_v1beta1_tensorboard_service_read_tensorboard_time_series_data_sync.py",
+ "regionTag": "aiplatform_generated_aiplatform_v1beta1_TensorboardService_ReadTensorboardTimeSeriesData_sync",
+ "segments": [
+ {
+ "end": 44,
+ "start": 27,
+ "type": "FULL"
+ },
+ {
+ "end": 44,
+ "start": 27,
+ "type": "SHORT"
+ },
+ {
+ "end": 33,
+ "start": 31,
+ "type": "CLIENT_INITIALIZATION"
+ },
+ {
+ "end": 38,
+ "start": 34,
+ "type": "REQUEST_INITIALIZATION"
+ },
+ {
+ "end": 41,
+ "start": 39,
+ "type": "REQUEST_EXECUTION"
+ },
+ {
+ "end": 45,
+ "start": 42,
+ "type": "RESPONSE_HANDLING"
+ }
+ ]
+ },
+ {
+ "clientMethod": {
+ "async": true,
+ "method": {
+ "service": {
+ "shortName": "TensorboardService"
+ },
+ "shortName": "UpdateTensorboardExperiment"
+ }
+ },
+ "file": "aiplatform_generated_aiplatform_v1beta1_tensorboard_service_update_tensorboard_experiment_async.py",
+ "regionTag": "aiplatform_generated_aiplatform_v1beta1_TensorboardService_UpdateTensorboardExperiment_async",
+ "segments": [
+ {
+ "end": 43,
+ "start": 27,
+ "type": "FULL"
+ },
+ {
+ "end": 43,
+ "start": 27,
+ "type": "SHORT"
+ },
+ {
+ "end": 33,
+ "start": 31,
+ "type": "CLIENT_INITIALIZATION"
+ },
+ {
+ "end": 37,
+ "start": 34,
+ "type": "REQUEST_INITIALIZATION"
+ },
+ {
+ "end": 40,
+ "start": 38,
+ "type": "REQUEST_EXECUTION"
+ },
+ {
+ "end": 44,
+ "start": 41,
+ "type": "RESPONSE_HANDLING"
+ }
+ ]
+ },
+ {
+ "clientMethod": {
+ "method": {
+ "service": {
+ "shortName": "TensorboardService"
+ },
+ "shortName": "UpdateTensorboardExperiment"
+ }
+ },
+ "file": "aiplatform_generated_aiplatform_v1beta1_tensorboard_service_update_tensorboard_experiment_sync.py",
+ "regionTag": "aiplatform_generated_aiplatform_v1beta1_TensorboardService_UpdateTensorboardExperiment_sync",
+ "segments": [
+ {
+ "end": 43,
+ "start": 27,
+ "type": "FULL"
+ },
+ {
+ "end": 43,
+ "start": 27,
+ "type": "SHORT"
+ },
+ {
+ "end": 33,
+ "start": 31,
+ "type": "CLIENT_INITIALIZATION"
+ },
+ {
+ "end": 37,
+ "start": 34,
+ "type": "REQUEST_INITIALIZATION"
+ },
+ {
+ "end": 40,
+ "start": 38,
+ "type": "REQUEST_EXECUTION"
+ },
+ {
+ "end": 44,
+ "start": 41,
+ "type": "RESPONSE_HANDLING"
+ }
+ ]
+ },
+ {
+ "clientMethod": {
+ "async": true,
+ "method": {
+ "service": {
+ "shortName": "TensorboardService"
+ },
+ "shortName": "UpdateTensorboardRun"
+ }
+ },
+ "file": "aiplatform_generated_aiplatform_v1beta1_tensorboard_service_update_tensorboard_run_async.py",
+ "regionTag": "aiplatform_generated_aiplatform_v1beta1_TensorboardService_UpdateTensorboardRun_async",
+ "segments": [
+ {
+ "end": 47,
+ "start": 27,
+ "type": "FULL"
+ },
+ {
+ "end": 47,
+ "start": 27,
+ "type": "SHORT"
+ },
+ {
+ "end": 33,
+ "start": 31,
+ "type": "CLIENT_INITIALIZATION"
+ },
+ {
+ "end": 41,
+ "start": 34,
+ "type": "REQUEST_INITIALIZATION"
+ },
+ {
+ "end": 44,
+ "start": 42,
+ "type": "REQUEST_EXECUTION"
+ },
+ {
+ "end": 48,
+ "start": 45,
+ "type": "RESPONSE_HANDLING"
+ }
+ ]
+ },
+ {
+ "clientMethod": {
+ "method": {
+ "service": {
+ "shortName": "TensorboardService"
+ },
+ "shortName": "UpdateTensorboardRun"
+ }
+ },
+ "file": "aiplatform_generated_aiplatform_v1beta1_tensorboard_service_update_tensorboard_run_sync.py",
+ "regionTag": "aiplatform_generated_aiplatform_v1beta1_TensorboardService_UpdateTensorboardRun_sync",
+ "segments": [
+ {
+ "end": 47,
+ "start": 27,
+ "type": "FULL"
+ },
+ {
+ "end": 47,
+ "start": 27,
+ "type": "SHORT"
+ },
+ {
+ "end": 33,
+ "start": 31,
+ "type": "CLIENT_INITIALIZATION"
+ },
+ {
+ "end": 41,
+ "start": 34,
+ "type": "REQUEST_INITIALIZATION"
+ },
+ {
+ "end": 44,
+ "start": 42,
+ "type": "REQUEST_EXECUTION"
+ },
+ {
+ "end": 48,
+ "start": 45,
+ "type": "RESPONSE_HANDLING"
+ }
+ ]
+ },
+ {
+ "clientMethod": {
+ "async": true,
+ "method": {
+ "service": {
+ "shortName": "TensorboardService"
+ },
+ "shortName": "UpdateTensorboardTimeSeries"
+ }
+ },
+ "file": "aiplatform_generated_aiplatform_v1beta1_tensorboard_service_update_tensorboard_time_series_async.py",
+ "regionTag": "aiplatform_generated_aiplatform_v1beta1_TensorboardService_UpdateTensorboardTimeSeries_async",
+ "segments": [
+ {
+ "end": 48,
+ "start": 27,
+ "type": "FULL"
+ },
+ {
+ "end": 48,
+ "start": 27,
+ "type": "SHORT"
+ },
+ {
+ "end": 33,
+ "start": 31,
+ "type": "CLIENT_INITIALIZATION"
+ },
+ {
+ "end": 42,
+ "start": 34,
+ "type": "REQUEST_INITIALIZATION"
+ },
+ {
+ "end": 45,
+ "start": 43,
+ "type": "REQUEST_EXECUTION"
+ },
+ {
+ "end": 49,
+ "start": 46,
+ "type": "RESPONSE_HANDLING"
+ }
+ ]
+ },
+ {
+ "clientMethod": {
+ "method": {
+ "service": {
+ "shortName": "TensorboardService"
+ },
+ "shortName": "UpdateTensorboardTimeSeries"
+ }
+ },
+ "file": "aiplatform_generated_aiplatform_v1beta1_tensorboard_service_update_tensorboard_time_series_sync.py",
+ "regionTag": "aiplatform_generated_aiplatform_v1beta1_TensorboardService_UpdateTensorboardTimeSeries_sync",
+ "segments": [
+ {
+ "end": 48,
+ "start": 27,
+ "type": "FULL"
+ },
+ {
+ "end": 48,
+ "start": 27,
+ "type": "SHORT"
+ },
+ {
+ "end": 33,
+ "start": 31,
+ "type": "CLIENT_INITIALIZATION"
+ },
+ {
+ "end": 42,
+ "start": 34,
+ "type": "REQUEST_INITIALIZATION"
+ },
+ {
+ "end": 45,
+ "start": 43,
+ "type": "REQUEST_EXECUTION"
+ },
+ {
+ "end": 49,
+ "start": 46,
+ "type": "RESPONSE_HANDLING"
+ }
+ ]
+ },
+ {
+ "clientMethod": {
+ "async": true,
+ "method": {
+ "service": {
+ "shortName": "TensorboardService"
+ },
+ "shortName": "UpdateTensorboard"
+ }
+ },
+ "file": "aiplatform_generated_aiplatform_v1beta1_tensorboard_service_update_tensorboard_async.py",
+ "regionTag": "aiplatform_generated_aiplatform_v1beta1_TensorboardService_UpdateTensorboard_async",
+ "segments": [
+ {
+ "end": 51,
+ "start": 27,
+ "type": "FULL"
+ },
+ {
+ "end": 51,
+ "start": 27,
+ "type": "SHORT"
+ },
+ {
+ "end": 33,
+ "start": 31,
+ "type": "CLIENT_INITIALIZATION"
+ },
+ {
+ "end": 41,
+ "start": 34,
+ "type": "REQUEST_INITIALIZATION"
+ },
+ {
+ "end": 48,
+ "start": 42,
+ "type": "REQUEST_EXECUTION"
+ },
+ {
+ "end": 52,
+ "start": 49,
+ "type": "RESPONSE_HANDLING"
+ }
+ ]
+ },
+ {
+ "clientMethod": {
+ "method": {
+ "service": {
+ "shortName": "TensorboardService"
+ },
+ "shortName": "UpdateTensorboard"
+ }
+ },
+ "file": "aiplatform_generated_aiplatform_v1beta1_tensorboard_service_update_tensorboard_sync.py",
+ "regionTag": "aiplatform_generated_aiplatform_v1beta1_TensorboardService_UpdateTensorboard_sync",
+ "segments": [
+ {
+ "end": 51,
+ "start": 27,
+ "type": "FULL"
+ },
+ {
+ "end": 51,
+ "start": 27,
+ "type": "SHORT"
+ },
+ {
+ "end": 33,
+ "start": 31,
+ "type": "CLIENT_INITIALIZATION"
+ },
+ {
+ "end": 41,
+ "start": 34,
+ "type": "REQUEST_INITIALIZATION"
+ },
+ {
+ "end": 48,
+ "start": 42,
+ "type": "REQUEST_EXECUTION"
+ },
+ {
+ "end": 52,
+ "start": 49,
+ "type": "RESPONSE_HANDLING"
+ }
+ ]
+ },
+ {
+ "clientMethod": {
+ "async": true,
+ "method": {
+ "service": {
+ "shortName": "TensorboardService"
+ },
+ "shortName": "WriteTensorboardExperimentData"
+ }
+ },
+ "file": "aiplatform_generated_aiplatform_v1beta1_tensorboard_service_write_tensorboard_experiment_data_async.py",
+ "regionTag": "aiplatform_generated_aiplatform_v1beta1_TensorboardService_WriteTensorboardExperimentData_async",
+ "segments": [
+ {
+ "end": 50,
+ "start": 27,
+ "type": "FULL"
+ },
+ {
+ "end": 50,
+ "start": 27,
+ "type": "SHORT"
+ },
+ {
+ "end": 33,
+ "start": 31,
+ "type": "CLIENT_INITIALIZATION"
+ },
+ {
+ "end": 44,
+ "start": 34,
+ "type": "REQUEST_INITIALIZATION"
+ },
+ {
+ "end": 47,
+ "start": 45,
+ "type": "REQUEST_EXECUTION"
+ },
+ {
+ "end": 51,
+ "start": 48,
+ "type": "RESPONSE_HANDLING"
+ }
+ ]
+ },
+ {
+ "clientMethod": {
+ "method": {
+ "service": {
+ "shortName": "TensorboardService"
+ },
+ "shortName": "WriteTensorboardExperimentData"
+ }
+ },
+ "file": "aiplatform_generated_aiplatform_v1beta1_tensorboard_service_write_tensorboard_experiment_data_sync.py",
+ "regionTag": "aiplatform_generated_aiplatform_v1beta1_TensorboardService_WriteTensorboardExperimentData_sync",
+ "segments": [
+ {
+ "end": 50,
+ "start": 27,
+ "type": "FULL"
+ },
+ {
+ "end": 50,
+ "start": 27,
+ "type": "SHORT"
+ },
+ {
+ "end": 33,
+ "start": 31,
+ "type": "CLIENT_INITIALIZATION"
+ },
+ {
+ "end": 44,
+ "start": 34,
+ "type": "REQUEST_INITIALIZATION"
+ },
+ {
+ "end": 47,
+ "start": 45,
+ "type": "REQUEST_EXECUTION"
+ },
+ {
+ "end": 51,
+ "start": 48,
+ "type": "RESPONSE_HANDLING"
+ }
+ ]
+ },
+ {
+ "clientMethod": {
+ "async": true,
+ "method": {
+ "service": {
+ "shortName": "TensorboardService"
+ },
+ "shortName": "WriteTensorboardRunData"
+ }
+ },
+ "file": "aiplatform_generated_aiplatform_v1beta1_tensorboard_service_write_tensorboard_run_data_async.py",
+ "regionTag": "aiplatform_generated_aiplatform_v1beta1_TensorboardService_WriteTensorboardRunData_async",
+ "segments": [
+ {
+ "end": 49,
+ "start": 27,
+ "type": "FULL"
+ },
+ {
+ "end": 49,
+ "start": 27,
+ "type": "SHORT"
+ },
+ {
+ "end": 33,
+ "start": 31,
+ "type": "CLIENT_INITIALIZATION"
+ },
+ {
+ "end": 43,
+ "start": 34,
+ "type": "REQUEST_INITIALIZATION"
+ },
+ {
+ "end": 46,
+ "start": 44,
+ "type": "REQUEST_EXECUTION"
+ },
+ {
+ "end": 50,
+ "start": 47,
+ "type": "RESPONSE_HANDLING"
+ }
+ ]
+ },
+ {
+ "clientMethod": {
+ "method": {
+ "service": {
+ "shortName": "TensorboardService"
+ },
+ "shortName": "WriteTensorboardRunData"
+ }
+ },
+ "file": "aiplatform_generated_aiplatform_v1beta1_tensorboard_service_write_tensorboard_run_data_sync.py",
+ "regionTag": "aiplatform_generated_aiplatform_v1beta1_TensorboardService_WriteTensorboardRunData_sync",
+ "segments": [
+ {
+ "end": 49,
+ "start": 27,
+ "type": "FULL"
+ },
+ {
+ "end": 49,
+ "start": 27,
+ "type": "SHORT"
+ },
+ {
+ "end": 33,
+ "start": 31,
+ "type": "CLIENT_INITIALIZATION"
+ },
+ {
+ "end": 43,
+ "start": 34,
+ "type": "REQUEST_INITIALIZATION"
+ },
+ {
+ "end": 46,
+ "start": 44,
+ "type": "REQUEST_EXECUTION"
+ },
+ {
+ "end": 50,
+ "start": 47,
+ "type": "RESPONSE_HANDLING"
+ }
+ ]
+ },
+ {
+ "clientMethod": {
+ "async": true,
+ "method": {
+ "service": {
+ "shortName": "VizierService"
+ },
+ "shortName": "AddTrialMeasurement"
+ }
+ },
+ "file": "aiplatform_generated_aiplatform_v1beta1_vizier_service_add_trial_measurement_async.py",
+ "regionTag": "aiplatform_generated_aiplatform_v1beta1_VizierService_AddTrialMeasurement_async",
+ "segments": [
+ {
+ "end": 44,
+ "start": 27,
+ "type": "FULL"
+ },
+ {
+ "end": 44,
+ "start": 27,
+ "type": "SHORT"
+ },
+ {
+ "end": 33,
+ "start": 31,
+ "type": "CLIENT_INITIALIZATION"
+ },
+ {
+ "end": 38,
+ "start": 34,
+ "type": "REQUEST_INITIALIZATION"
+ },
+ {
+ "end": 41,
+ "start": 39,
+ "type": "REQUEST_EXECUTION"
+ },
+ {
+ "end": 45,
+ "start": 42,
+ "type": "RESPONSE_HANDLING"
+ }
+ ]
+ },
+ {
+ "clientMethod": {
+ "method": {
+ "service": {
+ "shortName": "VizierService"
+ },
+ "shortName": "AddTrialMeasurement"
+ }
+ },
+ "file": "aiplatform_generated_aiplatform_v1beta1_vizier_service_add_trial_measurement_sync.py",
+ "regionTag": "aiplatform_generated_aiplatform_v1beta1_VizierService_AddTrialMeasurement_sync",
+ "segments": [
+ {
+ "end": 44,
+ "start": 27,
+ "type": "FULL"
+ },
+ {
+ "end": 44,
+ "start": 27,
+ "type": "SHORT"
+ },
+ {
+ "end": 33,
+ "start": 31,
+ "type": "CLIENT_INITIALIZATION"
+ },
+ {
+ "end": 38,
+ "start": 34,
+ "type": "REQUEST_INITIALIZATION"
+ },
+ {
+ "end": 41,
+ "start": 39,
+ "type": "REQUEST_EXECUTION"
+ },
+ {
+ "end": 45,
+ "start": 42,
+ "type": "RESPONSE_HANDLING"
+ }
+ ]
+ },
+ {
+ "clientMethod": {
+ "async": true,
+ "method": {
+ "service": {
+ "shortName": "VizierService"
+ },
+ "shortName": "CheckTrialEarlyStoppingState"
+ }
+ },
+ "file": "aiplatform_generated_aiplatform_v1beta1_vizier_service_check_trial_early_stopping_state_async.py",
+ "regionTag": "aiplatform_generated_aiplatform_v1beta1_VizierService_CheckTrialEarlyStoppingState_async",
+ "segments": [
+ {
+ "end": 48,
+ "start": 27,
+ "type": "FULL"
+ },
+ {
+ "end": 48,
+ "start": 27,
+ "type": "SHORT"
+ },
+ {
+ "end": 33,
+ "start": 31,
+ "type": "CLIENT_INITIALIZATION"
+ },
+ {
+ "end": 38,
+ "start": 34,
+ "type": "REQUEST_INITIALIZATION"
+ },
+ {
+ "end": 45,
+ "start": 39,
+ "type": "REQUEST_EXECUTION"
+ },
+ {
+ "end": 49,
+ "start": 46,
+ "type": "RESPONSE_HANDLING"
+ }
+ ]
+ },
+ {
+ "clientMethod": {
+ "method": {
+ "service": {
+ "shortName": "VizierService"
+ },
+ "shortName": "CheckTrialEarlyStoppingState"
+ }
+ },
+ "file": "aiplatform_generated_aiplatform_v1beta1_vizier_service_check_trial_early_stopping_state_sync.py",
+ "regionTag": "aiplatform_generated_aiplatform_v1beta1_VizierService_CheckTrialEarlyStoppingState_sync",
+ "segments": [
+ {
+ "end": 48,
+ "start": 27,
+ "type": "FULL"
+ },
+ {
+ "end": 48,
+ "start": 27,
+ "type": "SHORT"
+ },
+ {
+ "end": 33,
+ "start": 31,
+ "type": "CLIENT_INITIALIZATION"
+ },
+ {
+ "end": 38,
+ "start": 34,
+ "type": "REQUEST_INITIALIZATION"
+ },
+ {
+ "end": 45,
+ "start": 39,
+ "type": "REQUEST_EXECUTION"
+ },
+ {
+ "end": 49,
+ "start": 46,
+ "type": "RESPONSE_HANDLING"
+ }
+ ]
+ },
+ {
+ "clientMethod": {
+ "async": true,
+ "method": {
+ "service": {
+ "shortName": "VizierService"
+ },
+ "shortName": "CompleteTrial"
+ }
+ },
+ "file": "aiplatform_generated_aiplatform_v1beta1_vizier_service_complete_trial_async.py",
+ "regionTag": "aiplatform_generated_aiplatform_v1beta1_VizierService_CompleteTrial_async",
+ "segments": [
+ {
+ "end": 44,
+ "start": 27,
+ "type": "FULL"
+ },
+ {
+ "end": 44,
+ "start": 27,
+ "type": "SHORT"
+ },
+ {
+ "end": 33,
+ "start": 31,
+ "type": "CLIENT_INITIALIZATION"
+ },
+ {
+ "end": 38,
+ "start": 34,
+ "type": "REQUEST_INITIALIZATION"
+ },
+ {
+ "end": 41,
+ "start": 39,
+ "type": "REQUEST_EXECUTION"
+ },
+ {
+ "end": 45,
+ "start": 42,
+ "type": "RESPONSE_HANDLING"
+ }
+ ]
+ },
+ {
+ "clientMethod": {
+ "method": {
+ "service": {
+ "shortName": "VizierService"
+ },
+ "shortName": "CompleteTrial"
+ }
+ },
+ "file": "aiplatform_generated_aiplatform_v1beta1_vizier_service_complete_trial_sync.py",
+ "regionTag": "aiplatform_generated_aiplatform_v1beta1_VizierService_CompleteTrial_sync",
+ "segments": [
+ {
+ "end": 44,
+ "start": 27,
+ "type": "FULL"
+ },
+ {
+ "end": 44,
+ "start": 27,
+ "type": "SHORT"
+ },
+ {
+ "end": 33,
+ "start": 31,
+ "type": "CLIENT_INITIALIZATION"
+ },
+ {
+ "end": 38,
+ "start": 34,
+ "type": "REQUEST_INITIALIZATION"
+ },
+ {
+ "end": 41,
+ "start": 39,
+ "type": "REQUEST_EXECUTION"
+ },
+ {
+ "end": 45,
+ "start": 42,
+ "type": "RESPONSE_HANDLING"
+ }
+ ]
+ },
+ {
+ "clientMethod": {
+ "async": true,
+ "method": {
+ "service": {
+ "shortName": "VizierService"
+ },
+ "shortName": "CreateStudy"
+ }
+ },
+ "file": "aiplatform_generated_aiplatform_v1beta1_vizier_service_create_study_async.py",
+ "regionTag": "aiplatform_generated_aiplatform_v1beta1_VizierService_CreateStudy_async",
+ "segments": [
+ {
+ "end": 53,
+ "start": 27,
+ "type": "FULL"
+ },
+ {
+ "end": 53,
+ "start": 27,
+ "type": "SHORT"
+ },
+ {
+ "end": 33,
+ "start": 31,
+ "type": "CLIENT_INITIALIZATION"
+ },
+ {
+ "end": 47,
+ "start": 34,
+ "type": "REQUEST_INITIALIZATION"
+ },
+ {
+ "end": 50,
+ "start": 48,
+ "type": "REQUEST_EXECUTION"
+ },
+ {
+ "end": 54,
+ "start": 51,
+ "type": "RESPONSE_HANDLING"
+ }
+ ]
+ },
+ {
+ "clientMethod": {
+ "method": {
+ "service": {
+ "shortName": "VizierService"
+ },
+ "shortName": "CreateStudy"
+ }
+ },
+ "file": "aiplatform_generated_aiplatform_v1beta1_vizier_service_create_study_sync.py",
+ "regionTag": "aiplatform_generated_aiplatform_v1beta1_VizierService_CreateStudy_sync",
+ "segments": [
+ {
+ "end": 53,
+ "start": 27,
+ "type": "FULL"
+ },
+ {
+ "end": 53,
+ "start": 27,
+ "type": "SHORT"
+ },
+ {
+ "end": 33,
+ "start": 31,
+ "type": "CLIENT_INITIALIZATION"
+ },
+ {
+ "end": 47,
+ "start": 34,
+ "type": "REQUEST_INITIALIZATION"
+ },
+ {
+ "end": 50,
+ "start": 48,
+ "type": "REQUEST_EXECUTION"
+ },
+ {
+ "end": 54,
+ "start": 51,
+ "type": "RESPONSE_HANDLING"
+ }
+ ]
+ },
+ {
+ "clientMethod": {
+ "async": true,
+ "method": {
+ "service": {
+ "shortName": "VizierService"
+ },
+ "shortName": "CreateTrial"
+ }
+ },
+ "file": "aiplatform_generated_aiplatform_v1beta1_vizier_service_create_trial_async.py",
+ "regionTag": "aiplatform_generated_aiplatform_v1beta1_VizierService_CreateTrial_async",
+ "segments": [
+ {
+ "end": 44,
+ "start": 27,
+ "type": "FULL"
+ },
+ {
+ "end": 44,
+ "start": 27,
+ "type": "SHORT"
+ },
+ {
+ "end": 33,
+ "start": 31,
+ "type": "CLIENT_INITIALIZATION"
+ },
+ {
+ "end": 38,
+ "start": 34,
+ "type": "REQUEST_INITIALIZATION"
+ },
+ {
+ "end": 41,
+ "start": 39,
+ "type": "REQUEST_EXECUTION"
+ },
+ {
+ "end": 45,
+ "start": 42,
+ "type": "RESPONSE_HANDLING"
+ }
+ ]
+ },
+ {
+ "clientMethod": {
+ "method": {
+ "service": {
+ "shortName": "VizierService"
+ },
+ "shortName": "CreateTrial"
+ }
+ },
+ "file": "aiplatform_generated_aiplatform_v1beta1_vizier_service_create_trial_sync.py",
+ "regionTag": "aiplatform_generated_aiplatform_v1beta1_VizierService_CreateTrial_sync",
+ "segments": [
+ {
+ "end": 44,
+ "start": 27,
+ "type": "FULL"
+ },
+ {
+ "end": 44,
+ "start": 27,
+ "type": "SHORT"
+ },
+ {
+ "end": 33,
+ "start": 31,
+ "type": "CLIENT_INITIALIZATION"
+ },
+ {
+ "end": 38,
+ "start": 34,
+ "type": "REQUEST_INITIALIZATION"
+ },
+ {
+ "end": 41,
+ "start": 39,
+ "type": "REQUEST_EXECUTION"
+ },
+ {
+ "end": 45,
+ "start": 42,
+ "type": "RESPONSE_HANDLING"
+ }
+ ]
+ },
+ {
+ "clientMethod": {
+ "async": true,
+ "method": {
+ "service": {
+ "shortName": "VizierService"
+ },
+ "shortName": "DeleteStudy"
+ }
+ },
+ "file": "aiplatform_generated_aiplatform_v1beta1_vizier_service_delete_study_async.py",
+ "regionTag": "aiplatform_generated_aiplatform_v1beta1_VizierService_DeleteStudy_async",
+ "segments": [
+ {
+ "end": 42,
+ "start": 27,
+ "type": "FULL"
+ },
+ {
+ "end": 42,
+ "start": 27,
+ "type": "SHORT"
+ },
+ {
+ "end": 33,
+ "start": 31,
+ "type": "CLIENT_INITIALIZATION"
+ },
+ {
+ "end": 38,
+ "start": 34,
+ "type": "REQUEST_INITIALIZATION"
+ },
+ {
+ "start": 39,
+ "type": "REQUEST_EXECUTION"
+ },
+ {
+ "end": 43,
+ "type": "RESPONSE_HANDLING"
+ }
+ ]
+ },
+ {
+ "clientMethod": {
+ "method": {
+ "service": {
+ "shortName": "VizierService"
+ },
+ "shortName": "DeleteStudy"
+ }
+ },
+ "file": "aiplatform_generated_aiplatform_v1beta1_vizier_service_delete_study_sync.py",
+ "regionTag": "aiplatform_generated_aiplatform_v1beta1_VizierService_DeleteStudy_sync",
+ "segments": [
+ {
+ "end": 42,
+ "start": 27,
+ "type": "FULL"
+ },
+ {
+ "end": 42,
+ "start": 27,
+ "type": "SHORT"
+ },
+ {
+ "end": 33,
+ "start": 31,
+ "type": "CLIENT_INITIALIZATION"
+ },
+ {
+ "end": 38,
+ "start": 34,
+ "type": "REQUEST_INITIALIZATION"
+ },
+ {
+ "start": 39,
+ "type": "REQUEST_EXECUTION"
+ },
+ {
+ "end": 43,
+ "type": "RESPONSE_HANDLING"
+ }
+ ]
+ },
+ {
+ "clientMethod": {
+ "async": true,
+ "method": {
+ "service": {
+ "shortName": "VizierService"
+ },
+ "shortName": "DeleteTrial"
+ }
+ },
+ "file": "aiplatform_generated_aiplatform_v1beta1_vizier_service_delete_trial_async.py",
+ "regionTag": "aiplatform_generated_aiplatform_v1beta1_VizierService_DeleteTrial_async",
+ "segments": [
+ {
+ "end": 42,
+ "start": 27,
+ "type": "FULL"
+ },
+ {
+ "end": 42,
+ "start": 27,
+ "type": "SHORT"
+ },
+ {
+ "end": 33,
+ "start": 31,
+ "type": "CLIENT_INITIALIZATION"
+ },
+ {
+ "end": 38,
+ "start": 34,
+ "type": "REQUEST_INITIALIZATION"
+ },
+ {
+ "start": 39,
+ "type": "REQUEST_EXECUTION"
+ },
+ {
+ "end": 43,
+ "type": "RESPONSE_HANDLING"
+ }
+ ]
+ },
+ {
+ "clientMethod": {
+ "method": {
+ "service": {
+ "shortName": "VizierService"
+ },
+ "shortName": "DeleteTrial"
+ }
+ },
+ "file": "aiplatform_generated_aiplatform_v1beta1_vizier_service_delete_trial_sync.py",
+ "regionTag": "aiplatform_generated_aiplatform_v1beta1_VizierService_DeleteTrial_sync",
+ "segments": [
+ {
+ "end": 42,
+ "start": 27,
+ "type": "FULL"
+ },
+ {
+ "end": 42,
+ "start": 27,
+ "type": "SHORT"
+ },
+ {
+ "end": 33,
+ "start": 31,
+ "type": "CLIENT_INITIALIZATION"
+ },
+ {
+ "end": 38,
+ "start": 34,
+ "type": "REQUEST_INITIALIZATION"
+ },
+ {
+ "start": 39,
+ "type": "REQUEST_EXECUTION"
+ },
+ {
+ "end": 43,
+ "type": "RESPONSE_HANDLING"
+ }
+ ]
+ },
+ {
+ "clientMethod": {
+ "async": true,
+ "method": {
+ "service": {
+ "shortName": "VizierService"
+ },
+ "shortName": "GetStudy"
+ }
+ },
+ "file": "aiplatform_generated_aiplatform_v1beta1_vizier_service_get_study_async.py",
+ "regionTag": "aiplatform_generated_aiplatform_v1beta1_VizierService_GetStudy_async",
+ "segments": [
+ {
+ "end": 44,
+ "start": 27,
+ "type": "FULL"
+ },
+ {
+ "end": 44,
+ "start": 27,
+ "type": "SHORT"
+ },
+ {
+ "end": 33,
+ "start": 31,
+ "type": "CLIENT_INITIALIZATION"
+ },
+ {
+ "end": 38,
+ "start": 34,
+ "type": "REQUEST_INITIALIZATION"
+ },
+ {
+ "end": 41,
+ "start": 39,
+ "type": "REQUEST_EXECUTION"
+ },
+ {
+ "end": 45,
+ "start": 42,
+ "type": "RESPONSE_HANDLING"
+ }
+ ]
+ },
+ {
+ "clientMethod": {
+ "method": {
+ "service": {
+ "shortName": "VizierService"
+ },
+ "shortName": "GetStudy"
+ }
+ },
+ "file": "aiplatform_generated_aiplatform_v1beta1_vizier_service_get_study_sync.py",
+ "regionTag": "aiplatform_generated_aiplatform_v1beta1_VizierService_GetStudy_sync",
+ "segments": [
+ {
+ "end": 44,
+ "start": 27,
+ "type": "FULL"
+ },
+ {
+ "end": 44,
+ "start": 27,
+ "type": "SHORT"
+ },
+ {
+ "end": 33,
+ "start": 31,
+ "type": "CLIENT_INITIALIZATION"
+ },
+ {
+ "end": 38,
+ "start": 34,
+ "type": "REQUEST_INITIALIZATION"
+ },
+ {
+ "end": 41,
+ "start": 39,
+ "type": "REQUEST_EXECUTION"
+ },
+ {
+ "end": 45,
+ "start": 42,
+ "type": "RESPONSE_HANDLING"
+ }
+ ]
+ },
+ {
+ "clientMethod": {
+ "async": true,
+ "method": {
+ "service": {
+ "shortName": "VizierService"
+ },
+ "shortName": "GetTrial"
+ }
+ },
+ "file": "aiplatform_generated_aiplatform_v1beta1_vizier_service_get_trial_async.py",
+ "regionTag": "aiplatform_generated_aiplatform_v1beta1_VizierService_GetTrial_async",
+ "segments": [
+ {
+ "end": 44,
+ "start": 27,
+ "type": "FULL"
+ },
+ {
+ "end": 44,
+ "start": 27,
+ "type": "SHORT"
+ },
+ {
+ "end": 33,
+ "start": 31,
+ "type": "CLIENT_INITIALIZATION"
+ },
+ {
+ "end": 38,
+ "start": 34,
+ "type": "REQUEST_INITIALIZATION"
+ },
+ {
+ "end": 41,
+ "start": 39,
+ "type": "REQUEST_EXECUTION"
+ },
+ {
+ "end": 45,
+ "start": 42,
+ "type": "RESPONSE_HANDLING"
+ }
+ ]
+ },
+ {
+ "clientMethod": {
+ "method": {
+ "service": {
+ "shortName": "VizierService"
+ },
+ "shortName": "GetTrial"
+ }
+ },
+ "file": "aiplatform_generated_aiplatform_v1beta1_vizier_service_get_trial_sync.py",
+ "regionTag": "aiplatform_generated_aiplatform_v1beta1_VizierService_GetTrial_sync",
+ "segments": [
+ {
+ "end": 44,
+ "start": 27,
+ "type": "FULL"
+ },
+ {
+ "end": 44,
+ "start": 27,
+ "type": "SHORT"
+ },
+ {
+ "end": 33,
+ "start": 31,
+ "type": "CLIENT_INITIALIZATION"
+ },
+ {
+ "end": 38,
+ "start": 34,
+ "type": "REQUEST_INITIALIZATION"
+ },
+ {
+ "end": 41,
+ "start": 39,
+ "type": "REQUEST_EXECUTION"
+ },
+ {
+ "end": 45,
+ "start": 42,
+ "type": "RESPONSE_HANDLING"
+ }
+ ]
+ },
+ {
+ "clientMethod": {
+ "async": true,
+ "method": {
+ "service": {
+ "shortName": "VizierService"
+ },
+ "shortName": "ListOptimalTrials"
+ }
+ },
+ "file": "aiplatform_generated_aiplatform_v1beta1_vizier_service_list_optimal_trials_async.py",
+ "regionTag": "aiplatform_generated_aiplatform_v1beta1_VizierService_ListOptimalTrials_async",
+ "segments": [
+ {
+ "end": 44,
+ "start": 27,
+ "type": "FULL"
+ },
+ {
+ "end": 44,
+ "start": 27,
+ "type": "SHORT"
+ },
+ {
+ "end": 33,
+ "start": 31,
+ "type": "CLIENT_INITIALIZATION"
+ },
+ {
+ "end": 38,
+ "start": 34,
+ "type": "REQUEST_INITIALIZATION"
+ },
+ {
+ "end": 41,
+ "start": 39,
+ "type": "REQUEST_EXECUTION"
+ },
+ {
+ "end": 45,
+ "start": 42,
+ "type": "RESPONSE_HANDLING"
+ }
+ ]
+ },
+ {
+ "clientMethod": {
+ "method": {
+ "service": {
+ "shortName": "VizierService"
+ },
+ "shortName": "ListOptimalTrials"
+ }
+ },
+ "file": "aiplatform_generated_aiplatform_v1beta1_vizier_service_list_optimal_trials_sync.py",
+ "regionTag": "aiplatform_generated_aiplatform_v1beta1_VizierService_ListOptimalTrials_sync",
+ "segments": [
+ {
+ "end": 44,
+ "start": 27,
+ "type": "FULL"
+ },
+ {
+ "end": 44,
+ "start": 27,
+ "type": "SHORT"
+ },
+ {
+ "end": 33,
+ "start": 31,
+ "type": "CLIENT_INITIALIZATION"
+ },
+ {
+ "end": 38,
+ "start": 34,
+ "type": "REQUEST_INITIALIZATION"
+ },
+ {
+ "end": 41,
+ "start": 39,
+ "type": "REQUEST_EXECUTION"
+ },
+ {
+ "end": 45,
+ "start": 42,
+ "type": "RESPONSE_HANDLING"
+ }
+ ]
+ },
+ {
+ "clientMethod": {
+ "async": true,
+ "method": {
+ "service": {
+ "shortName": "VizierService"
+ },
+ "shortName": "ListStudies"
+ }
+ },
+ "file": "aiplatform_generated_aiplatform_v1beta1_vizier_service_list_studies_async.py",
+ "regionTag": "aiplatform_generated_aiplatform_v1beta1_VizierService_ListStudies_async",
+ "segments": [
+ {
+ "end": 45,
+ "start": 27,
+ "type": "FULL"
+ },
+ {
+ "end": 45,
+ "start": 27,
+ "type": "SHORT"
+ },
+ {
+ "end": 33,
+ "start": 31,
+ "type": "CLIENT_INITIALIZATION"
+ },
+ {
+ "end": 38,
+ "start": 34,
+ "type": "REQUEST_INITIALIZATION"
+ },
+ {
+ "end": 41,
+ "start": 39,
+ "type": "REQUEST_EXECUTION"
+ },
+ {
+ "end": 46,
+ "start": 42,
+ "type": "RESPONSE_HANDLING"
+ }
+ ]
+ },
+ {
+ "clientMethod": {
+ "method": {
+ "service": {
+ "shortName": "VizierService"
+ },
+ "shortName": "ListStudies"
+ }
+ },
+ "file": "aiplatform_generated_aiplatform_v1beta1_vizier_service_list_studies_sync.py",
+ "regionTag": "aiplatform_generated_aiplatform_v1beta1_VizierService_ListStudies_sync",
+ "segments": [
+ {
+ "end": 45,
+ "start": 27,
+ "type": "FULL"
+ },
+ {
+ "end": 45,
+ "start": 27,
+ "type": "SHORT"
+ },
+ {
+ "end": 33,
+ "start": 31,
+ "type": "CLIENT_INITIALIZATION"
+ },
+ {
+ "end": 38,
+ "start": 34,
+ "type": "REQUEST_INITIALIZATION"
+ },
+ {
+ "end": 41,
+ "start": 39,
+ "type": "REQUEST_EXECUTION"
+ },
+ {
+ "end": 46,
+ "start": 42,
+ "type": "RESPONSE_HANDLING"
+ }
+ ]
+ },
+ {
+ "clientMethod": {
+ "async": true,
+ "method": {
+ "service": {
+ "shortName": "VizierService"
+ },
+ "shortName": "ListTrials"
+ }
+ },
+ "file": "aiplatform_generated_aiplatform_v1beta1_vizier_service_list_trials_async.py",
+ "regionTag": "aiplatform_generated_aiplatform_v1beta1_VizierService_ListTrials_async",
+ "segments": [
+ {
+ "end": 45,
+ "start": 27,
+ "type": "FULL"
+ },
+ {
+ "end": 45,
+ "start": 27,
+ "type": "SHORT"
+ },
+ {
+ "end": 33,
+ "start": 31,
+ "type": "CLIENT_INITIALIZATION"
+ },
+ {
+ "end": 38,
+ "start": 34,
+ "type": "REQUEST_INITIALIZATION"
+ },
+ {
+ "end": 41,
+ "start": 39,
+ "type": "REQUEST_EXECUTION"
+ },
+ {
+ "end": 46,
+ "start": 42,
+ "type": "RESPONSE_HANDLING"
+ }
+ ]
+ },
+ {
+ "clientMethod": {
+ "method": {
+ "service": {
+ "shortName": "VizierService"
+ },
+ "shortName": "ListTrials"
+ }
+ },
+ "file": "aiplatform_generated_aiplatform_v1beta1_vizier_service_list_trials_sync.py",
+ "regionTag": "aiplatform_generated_aiplatform_v1beta1_VizierService_ListTrials_sync",
+ "segments": [
+ {
+ "end": 45,
+ "start": 27,
+ "type": "FULL"
+ },
+ {
+ "end": 45,
+ "start": 27,
+ "type": "SHORT"
+ },
+ {
+ "end": 33,
+ "start": 31,
+ "type": "CLIENT_INITIALIZATION"
+ },
+ {
+ "end": 38,
+ "start": 34,
+ "type": "REQUEST_INITIALIZATION"
+ },
+ {
+ "end": 41,
+ "start": 39,
+ "type": "REQUEST_EXECUTION"
+ },
+ {
+ "end": 46,
+ "start": 42,
+ "type": "RESPONSE_HANDLING"
+ }
+ ]
+ },
+ {
+ "clientMethod": {
+ "async": true,
+ "method": {
+ "service": {
+ "shortName": "VizierService"
+ },
+ "shortName": "LookupStudy"
+ }
+ },
+ "file": "aiplatform_generated_aiplatform_v1beta1_vizier_service_lookup_study_async.py",
+ "regionTag": "aiplatform_generated_aiplatform_v1beta1_VizierService_LookupStudy_async",
+ "segments": [
+ {
+ "end": 45,
+ "start": 27,
+ "type": "FULL"
+ },
+ {
+ "end": 45,
+ "start": 27,
+ "type": "SHORT"
+ },
+ {
+ "end": 33,
+ "start": 31,
+ "type": "CLIENT_INITIALIZATION"
+ },
+ {
+ "end": 39,
+ "start": 34,
+ "type": "REQUEST_INITIALIZATION"
+ },
+ {
+ "end": 42,
+ "start": 40,
+ "type": "REQUEST_EXECUTION"
+ },
+ {
+ "end": 46,
+ "start": 43,
+ "type": "RESPONSE_HANDLING"
+ }
+ ]
+ },
+ {
+ "clientMethod": {
+ "method": {
+ "service": {
+ "shortName": "VizierService"
+ },
+ "shortName": "LookupStudy"
+ }
+ },
+ "file": "aiplatform_generated_aiplatform_v1beta1_vizier_service_lookup_study_sync.py",
+ "regionTag": "aiplatform_generated_aiplatform_v1beta1_VizierService_LookupStudy_sync",
+ "segments": [
+ {
+ "end": 45,
+ "start": 27,
+ "type": "FULL"
+ },
+ {
+ "end": 45,
+ "start": 27,
+ "type": "SHORT"
+ },
+ {
+ "end": 33,
+ "start": 31,
+ "type": "CLIENT_INITIALIZATION"
+ },
+ {
+ "end": 39,
+ "start": 34,
+ "type": "REQUEST_INITIALIZATION"
+ },
+ {
+ "end": 42,
+ "start": 40,
+ "type": "REQUEST_EXECUTION"
+ },
+ {
+ "end": 46,
+ "start": 43,
+ "type": "RESPONSE_HANDLING"
+ }
+ ]
+ },
+ {
+ "clientMethod": {
+ "async": true,
+ "method": {
+ "service": {
+ "shortName": "VizierService"
+ },
+ "shortName": "StopTrial"
+ }
+ },
+ "file": "aiplatform_generated_aiplatform_v1beta1_vizier_service_stop_trial_async.py",
+ "regionTag": "aiplatform_generated_aiplatform_v1beta1_VizierService_StopTrial_async",
+ "segments": [
+ {
+ "end": 44,
+ "start": 27,
+ "type": "FULL"
+ },
+ {
+ "end": 44,
+ "start": 27,
+ "type": "SHORT"
+ },
+ {
+ "end": 33,
+ "start": 31,
+ "type": "CLIENT_INITIALIZATION"
+ },
+ {
+ "end": 38,
+ "start": 34,
+ "type": "REQUEST_INITIALIZATION"
+ },
+ {
+ "end": 41,
+ "start": 39,
+ "type": "REQUEST_EXECUTION"
+ },
+ {
+ "end": 45,
+ "start": 42,
+ "type": "RESPONSE_HANDLING"
+ }
+ ]
+ },
+ {
+ "clientMethod": {
+ "method": {
+ "service": {
+ "shortName": "VizierService"
+ },
+ "shortName": "StopTrial"
+ }
+ },
+ "file": "aiplatform_generated_aiplatform_v1beta1_vizier_service_stop_trial_sync.py",
+ "regionTag": "aiplatform_generated_aiplatform_v1beta1_VizierService_StopTrial_sync",
+ "segments": [
+ {
+ "end": 44,
+ "start": 27,
+ "type": "FULL"
+ },
+ {
+ "end": 44,
+ "start": 27,
+ "type": "SHORT"
+ },
+ {
+ "end": 33,
+ "start": 31,
+ "type": "CLIENT_INITIALIZATION"
+ },
+ {
+ "end": 38,
+ "start": 34,
+ "type": "REQUEST_INITIALIZATION"
+ },
+ {
+ "end": 41,
+ "start": 39,
+ "type": "REQUEST_EXECUTION"
+ },
+ {
+ "end": 45,
+ "start": 42,
+ "type": "RESPONSE_HANDLING"
+ }
+ ]
+ },
+ {
+ "clientMethod": {
+ "async": true,
+ "method": {
+ "service": {
+ "shortName": "VizierService"
+ },
+ "shortName": "SuggestTrials"
+ }
+ },
+ "file": "aiplatform_generated_aiplatform_v1beta1_vizier_service_suggest_trials_async.py",
+ "regionTag": "aiplatform_generated_aiplatform_v1beta1_VizierService_SuggestTrials_async",
+ "segments": [
+ {
+ "end": 50,
+ "start": 27,
+ "type": "FULL"
+ },
+ {
+ "end": 50,
+ "start": 27,
+ "type": "SHORT"
+ },
+ {
+ "end": 33,
+ "start": 31,
+ "type": "CLIENT_INITIALIZATION"
+ },
+ {
+ "end": 40,
+ "start": 34,
+ "type": "REQUEST_INITIALIZATION"
+ },
+ {
+ "end": 47,
+ "start": 41,
+ "type": "REQUEST_EXECUTION"
+ },
+ {
+ "end": 51,
+ "start": 48,
+ "type": "RESPONSE_HANDLING"
+ }
+ ]
+ },
+ {
+ "clientMethod": {
+ "method": {
+ "service": {
+ "shortName": "VizierService"
+ },
+ "shortName": "SuggestTrials"
+ }
+ },
+ "file": "aiplatform_generated_aiplatform_v1beta1_vizier_service_suggest_trials_sync.py",
+ "regionTag": "aiplatform_generated_aiplatform_v1beta1_VizierService_SuggestTrials_sync",
+ "segments": [
+ {
+ "end": 50,
+ "start": 27,
+ "type": "FULL"
+ },
+ {
+ "end": 50,
+ "start": 27,
+ "type": "SHORT"
+ },
+ {
+ "end": 33,
+ "start": 31,
+ "type": "CLIENT_INITIALIZATION"
+ },
+ {
+ "end": 40,
+ "start": 34,
+ "type": "REQUEST_INITIALIZATION"
+ },
+ {
+ "end": 47,
+ "start": 41,
+ "type": "REQUEST_EXECUTION"
+ },
+ {
+ "end": 51,
+ "start": 48,
+ "type": "RESPONSE_HANDLING"
+ }
+ ]
+ }
+ ]
+}
diff --git a/tests/unit/gapic/aiplatform_v1/test_dataset_service.py b/tests/unit/gapic/aiplatform_v1/test_dataset_service.py
index b8980f4720..5d60113921 100644
--- a/tests/unit/gapic/aiplatform_v1/test_dataset_service.py
+++ b/tests/unit/gapic/aiplatform_v1/test_dataset_service.py
@@ -562,35 +562,6 @@ def test_dataset_service_client_client_options_credentials_file(
always_use_jwt_access=True,
)
- if "grpc" in transport_name:
- # test that the credentials from file are saved and used as the credentials.
- with mock.patch.object(
- google.auth, "load_credentials_from_file", autospec=True
- ) as load_creds, mock.patch.object(
- google.auth, "default", autospec=True
- ) as adc, mock.patch.object(
- grpc_helpers, "create_channel"
- ) as create_channel:
- creds = ga_credentials.AnonymousCredentials()
- file_creds = ga_credentials.AnonymousCredentials()
- load_creds.return_value = (file_creds, None)
- adc.return_value = (creds, None)
- client = client_class(client_options=options, transport=transport_name)
- create_channel.assert_called_with(
- "aiplatform.googleapis.com:443",
- credentials=file_creds,
- credentials_file=None,
- quota_project_id=None,
- default_scopes=("https://www.googleapis.com/auth/cloud-platform",),
- scopes=None,
- default_host="aiplatform.googleapis.com",
- ssl_credentials=None,
- options=[
- ("grpc.max_send_message_length", -1),
- ("grpc.max_receive_message_length", -1),
- ],
- )
-
def test_dataset_service_client_client_options_from_dict():
with mock.patch(
@@ -612,6 +583,72 @@ def test_dataset_service_client_client_options_from_dict():
)
+@pytest.mark.parametrize(
+ "client_class,transport_class,transport_name,grpc_helpers",
+ [
+ (
+ DatasetServiceClient,
+ transports.DatasetServiceGrpcTransport,
+ "grpc",
+ grpc_helpers,
+ ),
+ (
+ DatasetServiceAsyncClient,
+ transports.DatasetServiceGrpcAsyncIOTransport,
+ "grpc_asyncio",
+ grpc_helpers_async,
+ ),
+ ],
+)
+def test_dataset_service_client_create_channel_credentials_file(
+ client_class, transport_class, transport_name, grpc_helpers
+):
+ # Check the case credentials file is provided.
+ options = client_options.ClientOptions(credentials_file="credentials.json")
+
+ with mock.patch.object(transport_class, "__init__") as patched:
+ patched.return_value = None
+ client = client_class(client_options=options, transport=transport_name)
+ patched.assert_called_once_with(
+ credentials=None,
+ credentials_file="credentials.json",
+ host=client.DEFAULT_ENDPOINT,
+ scopes=None,
+ client_cert_source_for_mtls=None,
+ quota_project_id=None,
+ client_info=transports.base.DEFAULT_CLIENT_INFO,
+ always_use_jwt_access=True,
+ )
+
+ # test that the credentials from file are saved and used as the credentials.
+ with mock.patch.object(
+ google.auth, "load_credentials_from_file", autospec=True
+ ) as load_creds, mock.patch.object(
+ google.auth, "default", autospec=True
+ ) as adc, mock.patch.object(
+ grpc_helpers, "create_channel"
+ ) as create_channel:
+ creds = ga_credentials.AnonymousCredentials()
+ file_creds = ga_credentials.AnonymousCredentials()
+ load_creds.return_value = (file_creds, None)
+ adc.return_value = (creds, None)
+ client = client_class(client_options=options, transport=transport_name)
+ create_channel.assert_called_with(
+ "aiplatform.googleapis.com:443",
+ credentials=file_creds,
+ credentials_file=None,
+ quota_project_id=None,
+ default_scopes=("https://www.googleapis.com/auth/cloud-platform",),
+ scopes=None,
+ default_host="aiplatform.googleapis.com",
+ ssl_credentials=None,
+ options=[
+ ("grpc.max_send_message_length", -1),
+ ("grpc.max_receive_message_length", -1),
+ ],
+ )
+
+
@pytest.mark.parametrize("request_type", [dataset_service.CreateDatasetRequest, dict,])
def test_create_dataset(request_type, transport: str = "grpc"):
client = DatasetServiceClient(
diff --git a/tests/unit/gapic/aiplatform_v1/test_endpoint_service.py b/tests/unit/gapic/aiplatform_v1/test_endpoint_service.py
index a09ea22507..a9d5d65c61 100644
--- a/tests/unit/gapic/aiplatform_v1/test_endpoint_service.py
+++ b/tests/unit/gapic/aiplatform_v1/test_endpoint_service.py
@@ -572,35 +572,6 @@ def test_endpoint_service_client_client_options_credentials_file(
always_use_jwt_access=True,
)
- if "grpc" in transport_name:
- # test that the credentials from file are saved and used as the credentials.
- with mock.patch.object(
- google.auth, "load_credentials_from_file", autospec=True
- ) as load_creds, mock.patch.object(
- google.auth, "default", autospec=True
- ) as adc, mock.patch.object(
- grpc_helpers, "create_channel"
- ) as create_channel:
- creds = ga_credentials.AnonymousCredentials()
- file_creds = ga_credentials.AnonymousCredentials()
- load_creds.return_value = (file_creds, None)
- adc.return_value = (creds, None)
- client = client_class(client_options=options, transport=transport_name)
- create_channel.assert_called_with(
- "aiplatform.googleapis.com:443",
- credentials=file_creds,
- credentials_file=None,
- quota_project_id=None,
- default_scopes=("https://www.googleapis.com/auth/cloud-platform",),
- scopes=None,
- default_host="aiplatform.googleapis.com",
- ssl_credentials=None,
- options=[
- ("grpc.max_send_message_length", -1),
- ("grpc.max_receive_message_length", -1),
- ],
- )
-
def test_endpoint_service_client_client_options_from_dict():
with mock.patch(
@@ -622,6 +593,72 @@ def test_endpoint_service_client_client_options_from_dict():
)
+@pytest.mark.parametrize(
+ "client_class,transport_class,transport_name,grpc_helpers",
+ [
+ (
+ EndpointServiceClient,
+ transports.EndpointServiceGrpcTransport,
+ "grpc",
+ grpc_helpers,
+ ),
+ (
+ EndpointServiceAsyncClient,
+ transports.EndpointServiceGrpcAsyncIOTransport,
+ "grpc_asyncio",
+ grpc_helpers_async,
+ ),
+ ],
+)
+def test_endpoint_service_client_create_channel_credentials_file(
+ client_class, transport_class, transport_name, grpc_helpers
+):
+ # Check the case credentials file is provided.
+ options = client_options.ClientOptions(credentials_file="credentials.json")
+
+ with mock.patch.object(transport_class, "__init__") as patched:
+ patched.return_value = None
+ client = client_class(client_options=options, transport=transport_name)
+ patched.assert_called_once_with(
+ credentials=None,
+ credentials_file="credentials.json",
+ host=client.DEFAULT_ENDPOINT,
+ scopes=None,
+ client_cert_source_for_mtls=None,
+ quota_project_id=None,
+ client_info=transports.base.DEFAULT_CLIENT_INFO,
+ always_use_jwt_access=True,
+ )
+
+ # test that the credentials from file are saved and used as the credentials.
+ with mock.patch.object(
+ google.auth, "load_credentials_from_file", autospec=True
+ ) as load_creds, mock.patch.object(
+ google.auth, "default", autospec=True
+ ) as adc, mock.patch.object(
+ grpc_helpers, "create_channel"
+ ) as create_channel:
+ creds = ga_credentials.AnonymousCredentials()
+ file_creds = ga_credentials.AnonymousCredentials()
+ load_creds.return_value = (file_creds, None)
+ adc.return_value = (creds, None)
+ client = client_class(client_options=options, transport=transport_name)
+ create_channel.assert_called_with(
+ "aiplatform.googleapis.com:443",
+ credentials=file_creds,
+ credentials_file=None,
+ quota_project_id=None,
+ default_scopes=("https://www.googleapis.com/auth/cloud-platform",),
+ scopes=None,
+ default_host="aiplatform.googleapis.com",
+ ssl_credentials=None,
+ options=[
+ ("grpc.max_send_message_length", -1),
+ ("grpc.max_receive_message_length", -1),
+ ],
+ )
+
+
@pytest.mark.parametrize(
"request_type", [endpoint_service.CreateEndpointRequest, dict,]
)
diff --git a/tests/unit/gapic/aiplatform_v1/test_featurestore_online_serving_service.py b/tests/unit/gapic/aiplatform_v1/test_featurestore_online_serving_service.py
index eb470a91df..03caaf75d1 100644
--- a/tests/unit/gapic/aiplatform_v1/test_featurestore_online_serving_service.py
+++ b/tests/unit/gapic/aiplatform_v1/test_featurestore_online_serving_service.py
@@ -602,35 +602,6 @@ def test_featurestore_online_serving_service_client_client_options_credentials_f
always_use_jwt_access=True,
)
- if "grpc" in transport_name:
- # test that the credentials from file are saved and used as the credentials.
- with mock.patch.object(
- google.auth, "load_credentials_from_file", autospec=True
- ) as load_creds, mock.patch.object(
- google.auth, "default", autospec=True
- ) as adc, mock.patch.object(
- grpc_helpers, "create_channel"
- ) as create_channel:
- creds = ga_credentials.AnonymousCredentials()
- file_creds = ga_credentials.AnonymousCredentials()
- load_creds.return_value = (file_creds, None)
- adc.return_value = (creds, None)
- client = client_class(client_options=options, transport=transport_name)
- create_channel.assert_called_with(
- "aiplatform.googleapis.com:443",
- credentials=file_creds,
- credentials_file=None,
- quota_project_id=None,
- default_scopes=("https://www.googleapis.com/auth/cloud-platform",),
- scopes=None,
- default_host="aiplatform.googleapis.com",
- ssl_credentials=None,
- options=[
- ("grpc.max_send_message_length", -1),
- ("grpc.max_receive_message_length", -1),
- ],
- )
-
def test_featurestore_online_serving_service_client_client_options_from_dict():
with mock.patch(
@@ -652,6 +623,72 @@ def test_featurestore_online_serving_service_client_client_options_from_dict():
)
+@pytest.mark.parametrize(
+ "client_class,transport_class,transport_name,grpc_helpers",
+ [
+ (
+ FeaturestoreOnlineServingServiceClient,
+ transports.FeaturestoreOnlineServingServiceGrpcTransport,
+ "grpc",
+ grpc_helpers,
+ ),
+ (
+ FeaturestoreOnlineServingServiceAsyncClient,
+ transports.FeaturestoreOnlineServingServiceGrpcAsyncIOTransport,
+ "grpc_asyncio",
+ grpc_helpers_async,
+ ),
+ ],
+)
+def test_featurestore_online_serving_service_client_create_channel_credentials_file(
+ client_class, transport_class, transport_name, grpc_helpers
+):
+ # Check the case credentials file is provided.
+ options = client_options.ClientOptions(credentials_file="credentials.json")
+
+ with mock.patch.object(transport_class, "__init__") as patched:
+ patched.return_value = None
+ client = client_class(client_options=options, transport=transport_name)
+ patched.assert_called_once_with(
+ credentials=None,
+ credentials_file="credentials.json",
+ host=client.DEFAULT_ENDPOINT,
+ scopes=None,
+ client_cert_source_for_mtls=None,
+ quota_project_id=None,
+ client_info=transports.base.DEFAULT_CLIENT_INFO,
+ always_use_jwt_access=True,
+ )
+
+ # test that the credentials from file are saved and used as the credentials.
+ with mock.patch.object(
+ google.auth, "load_credentials_from_file", autospec=True
+ ) as load_creds, mock.patch.object(
+ google.auth, "default", autospec=True
+ ) as adc, mock.patch.object(
+ grpc_helpers, "create_channel"
+ ) as create_channel:
+ creds = ga_credentials.AnonymousCredentials()
+ file_creds = ga_credentials.AnonymousCredentials()
+ load_creds.return_value = (file_creds, None)
+ adc.return_value = (creds, None)
+ client = client_class(client_options=options, transport=transport_name)
+ create_channel.assert_called_with(
+ "aiplatform.googleapis.com:443",
+ credentials=file_creds,
+ credentials_file=None,
+ quota_project_id=None,
+ default_scopes=("https://www.googleapis.com/auth/cloud-platform",),
+ scopes=None,
+ default_host="aiplatform.googleapis.com",
+ ssl_credentials=None,
+ options=[
+ ("grpc.max_send_message_length", -1),
+ ("grpc.max_receive_message_length", -1),
+ ],
+ )
+
+
@pytest.mark.parametrize(
"request_type", [featurestore_online_service.ReadFeatureValuesRequest, dict,]
)
diff --git a/tests/unit/gapic/aiplatform_v1/test_featurestore_service.py b/tests/unit/gapic/aiplatform_v1/test_featurestore_service.py
index 85357a09ab..7840e055a2 100644
--- a/tests/unit/gapic/aiplatform_v1/test_featurestore_service.py
+++ b/tests/unit/gapic/aiplatform_v1/test_featurestore_service.py
@@ -584,35 +584,6 @@ def test_featurestore_service_client_client_options_credentials_file(
always_use_jwt_access=True,
)
- if "grpc" in transport_name:
- # test that the credentials from file are saved and used as the credentials.
- with mock.patch.object(
- google.auth, "load_credentials_from_file", autospec=True
- ) as load_creds, mock.patch.object(
- google.auth, "default", autospec=True
- ) as adc, mock.patch.object(
- grpc_helpers, "create_channel"
- ) as create_channel:
- creds = ga_credentials.AnonymousCredentials()
- file_creds = ga_credentials.AnonymousCredentials()
- load_creds.return_value = (file_creds, None)
- adc.return_value = (creds, None)
- client = client_class(client_options=options, transport=transport_name)
- create_channel.assert_called_with(
- "aiplatform.googleapis.com:443",
- credentials=file_creds,
- credentials_file=None,
- quota_project_id=None,
- default_scopes=("https://www.googleapis.com/auth/cloud-platform",),
- scopes=None,
- default_host="aiplatform.googleapis.com",
- ssl_credentials=None,
- options=[
- ("grpc.max_send_message_length", -1),
- ("grpc.max_receive_message_length", -1),
- ],
- )
-
def test_featurestore_service_client_client_options_from_dict():
with mock.patch(
@@ -634,6 +605,72 @@ def test_featurestore_service_client_client_options_from_dict():
)
+@pytest.mark.parametrize(
+ "client_class,transport_class,transport_name,grpc_helpers",
+ [
+ (
+ FeaturestoreServiceClient,
+ transports.FeaturestoreServiceGrpcTransport,
+ "grpc",
+ grpc_helpers,
+ ),
+ (
+ FeaturestoreServiceAsyncClient,
+ transports.FeaturestoreServiceGrpcAsyncIOTransport,
+ "grpc_asyncio",
+ grpc_helpers_async,
+ ),
+ ],
+)
+def test_featurestore_service_client_create_channel_credentials_file(
+ client_class, transport_class, transport_name, grpc_helpers
+):
+ # Check the case credentials file is provided.
+ options = client_options.ClientOptions(credentials_file="credentials.json")
+
+ with mock.patch.object(transport_class, "__init__") as patched:
+ patched.return_value = None
+ client = client_class(client_options=options, transport=transport_name)
+ patched.assert_called_once_with(
+ credentials=None,
+ credentials_file="credentials.json",
+ host=client.DEFAULT_ENDPOINT,
+ scopes=None,
+ client_cert_source_for_mtls=None,
+ quota_project_id=None,
+ client_info=transports.base.DEFAULT_CLIENT_INFO,
+ always_use_jwt_access=True,
+ )
+
+ # test that the credentials from file are saved and used as the credentials.
+ with mock.patch.object(
+ google.auth, "load_credentials_from_file", autospec=True
+ ) as load_creds, mock.patch.object(
+ google.auth, "default", autospec=True
+ ) as adc, mock.patch.object(
+ grpc_helpers, "create_channel"
+ ) as create_channel:
+ creds = ga_credentials.AnonymousCredentials()
+ file_creds = ga_credentials.AnonymousCredentials()
+ load_creds.return_value = (file_creds, None)
+ adc.return_value = (creds, None)
+ client = client_class(client_options=options, transport=transport_name)
+ create_channel.assert_called_with(
+ "aiplatform.googleapis.com:443",
+ credentials=file_creds,
+ credentials_file=None,
+ quota_project_id=None,
+ default_scopes=("https://www.googleapis.com/auth/cloud-platform",),
+ scopes=None,
+ default_host="aiplatform.googleapis.com",
+ ssl_credentials=None,
+ options=[
+ ("grpc.max_send_message_length", -1),
+ ("grpc.max_receive_message_length", -1),
+ ],
+ )
+
+
@pytest.mark.parametrize(
"request_type", [featurestore_service.CreateFeaturestoreRequest, dict,]
)
diff --git a/tests/unit/gapic/aiplatform_v1/test_index_endpoint_service.py b/tests/unit/gapic/aiplatform_v1/test_index_endpoint_service.py
index 2bd62d992e..8655b50b0d 100644
--- a/tests/unit/gapic/aiplatform_v1/test_index_endpoint_service.py
+++ b/tests/unit/gapic/aiplatform_v1/test_index_endpoint_service.py
@@ -579,35 +579,6 @@ def test_index_endpoint_service_client_client_options_credentials_file(
always_use_jwt_access=True,
)
- if "grpc" in transport_name:
- # test that the credentials from file are saved and used as the credentials.
- with mock.patch.object(
- google.auth, "load_credentials_from_file", autospec=True
- ) as load_creds, mock.patch.object(
- google.auth, "default", autospec=True
- ) as adc, mock.patch.object(
- grpc_helpers, "create_channel"
- ) as create_channel:
- creds = ga_credentials.AnonymousCredentials()
- file_creds = ga_credentials.AnonymousCredentials()
- load_creds.return_value = (file_creds, None)
- adc.return_value = (creds, None)
- client = client_class(client_options=options, transport=transport_name)
- create_channel.assert_called_with(
- "aiplatform.googleapis.com:443",
- credentials=file_creds,
- credentials_file=None,
- quota_project_id=None,
- default_scopes=("https://www.googleapis.com/auth/cloud-platform",),
- scopes=None,
- default_host="aiplatform.googleapis.com",
- ssl_credentials=None,
- options=[
- ("grpc.max_send_message_length", -1),
- ("grpc.max_receive_message_length", -1),
- ],
- )
-
def test_index_endpoint_service_client_client_options_from_dict():
with mock.patch(
@@ -629,6 +600,72 @@ def test_index_endpoint_service_client_client_options_from_dict():
)
+@pytest.mark.parametrize(
+ "client_class,transport_class,transport_name,grpc_helpers",
+ [
+ (
+ IndexEndpointServiceClient,
+ transports.IndexEndpointServiceGrpcTransport,
+ "grpc",
+ grpc_helpers,
+ ),
+ (
+ IndexEndpointServiceAsyncClient,
+ transports.IndexEndpointServiceGrpcAsyncIOTransport,
+ "grpc_asyncio",
+ grpc_helpers_async,
+ ),
+ ],
+)
+def test_index_endpoint_service_client_create_channel_credentials_file(
+ client_class, transport_class, transport_name, grpc_helpers
+):
+ # Check the case credentials file is provided.
+ options = client_options.ClientOptions(credentials_file="credentials.json")
+
+ with mock.patch.object(transport_class, "__init__") as patched:
+ patched.return_value = None
+ client = client_class(client_options=options, transport=transport_name)
+ patched.assert_called_once_with(
+ credentials=None,
+ credentials_file="credentials.json",
+ host=client.DEFAULT_ENDPOINT,
+ scopes=None,
+ client_cert_source_for_mtls=None,
+ quota_project_id=None,
+ client_info=transports.base.DEFAULT_CLIENT_INFO,
+ always_use_jwt_access=True,
+ )
+
+ # test that the credentials from file are saved and used as the credentials.
+ with mock.patch.object(
+ google.auth, "load_credentials_from_file", autospec=True
+ ) as load_creds, mock.patch.object(
+ google.auth, "default", autospec=True
+ ) as adc, mock.patch.object(
+ grpc_helpers, "create_channel"
+ ) as create_channel:
+ creds = ga_credentials.AnonymousCredentials()
+ file_creds = ga_credentials.AnonymousCredentials()
+ load_creds.return_value = (file_creds, None)
+ adc.return_value = (creds, None)
+ client = client_class(client_options=options, transport=transport_name)
+ create_channel.assert_called_with(
+ "aiplatform.googleapis.com:443",
+ credentials=file_creds,
+ credentials_file=None,
+ quota_project_id=None,
+ default_scopes=("https://www.googleapis.com/auth/cloud-platform",),
+ scopes=None,
+ default_host="aiplatform.googleapis.com",
+ ssl_credentials=None,
+ options=[
+ ("grpc.max_send_message_length", -1),
+ ("grpc.max_receive_message_length", -1),
+ ],
+ )
+
+
@pytest.mark.parametrize(
"request_type", [index_endpoint_service.CreateIndexEndpointRequest, dict,]
)
diff --git a/tests/unit/gapic/aiplatform_v1/test_index_service.py b/tests/unit/gapic/aiplatform_v1/test_index_service.py
index d6d0a9a017..6504d34ade 100644
--- a/tests/unit/gapic/aiplatform_v1/test_index_service.py
+++ b/tests/unit/gapic/aiplatform_v1/test_index_service.py
@@ -541,35 +541,6 @@ def test_index_service_client_client_options_credentials_file(
always_use_jwt_access=True,
)
- if "grpc" in transport_name:
- # test that the credentials from file are saved and used as the credentials.
- with mock.patch.object(
- google.auth, "load_credentials_from_file", autospec=True
- ) as load_creds, mock.patch.object(
- google.auth, "default", autospec=True
- ) as adc, mock.patch.object(
- grpc_helpers, "create_channel"
- ) as create_channel:
- creds = ga_credentials.AnonymousCredentials()
- file_creds = ga_credentials.AnonymousCredentials()
- load_creds.return_value = (file_creds, None)
- adc.return_value = (creds, None)
- client = client_class(client_options=options, transport=transport_name)
- create_channel.assert_called_with(
- "aiplatform.googleapis.com:443",
- credentials=file_creds,
- credentials_file=None,
- quota_project_id=None,
- default_scopes=("https://www.googleapis.com/auth/cloud-platform",),
- scopes=None,
- default_host="aiplatform.googleapis.com",
- ssl_credentials=None,
- options=[
- ("grpc.max_send_message_length", -1),
- ("grpc.max_receive_message_length", -1),
- ],
- )
-
def test_index_service_client_client_options_from_dict():
with mock.patch(
@@ -589,6 +560,72 @@ def test_index_service_client_client_options_from_dict():
)
+@pytest.mark.parametrize(
+ "client_class,transport_class,transport_name,grpc_helpers",
+ [
+ (
+ IndexServiceClient,
+ transports.IndexServiceGrpcTransport,
+ "grpc",
+ grpc_helpers,
+ ),
+ (
+ IndexServiceAsyncClient,
+ transports.IndexServiceGrpcAsyncIOTransport,
+ "grpc_asyncio",
+ grpc_helpers_async,
+ ),
+ ],
+)
+def test_index_service_client_create_channel_credentials_file(
+ client_class, transport_class, transport_name, grpc_helpers
+):
+ # Check the case credentials file is provided.
+ options = client_options.ClientOptions(credentials_file="credentials.json")
+
+ with mock.patch.object(transport_class, "__init__") as patched:
+ patched.return_value = None
+ client = client_class(client_options=options, transport=transport_name)
+ patched.assert_called_once_with(
+ credentials=None,
+ credentials_file="credentials.json",
+ host=client.DEFAULT_ENDPOINT,
+ scopes=None,
+ client_cert_source_for_mtls=None,
+ quota_project_id=None,
+ client_info=transports.base.DEFAULT_CLIENT_INFO,
+ always_use_jwt_access=True,
+ )
+
+ # test that the credentials from file are saved and used as the credentials.
+ with mock.patch.object(
+ google.auth, "load_credentials_from_file", autospec=True
+ ) as load_creds, mock.patch.object(
+ google.auth, "default", autospec=True
+ ) as adc, mock.patch.object(
+ grpc_helpers, "create_channel"
+ ) as create_channel:
+ creds = ga_credentials.AnonymousCredentials()
+ file_creds = ga_credentials.AnonymousCredentials()
+ load_creds.return_value = (file_creds, None)
+ adc.return_value = (creds, None)
+ client = client_class(client_options=options, transport=transport_name)
+ create_channel.assert_called_with(
+ "aiplatform.googleapis.com:443",
+ credentials=file_creds,
+ credentials_file=None,
+ quota_project_id=None,
+ default_scopes=("https://www.googleapis.com/auth/cloud-platform",),
+ scopes=None,
+ default_host="aiplatform.googleapis.com",
+ ssl_credentials=None,
+ options=[
+ ("grpc.max_send_message_length", -1),
+ ("grpc.max_receive_message_length", -1),
+ ],
+ )
+
+
@pytest.mark.parametrize("request_type", [index_service.CreateIndexRequest, dict,])
def test_create_index(request_type, transport: str = "grpc"):
client = IndexServiceClient(
diff --git a/tests/unit/gapic/aiplatform_v1/test_job_service.py b/tests/unit/gapic/aiplatform_v1/test_job_service.py
index 3bd3c6d8a6..7edb85da91 100644
--- a/tests/unit/gapic/aiplatform_v1/test_job_service.py
+++ b/tests/unit/gapic/aiplatform_v1/test_job_service.py
@@ -567,35 +567,6 @@ def test_job_service_client_client_options_credentials_file(
always_use_jwt_access=True,
)
- if "grpc" in transport_name:
- # test that the credentials from file are saved and used as the credentials.
- with mock.patch.object(
- google.auth, "load_credentials_from_file", autospec=True
- ) as load_creds, mock.patch.object(
- google.auth, "default", autospec=True
- ) as adc, mock.patch.object(
- grpc_helpers, "create_channel"
- ) as create_channel:
- creds = ga_credentials.AnonymousCredentials()
- file_creds = ga_credentials.AnonymousCredentials()
- load_creds.return_value = (file_creds, None)
- adc.return_value = (creds, None)
- client = client_class(client_options=options, transport=transport_name)
- create_channel.assert_called_with(
- "aiplatform.googleapis.com:443",
- credentials=file_creds,
- credentials_file=None,
- quota_project_id=None,
- default_scopes=("https://www.googleapis.com/auth/cloud-platform",),
- scopes=None,
- default_host="aiplatform.googleapis.com",
- ssl_credentials=None,
- options=[
- ("grpc.max_send_message_length", -1),
- ("grpc.max_receive_message_length", -1),
- ],
- )
-
def test_job_service_client_client_options_from_dict():
with mock.patch(
@@ -615,6 +586,67 @@ def test_job_service_client_client_options_from_dict():
)
+@pytest.mark.parametrize(
+ "client_class,transport_class,transport_name,grpc_helpers",
+ [
+ (JobServiceClient, transports.JobServiceGrpcTransport, "grpc", grpc_helpers),
+ (
+ JobServiceAsyncClient,
+ transports.JobServiceGrpcAsyncIOTransport,
+ "grpc_asyncio",
+ grpc_helpers_async,
+ ),
+ ],
+)
+def test_job_service_client_create_channel_credentials_file(
+ client_class, transport_class, transport_name, grpc_helpers
+):
+ # Check the case credentials file is provided.
+ options = client_options.ClientOptions(credentials_file="credentials.json")
+
+ with mock.patch.object(transport_class, "__init__") as patched:
+ patched.return_value = None
+ client = client_class(client_options=options, transport=transport_name)
+ patched.assert_called_once_with(
+ credentials=None,
+ credentials_file="credentials.json",
+ host=client.DEFAULT_ENDPOINT,
+ scopes=None,
+ client_cert_source_for_mtls=None,
+ quota_project_id=None,
+ client_info=transports.base.DEFAULT_CLIENT_INFO,
+ always_use_jwt_access=True,
+ )
+
+ # test that the credentials from file are saved and used as the credentials.
+ with mock.patch.object(
+ google.auth, "load_credentials_from_file", autospec=True
+ ) as load_creds, mock.patch.object(
+ google.auth, "default", autospec=True
+ ) as adc, mock.patch.object(
+ grpc_helpers, "create_channel"
+ ) as create_channel:
+ creds = ga_credentials.AnonymousCredentials()
+ file_creds = ga_credentials.AnonymousCredentials()
+ load_creds.return_value = (file_creds, None)
+ adc.return_value = (creds, None)
+ client = client_class(client_options=options, transport=transport_name)
+ create_channel.assert_called_with(
+ "aiplatform.googleapis.com:443",
+ credentials=file_creds,
+ credentials_file=None,
+ quota_project_id=None,
+ default_scopes=("https://www.googleapis.com/auth/cloud-platform",),
+ scopes=None,
+ default_host="aiplatform.googleapis.com",
+ ssl_credentials=None,
+ options=[
+ ("grpc.max_send_message_length", -1),
+ ("grpc.max_receive_message_length", -1),
+ ],
+ )
+
+
@pytest.mark.parametrize("request_type", [job_service.CreateCustomJobRequest, dict,])
def test_create_custom_job(request_type, transport: str = "grpc"):
client = JobServiceClient(
diff --git a/tests/unit/gapic/aiplatform_v1/test_metadata_service.py b/tests/unit/gapic/aiplatform_v1/test_metadata_service.py
index 325acfdb02..13984c877c 100644
--- a/tests/unit/gapic/aiplatform_v1/test_metadata_service.py
+++ b/tests/unit/gapic/aiplatform_v1/test_metadata_service.py
@@ -578,35 +578,6 @@ def test_metadata_service_client_client_options_credentials_file(
always_use_jwt_access=True,
)
- if "grpc" in transport_name:
- # test that the credentials from file are saved and used as the credentials.
- with mock.patch.object(
- google.auth, "load_credentials_from_file", autospec=True
- ) as load_creds, mock.patch.object(
- google.auth, "default", autospec=True
- ) as adc, mock.patch.object(
- grpc_helpers, "create_channel"
- ) as create_channel:
- creds = ga_credentials.AnonymousCredentials()
- file_creds = ga_credentials.AnonymousCredentials()
- load_creds.return_value = (file_creds, None)
- adc.return_value = (creds, None)
- client = client_class(client_options=options, transport=transport_name)
- create_channel.assert_called_with(
- "aiplatform.googleapis.com:443",
- credentials=file_creds,
- credentials_file=None,
- quota_project_id=None,
- default_scopes=("https://www.googleapis.com/auth/cloud-platform",),
- scopes=None,
- default_host="aiplatform.googleapis.com",
- ssl_credentials=None,
- options=[
- ("grpc.max_send_message_length", -1),
- ("grpc.max_receive_message_length", -1),
- ],
- )
-
def test_metadata_service_client_client_options_from_dict():
with mock.patch(
@@ -628,6 +599,72 @@ def test_metadata_service_client_client_options_from_dict():
)
+@pytest.mark.parametrize(
+ "client_class,transport_class,transport_name,grpc_helpers",
+ [
+ (
+ MetadataServiceClient,
+ transports.MetadataServiceGrpcTransport,
+ "grpc",
+ grpc_helpers,
+ ),
+ (
+ MetadataServiceAsyncClient,
+ transports.MetadataServiceGrpcAsyncIOTransport,
+ "grpc_asyncio",
+ grpc_helpers_async,
+ ),
+ ],
+)
+def test_metadata_service_client_create_channel_credentials_file(
+ client_class, transport_class, transport_name, grpc_helpers
+):
+ # Check the case credentials file is provided.
+ options = client_options.ClientOptions(credentials_file="credentials.json")
+
+ with mock.patch.object(transport_class, "__init__") as patched:
+ patched.return_value = None
+ client = client_class(client_options=options, transport=transport_name)
+ patched.assert_called_once_with(
+ credentials=None,
+ credentials_file="credentials.json",
+ host=client.DEFAULT_ENDPOINT,
+ scopes=None,
+ client_cert_source_for_mtls=None,
+ quota_project_id=None,
+ client_info=transports.base.DEFAULT_CLIENT_INFO,
+ always_use_jwt_access=True,
+ )
+
+ # test that the credentials from file are saved and used as the credentials.
+ with mock.patch.object(
+ google.auth, "load_credentials_from_file", autospec=True
+ ) as load_creds, mock.patch.object(
+ google.auth, "default", autospec=True
+ ) as adc, mock.patch.object(
+ grpc_helpers, "create_channel"
+ ) as create_channel:
+ creds = ga_credentials.AnonymousCredentials()
+ file_creds = ga_credentials.AnonymousCredentials()
+ load_creds.return_value = (file_creds, None)
+ adc.return_value = (creds, None)
+ client = client_class(client_options=options, transport=transport_name)
+ create_channel.assert_called_with(
+ "aiplatform.googleapis.com:443",
+ credentials=file_creds,
+ credentials_file=None,
+ quota_project_id=None,
+ default_scopes=("https://www.googleapis.com/auth/cloud-platform",),
+ scopes=None,
+ default_host="aiplatform.googleapis.com",
+ ssl_credentials=None,
+ options=[
+ ("grpc.max_send_message_length", -1),
+ ("grpc.max_receive_message_length", -1),
+ ],
+ )
+
+
@pytest.mark.parametrize(
"request_type", [metadata_service.CreateMetadataStoreRequest, dict,]
)
diff --git a/tests/unit/gapic/aiplatform_v1/test_migration_service.py b/tests/unit/gapic/aiplatform_v1/test_migration_service.py
index 0ab11cc1e3..66f55b6a42 100644
--- a/tests/unit/gapic/aiplatform_v1/test_migration_service.py
+++ b/tests/unit/gapic/aiplatform_v1/test_migration_service.py
@@ -563,35 +563,6 @@ def test_migration_service_client_client_options_credentials_file(
always_use_jwt_access=True,
)
- if "grpc" in transport_name:
- # test that the credentials from file are saved and used as the credentials.
- with mock.patch.object(
- google.auth, "load_credentials_from_file", autospec=True
- ) as load_creds, mock.patch.object(
- google.auth, "default", autospec=True
- ) as adc, mock.patch.object(
- grpc_helpers, "create_channel"
- ) as create_channel:
- creds = ga_credentials.AnonymousCredentials()
- file_creds = ga_credentials.AnonymousCredentials()
- load_creds.return_value = (file_creds, None)
- adc.return_value = (creds, None)
- client = client_class(client_options=options, transport=transport_name)
- create_channel.assert_called_with(
- "aiplatform.googleapis.com:443",
- credentials=file_creds,
- credentials_file=None,
- quota_project_id=None,
- default_scopes=("https://www.googleapis.com/auth/cloud-platform",),
- scopes=None,
- default_host="aiplatform.googleapis.com",
- ssl_credentials=None,
- options=[
- ("grpc.max_send_message_length", -1),
- ("grpc.max_receive_message_length", -1),
- ],
- )
-
def test_migration_service_client_client_options_from_dict():
with mock.patch(
@@ -613,6 +584,72 @@ def test_migration_service_client_client_options_from_dict():
)
+@pytest.mark.parametrize(
+ "client_class,transport_class,transport_name,grpc_helpers",
+ [
+ (
+ MigrationServiceClient,
+ transports.MigrationServiceGrpcTransport,
+ "grpc",
+ grpc_helpers,
+ ),
+ (
+ MigrationServiceAsyncClient,
+ transports.MigrationServiceGrpcAsyncIOTransport,
+ "grpc_asyncio",
+ grpc_helpers_async,
+ ),
+ ],
+)
+def test_migration_service_client_create_channel_credentials_file(
+ client_class, transport_class, transport_name, grpc_helpers
+):
+ # Check the case credentials file is provided.
+ options = client_options.ClientOptions(credentials_file="credentials.json")
+
+ with mock.patch.object(transport_class, "__init__") as patched:
+ patched.return_value = None
+ client = client_class(client_options=options, transport=transport_name)
+ patched.assert_called_once_with(
+ credentials=None,
+ credentials_file="credentials.json",
+ host=client.DEFAULT_ENDPOINT,
+ scopes=None,
+ client_cert_source_for_mtls=None,
+ quota_project_id=None,
+ client_info=transports.base.DEFAULT_CLIENT_INFO,
+ always_use_jwt_access=True,
+ )
+
+ # test that the credentials from file are saved and used as the credentials.
+ with mock.patch.object(
+ google.auth, "load_credentials_from_file", autospec=True
+ ) as load_creds, mock.patch.object(
+ google.auth, "default", autospec=True
+ ) as adc, mock.patch.object(
+ grpc_helpers, "create_channel"
+ ) as create_channel:
+ creds = ga_credentials.AnonymousCredentials()
+ file_creds = ga_credentials.AnonymousCredentials()
+ load_creds.return_value = (file_creds, None)
+ adc.return_value = (creds, None)
+ client = client_class(client_options=options, transport=transport_name)
+ create_channel.assert_called_with(
+ "aiplatform.googleapis.com:443",
+ credentials=file_creds,
+ credentials_file=None,
+ quota_project_id=None,
+ default_scopes=("https://www.googleapis.com/auth/cloud-platform",),
+ scopes=None,
+ default_host="aiplatform.googleapis.com",
+ ssl_credentials=None,
+ options=[
+ ("grpc.max_send_message_length", -1),
+ ("grpc.max_receive_message_length", -1),
+ ],
+ )
+
+
@pytest.mark.parametrize(
"request_type", [migration_service.SearchMigratableResourcesRequest, dict,]
)
@@ -1780,20 +1817,18 @@ def test_parse_dataset_path():
def test_dataset_path():
project = "squid"
- location = "clam"
- dataset = "whelk"
- expected = "projects/{project}/locations/{location}/datasets/{dataset}".format(
- project=project, location=location, dataset=dataset,
+ dataset = "clam"
+ expected = "projects/{project}/datasets/{dataset}".format(
+ project=project, dataset=dataset,
)
- actual = MigrationServiceClient.dataset_path(project, location, dataset)
+ actual = MigrationServiceClient.dataset_path(project, dataset)
assert expected == actual
def test_parse_dataset_path():
expected = {
- "project": "octopus",
- "location": "oyster",
- "dataset": "nudibranch",
+ "project": "whelk",
+ "dataset": "octopus",
}
path = MigrationServiceClient.dataset_path(**expected)
@@ -1803,18 +1838,20 @@ def test_parse_dataset_path():
def test_dataset_path():
- project = "cuttlefish"
- dataset = "mussel"
- expected = "projects/{project}/datasets/{dataset}".format(
- project=project, dataset=dataset,
+ project = "oyster"
+ location = "nudibranch"
+ dataset = "cuttlefish"
+ expected = "projects/{project}/locations/{location}/datasets/{dataset}".format(
+ project=project, location=location, dataset=dataset,
)
- actual = MigrationServiceClient.dataset_path(project, dataset)
+ actual = MigrationServiceClient.dataset_path(project, location, dataset)
assert expected == actual
def test_parse_dataset_path():
expected = {
- "project": "winkle",
+ "project": "mussel",
+ "location": "winkle",
"dataset": "nautilus",
}
path = MigrationServiceClient.dataset_path(**expected)
diff --git a/tests/unit/gapic/aiplatform_v1/test_model_service.py b/tests/unit/gapic/aiplatform_v1/test_model_service.py
index 3c848d8ff2..d17a1448f3 100644
--- a/tests/unit/gapic/aiplatform_v1/test_model_service.py
+++ b/tests/unit/gapic/aiplatform_v1/test_model_service.py
@@ -548,35 +548,6 @@ def test_model_service_client_client_options_credentials_file(
always_use_jwt_access=True,
)
- if "grpc" in transport_name:
- # test that the credentials from file are saved and used as the credentials.
- with mock.patch.object(
- google.auth, "load_credentials_from_file", autospec=True
- ) as load_creds, mock.patch.object(
- google.auth, "default", autospec=True
- ) as adc, mock.patch.object(
- grpc_helpers, "create_channel"
- ) as create_channel:
- creds = ga_credentials.AnonymousCredentials()
- file_creds = ga_credentials.AnonymousCredentials()
- load_creds.return_value = (file_creds, None)
- adc.return_value = (creds, None)
- client = client_class(client_options=options, transport=transport_name)
- create_channel.assert_called_with(
- "aiplatform.googleapis.com:443",
- credentials=file_creds,
- credentials_file=None,
- quota_project_id=None,
- default_scopes=("https://www.googleapis.com/auth/cloud-platform",),
- scopes=None,
- default_host="aiplatform.googleapis.com",
- ssl_credentials=None,
- options=[
- ("grpc.max_send_message_length", -1),
- ("grpc.max_receive_message_length", -1),
- ],
- )
-
def test_model_service_client_client_options_from_dict():
with mock.patch(
@@ -596,6 +567,72 @@ def test_model_service_client_client_options_from_dict():
)
+@pytest.mark.parametrize(
+ "client_class,transport_class,transport_name,grpc_helpers",
+ [
+ (
+ ModelServiceClient,
+ transports.ModelServiceGrpcTransport,
+ "grpc",
+ grpc_helpers,
+ ),
+ (
+ ModelServiceAsyncClient,
+ transports.ModelServiceGrpcAsyncIOTransport,
+ "grpc_asyncio",
+ grpc_helpers_async,
+ ),
+ ],
+)
+def test_model_service_client_create_channel_credentials_file(
+ client_class, transport_class, transport_name, grpc_helpers
+):
+ # Check the case credentials file is provided.
+ options = client_options.ClientOptions(credentials_file="credentials.json")
+
+ with mock.patch.object(transport_class, "__init__") as patched:
+ patched.return_value = None
+ client = client_class(client_options=options, transport=transport_name)
+ patched.assert_called_once_with(
+ credentials=None,
+ credentials_file="credentials.json",
+ host=client.DEFAULT_ENDPOINT,
+ scopes=None,
+ client_cert_source_for_mtls=None,
+ quota_project_id=None,
+ client_info=transports.base.DEFAULT_CLIENT_INFO,
+ always_use_jwt_access=True,
+ )
+
+ # test that the credentials from file are saved and used as the credentials.
+ with mock.patch.object(
+ google.auth, "load_credentials_from_file", autospec=True
+ ) as load_creds, mock.patch.object(
+ google.auth, "default", autospec=True
+ ) as adc, mock.patch.object(
+ grpc_helpers, "create_channel"
+ ) as create_channel:
+ creds = ga_credentials.AnonymousCredentials()
+ file_creds = ga_credentials.AnonymousCredentials()
+ load_creds.return_value = (file_creds, None)
+ adc.return_value = (creds, None)
+ client = client_class(client_options=options, transport=transport_name)
+ create_channel.assert_called_with(
+ "aiplatform.googleapis.com:443",
+ credentials=file_creds,
+ credentials_file=None,
+ quota_project_id=None,
+ default_scopes=("https://www.googleapis.com/auth/cloud-platform",),
+ scopes=None,
+ default_host="aiplatform.googleapis.com",
+ ssl_credentials=None,
+ options=[
+ ("grpc.max_send_message_length", -1),
+ ("grpc.max_receive_message_length", -1),
+ ],
+ )
+
+
@pytest.mark.parametrize("request_type", [model_service.UploadModelRequest, dict,])
def test_upload_model(request_type, transport: str = "grpc"):
client = ModelServiceClient(
diff --git a/tests/unit/gapic/aiplatform_v1/test_pipeline_service.py b/tests/unit/gapic/aiplatform_v1/test_pipeline_service.py
index 65169e1868..f6ac58a714 100644
--- a/tests/unit/gapic/aiplatform_v1/test_pipeline_service.py
+++ b/tests/unit/gapic/aiplatform_v1/test_pipeline_service.py
@@ -583,35 +583,6 @@ def test_pipeline_service_client_client_options_credentials_file(
always_use_jwt_access=True,
)
- if "grpc" in transport_name:
- # test that the credentials from file are saved and used as the credentials.
- with mock.patch.object(
- google.auth, "load_credentials_from_file", autospec=True
- ) as load_creds, mock.patch.object(
- google.auth, "default", autospec=True
- ) as adc, mock.patch.object(
- grpc_helpers, "create_channel"
- ) as create_channel:
- creds = ga_credentials.AnonymousCredentials()
- file_creds = ga_credentials.AnonymousCredentials()
- load_creds.return_value = (file_creds, None)
- adc.return_value = (creds, None)
- client = client_class(client_options=options, transport=transport_name)
- create_channel.assert_called_with(
- "aiplatform.googleapis.com:443",
- credentials=file_creds,
- credentials_file=None,
- quota_project_id=None,
- default_scopes=("https://www.googleapis.com/auth/cloud-platform",),
- scopes=None,
- default_host="aiplatform.googleapis.com",
- ssl_credentials=None,
- options=[
- ("grpc.max_send_message_length", -1),
- ("grpc.max_receive_message_length", -1),
- ],
- )
-
def test_pipeline_service_client_client_options_from_dict():
with mock.patch(
@@ -633,6 +604,72 @@ def test_pipeline_service_client_client_options_from_dict():
)
+@pytest.mark.parametrize(
+ "client_class,transport_class,transport_name,grpc_helpers",
+ [
+ (
+ PipelineServiceClient,
+ transports.PipelineServiceGrpcTransport,
+ "grpc",
+ grpc_helpers,
+ ),
+ (
+ PipelineServiceAsyncClient,
+ transports.PipelineServiceGrpcAsyncIOTransport,
+ "grpc_asyncio",
+ grpc_helpers_async,
+ ),
+ ],
+)
+def test_pipeline_service_client_create_channel_credentials_file(
+ client_class, transport_class, transport_name, grpc_helpers
+):
+ # Check the case credentials file is provided.
+ options = client_options.ClientOptions(credentials_file="credentials.json")
+
+ with mock.patch.object(transport_class, "__init__") as patched:
+ patched.return_value = None
+ client = client_class(client_options=options, transport=transport_name)
+ patched.assert_called_once_with(
+ credentials=None,
+ credentials_file="credentials.json",
+ host=client.DEFAULT_ENDPOINT,
+ scopes=None,
+ client_cert_source_for_mtls=None,
+ quota_project_id=None,
+ client_info=transports.base.DEFAULT_CLIENT_INFO,
+ always_use_jwt_access=True,
+ )
+
+ # test that the credentials from file are saved and used as the credentials.
+ with mock.patch.object(
+ google.auth, "load_credentials_from_file", autospec=True
+ ) as load_creds, mock.patch.object(
+ google.auth, "default", autospec=True
+ ) as adc, mock.patch.object(
+ grpc_helpers, "create_channel"
+ ) as create_channel:
+ creds = ga_credentials.AnonymousCredentials()
+ file_creds = ga_credentials.AnonymousCredentials()
+ load_creds.return_value = (file_creds, None)
+ adc.return_value = (creds, None)
+ client = client_class(client_options=options, transport=transport_name)
+ create_channel.assert_called_with(
+ "aiplatform.googleapis.com:443",
+ credentials=file_creds,
+ credentials_file=None,
+ quota_project_id=None,
+ default_scopes=("https://www.googleapis.com/auth/cloud-platform",),
+ scopes=None,
+ default_host="aiplatform.googleapis.com",
+ ssl_credentials=None,
+ options=[
+ ("grpc.max_send_message_length", -1),
+ ("grpc.max_receive_message_length", -1),
+ ],
+ )
+
+
@pytest.mark.parametrize(
"request_type", [pipeline_service.CreateTrainingPipelineRequest, dict,]
)
diff --git a/tests/unit/gapic/aiplatform_v1/test_prediction_service.py b/tests/unit/gapic/aiplatform_v1/test_prediction_service.py
index bc3bf66974..6266929773 100644
--- a/tests/unit/gapic/aiplatform_v1/test_prediction_service.py
+++ b/tests/unit/gapic/aiplatform_v1/test_prediction_service.py
@@ -562,35 +562,6 @@ def test_prediction_service_client_client_options_credentials_file(
always_use_jwt_access=True,
)
- if "grpc" in transport_name:
- # test that the credentials from file are saved and used as the credentials.
- with mock.patch.object(
- google.auth, "load_credentials_from_file", autospec=True
- ) as load_creds, mock.patch.object(
- google.auth, "default", autospec=True
- ) as adc, mock.patch.object(
- grpc_helpers, "create_channel"
- ) as create_channel:
- creds = ga_credentials.AnonymousCredentials()
- file_creds = ga_credentials.AnonymousCredentials()
- load_creds.return_value = (file_creds, None)
- adc.return_value = (creds, None)
- client = client_class(client_options=options, transport=transport_name)
- create_channel.assert_called_with(
- "aiplatform.googleapis.com:443",
- credentials=file_creds,
- credentials_file=None,
- quota_project_id=None,
- default_scopes=("https://www.googleapis.com/auth/cloud-platform",),
- scopes=None,
- default_host="aiplatform.googleapis.com",
- ssl_credentials=None,
- options=[
- ("grpc.max_send_message_length", -1),
- ("grpc.max_receive_message_length", -1),
- ],
- )
-
def test_prediction_service_client_client_options_from_dict():
with mock.patch(
@@ -612,6 +583,72 @@ def test_prediction_service_client_client_options_from_dict():
)
+@pytest.mark.parametrize(
+ "client_class,transport_class,transport_name,grpc_helpers",
+ [
+ (
+ PredictionServiceClient,
+ transports.PredictionServiceGrpcTransport,
+ "grpc",
+ grpc_helpers,
+ ),
+ (
+ PredictionServiceAsyncClient,
+ transports.PredictionServiceGrpcAsyncIOTransport,
+ "grpc_asyncio",
+ grpc_helpers_async,
+ ),
+ ],
+)
+def test_prediction_service_client_create_channel_credentials_file(
+ client_class, transport_class, transport_name, grpc_helpers
+):
+ # Check the case credentials file is provided.
+ options = client_options.ClientOptions(credentials_file="credentials.json")
+
+ with mock.patch.object(transport_class, "__init__") as patched:
+ patched.return_value = None
+ client = client_class(client_options=options, transport=transport_name)
+ patched.assert_called_once_with(
+ credentials=None,
+ credentials_file="credentials.json",
+ host=client.DEFAULT_ENDPOINT,
+ scopes=None,
+ client_cert_source_for_mtls=None,
+ quota_project_id=None,
+ client_info=transports.base.DEFAULT_CLIENT_INFO,
+ always_use_jwt_access=True,
+ )
+
+ # test that the credentials from file are saved and used as the credentials.
+ with mock.patch.object(
+ google.auth, "load_credentials_from_file", autospec=True
+ ) as load_creds, mock.patch.object(
+ google.auth, "default", autospec=True
+ ) as adc, mock.patch.object(
+ grpc_helpers, "create_channel"
+ ) as create_channel:
+ creds = ga_credentials.AnonymousCredentials()
+ file_creds = ga_credentials.AnonymousCredentials()
+ load_creds.return_value = (file_creds, None)
+ adc.return_value = (creds, None)
+ client = client_class(client_options=options, transport=transport_name)
+ create_channel.assert_called_with(
+ "aiplatform.googleapis.com:443",
+ credentials=file_creds,
+ credentials_file=None,
+ quota_project_id=None,
+ default_scopes=("https://www.googleapis.com/auth/cloud-platform",),
+ scopes=None,
+ default_host="aiplatform.googleapis.com",
+ ssl_credentials=None,
+ options=[
+ ("grpc.max_send_message_length", -1),
+ ("grpc.max_receive_message_length", -1),
+ ],
+ )
+
+
@pytest.mark.parametrize("request_type", [prediction_service.PredictRequest, dict,])
def test_predict(request_type, transport: str = "grpc"):
client = PredictionServiceClient(
diff --git a/tests/unit/gapic/aiplatform_v1/test_specialist_pool_service.py b/tests/unit/gapic/aiplatform_v1/test_specialist_pool_service.py
index 22f9df3240..2d9c102a83 100644
--- a/tests/unit/gapic/aiplatform_v1/test_specialist_pool_service.py
+++ b/tests/unit/gapic/aiplatform_v1/test_specialist_pool_service.py
@@ -576,35 +576,6 @@ def test_specialist_pool_service_client_client_options_credentials_file(
always_use_jwt_access=True,
)
- if "grpc" in transport_name:
- # test that the credentials from file are saved and used as the credentials.
- with mock.patch.object(
- google.auth, "load_credentials_from_file", autospec=True
- ) as load_creds, mock.patch.object(
- google.auth, "default", autospec=True
- ) as adc, mock.patch.object(
- grpc_helpers, "create_channel"
- ) as create_channel:
- creds = ga_credentials.AnonymousCredentials()
- file_creds = ga_credentials.AnonymousCredentials()
- load_creds.return_value = (file_creds, None)
- adc.return_value = (creds, None)
- client = client_class(client_options=options, transport=transport_name)
- create_channel.assert_called_with(
- "aiplatform.googleapis.com:443",
- credentials=file_creds,
- credentials_file=None,
- quota_project_id=None,
- default_scopes=("https://www.googleapis.com/auth/cloud-platform",),
- scopes=None,
- default_host="aiplatform.googleapis.com",
- ssl_credentials=None,
- options=[
- ("grpc.max_send_message_length", -1),
- ("grpc.max_receive_message_length", -1),
- ],
- )
-
def test_specialist_pool_service_client_client_options_from_dict():
with mock.patch(
@@ -626,6 +597,72 @@ def test_specialist_pool_service_client_client_options_from_dict():
)
+@pytest.mark.parametrize(
+ "client_class,transport_class,transport_name,grpc_helpers",
+ [
+ (
+ SpecialistPoolServiceClient,
+ transports.SpecialistPoolServiceGrpcTransport,
+ "grpc",
+ grpc_helpers,
+ ),
+ (
+ SpecialistPoolServiceAsyncClient,
+ transports.SpecialistPoolServiceGrpcAsyncIOTransport,
+ "grpc_asyncio",
+ grpc_helpers_async,
+ ),
+ ],
+)
+def test_specialist_pool_service_client_create_channel_credentials_file(
+ client_class, transport_class, transport_name, grpc_helpers
+):
+ # Check the case credentials file is provided.
+ options = client_options.ClientOptions(credentials_file="credentials.json")
+
+ with mock.patch.object(transport_class, "__init__") as patched:
+ patched.return_value = None
+ client = client_class(client_options=options, transport=transport_name)
+ patched.assert_called_once_with(
+ credentials=None,
+ credentials_file="credentials.json",
+ host=client.DEFAULT_ENDPOINT,
+ scopes=None,
+ client_cert_source_for_mtls=None,
+ quota_project_id=None,
+ client_info=transports.base.DEFAULT_CLIENT_INFO,
+ always_use_jwt_access=True,
+ )
+
+ # test that the credentials from file are saved and used as the credentials.
+ with mock.patch.object(
+ google.auth, "load_credentials_from_file", autospec=True
+ ) as load_creds, mock.patch.object(
+ google.auth, "default", autospec=True
+ ) as adc, mock.patch.object(
+ grpc_helpers, "create_channel"
+ ) as create_channel:
+ creds = ga_credentials.AnonymousCredentials()
+ file_creds = ga_credentials.AnonymousCredentials()
+ load_creds.return_value = (file_creds, None)
+ adc.return_value = (creds, None)
+ client = client_class(client_options=options, transport=transport_name)
+ create_channel.assert_called_with(
+ "aiplatform.googleapis.com:443",
+ credentials=file_creds,
+ credentials_file=None,
+ quota_project_id=None,
+ default_scopes=("https://www.googleapis.com/auth/cloud-platform",),
+ scopes=None,
+ default_host="aiplatform.googleapis.com",
+ ssl_credentials=None,
+ options=[
+ ("grpc.max_send_message_length", -1),
+ ("grpc.max_receive_message_length", -1),
+ ],
+ )
+
+
@pytest.mark.parametrize(
"request_type", [specialist_pool_service.CreateSpecialistPoolRequest, dict,]
)
diff --git a/tests/unit/gapic/aiplatform_v1/test_tensorboard_service.py b/tests/unit/gapic/aiplatform_v1/test_tensorboard_service.py
index 27782cd2d9..7082279659 100644
--- a/tests/unit/gapic/aiplatform_v1/test_tensorboard_service.py
+++ b/tests/unit/gapic/aiplatform_v1/test_tensorboard_service.py
@@ -581,38 +581,6 @@ def test_tensorboard_service_client_client_options_credentials_file(
always_use_jwt_access=True,
)
- if "grpc" in transport_name:
- # test that the credentials from file are saved and used as the credentials.
- with mock.patch.object(
- google.auth, "load_credentials_from_file", autospec=True
- ) as load_creds, mock.patch.object(
- google.auth, "default", autospec=True
- ) as adc, mock.patch.object(
- grpc_helpers, "create_channel"
- ) as create_channel:
- creds = ga_credentials.AnonymousCredentials()
- file_creds = ga_credentials.AnonymousCredentials()
- load_creds.return_value = (file_creds, None)
- adc.return_value = (creds, None)
- client = client_class(client_options=options, transport=transport_name)
- create_channel.assert_called_with(
- "aiplatform.googleapis.com:443",
- credentials=file_creds,
- credentials_file=None,
- quota_project_id=None,
- default_scopes=(
- "https://www.googleapis.com/auth/cloud-platform",
- "https://www.googleapis.com/auth/cloud-platform.read-only",
- ),
- scopes=None,
- default_host="aiplatform.googleapis.com",
- ssl_credentials=None,
- options=[
- ("grpc.max_send_message_length", -1),
- ("grpc.max_receive_message_length", -1),
- ],
- )
-
def test_tensorboard_service_client_client_options_from_dict():
with mock.patch(
@@ -634,6 +602,75 @@ def test_tensorboard_service_client_client_options_from_dict():
)
+@pytest.mark.parametrize(
+ "client_class,transport_class,transport_name,grpc_helpers",
+ [
+ (
+ TensorboardServiceClient,
+ transports.TensorboardServiceGrpcTransport,
+ "grpc",
+ grpc_helpers,
+ ),
+ (
+ TensorboardServiceAsyncClient,
+ transports.TensorboardServiceGrpcAsyncIOTransport,
+ "grpc_asyncio",
+ grpc_helpers_async,
+ ),
+ ],
+)
+def test_tensorboard_service_client_create_channel_credentials_file(
+ client_class, transport_class, transport_name, grpc_helpers
+):
+ # Check the case credentials file is provided.
+ options = client_options.ClientOptions(credentials_file="credentials.json")
+
+ with mock.patch.object(transport_class, "__init__") as patched:
+ patched.return_value = None
+ client = client_class(client_options=options, transport=transport_name)
+ patched.assert_called_once_with(
+ credentials=None,
+ credentials_file="credentials.json",
+ host=client.DEFAULT_ENDPOINT,
+ scopes=None,
+ client_cert_source_for_mtls=None,
+ quota_project_id=None,
+ client_info=transports.base.DEFAULT_CLIENT_INFO,
+ always_use_jwt_access=True,
+ )
+
+ # test that the credentials from file are saved and used as the credentials.
+ with mock.patch.object(
+ google.auth, "load_credentials_from_file", autospec=True
+ ) as load_creds, mock.patch.object(
+ google.auth, "default", autospec=True
+ ) as adc, mock.patch.object(
+ grpc_helpers, "create_channel"
+ ) as create_channel:
+ creds = ga_credentials.AnonymousCredentials()
+ file_creds = ga_credentials.AnonymousCredentials()
+ load_creds.return_value = (file_creds, None)
+ adc.return_value = (creds, None)
+ client = client_class(client_options=options, transport=transport_name)
+ create_channel.assert_called_with(
+ "aiplatform.googleapis.com:443",
+ credentials=file_creds,
+ credentials_file=None,
+ quota_project_id=None,
+ default_scopes=(
+ "https://www.googleapis.com/auth/cloud-platform",
+ "https://www.googleapis.com/auth/cloud-platform.read-only",
+ ),
+ scopes=None,
+ default_host="aiplatform.googleapis.com",
+ ssl_credentials=None,
+ options=[
+ ("grpc.max_send_message_length", -1),
+ ("grpc.max_receive_message_length", -1),
+ ],
+ )
+
+
@pytest.mark.parametrize(
"request_type", [tensorboard_service.CreateTensorboardRequest, dict,]
)
diff --git a/tests/unit/gapic/aiplatform_v1/test_vizier_service.py b/tests/unit/gapic/aiplatform_v1/test_vizier_service.py
index b8fca485de..fd95f016d1 100644
--- a/tests/unit/gapic/aiplatform_v1/test_vizier_service.py
+++ b/tests/unit/gapic/aiplatform_v1/test_vizier_service.py
@@ -554,35 +554,6 @@ def test_vizier_service_client_client_options_credentials_file(
always_use_jwt_access=True,
)
- if "grpc" in transport_name:
- # test that the credentials from file are saved and used as the credentials.
- with mock.patch.object(
- google.auth, "load_credentials_from_file", autospec=True
- ) as load_creds, mock.patch.object(
- google.auth, "default", autospec=True
- ) as adc, mock.patch.object(
- grpc_helpers, "create_channel"
- ) as create_channel:
- creds = ga_credentials.AnonymousCredentials()
- file_creds = ga_credentials.AnonymousCredentials()
- load_creds.return_value = (file_creds, None)
- adc.return_value = (creds, None)
- client = client_class(client_options=options, transport=transport_name)
- create_channel.assert_called_with(
- "aiplatform.googleapis.com:443",
- credentials=file_creds,
- credentials_file=None,
- quota_project_id=None,
- default_scopes=("https://www.googleapis.com/auth/cloud-platform",),
- scopes=None,
- default_host="aiplatform.googleapis.com",
- ssl_credentials=None,
- options=[
- ("grpc.max_send_message_length", -1),
- ("grpc.max_receive_message_length", -1),
- ],
- )
-
def test_vizier_service_client_client_options_from_dict():
with mock.patch(
@@ -604,6 +575,72 @@ def test_vizier_service_client_client_options_from_dict():
)
+@pytest.mark.parametrize(
+ "client_class,transport_class,transport_name,grpc_helpers",
+ [
+ (
+ VizierServiceClient,
+ transports.VizierServiceGrpcTransport,
+ "grpc",
+ grpc_helpers,
+ ),
+ (
+ VizierServiceAsyncClient,
+ transports.VizierServiceGrpcAsyncIOTransport,
+ "grpc_asyncio",
+ grpc_helpers_async,
+ ),
+ ],
+)
+def test_vizier_service_client_create_channel_credentials_file(
+ client_class, transport_class, transport_name, grpc_helpers
+):
+ # Check the case credentials file is provided.
+ options = client_options.ClientOptions(credentials_file="credentials.json")
+
+ with mock.patch.object(transport_class, "__init__") as patched:
+ patched.return_value = None
+ client = client_class(client_options=options, transport=transport_name)
+ patched.assert_called_once_with(
+ credentials=None,
+ credentials_file="credentials.json",
+ host=client.DEFAULT_ENDPOINT,
+ scopes=None,
+ client_cert_source_for_mtls=None,
+ quota_project_id=None,
+ client_info=transports.base.DEFAULT_CLIENT_INFO,
+ always_use_jwt_access=True,
+ )
+
+ # test that the credentials from file are saved and used as the credentials.
+ with mock.patch.object(
+ google.auth, "load_credentials_from_file", autospec=True
+ ) as load_creds, mock.patch.object(
+ google.auth, "default", autospec=True
+ ) as adc, mock.patch.object(
+ grpc_helpers, "create_channel"
+ ) as create_channel:
+ creds = ga_credentials.AnonymousCredentials()
+ file_creds = ga_credentials.AnonymousCredentials()
+ load_creds.return_value = (file_creds, None)
+ adc.return_value = (creds, None)
+ client = client_class(client_options=options, transport=transport_name)
+ create_channel.assert_called_with(
+ "aiplatform.googleapis.com:443",
+ credentials=file_creds,
+ credentials_file=None,
+ quota_project_id=None,
+ default_scopes=("https://www.googleapis.com/auth/cloud-platform",),
+ scopes=None,
+ default_host="aiplatform.googleapis.com",
+ ssl_credentials=None,
+ options=[
+ ("grpc.max_send_message_length", -1),
+ ("grpc.max_receive_message_length", -1),
+ ],
+ )
+
+
@pytest.mark.parametrize("request_type", [vizier_service.CreateStudyRequest, dict,])
def test_create_study(request_type, transport: str = "grpc"):
client = VizierServiceClient(
diff --git a/tests/unit/gapic/aiplatform_v1beta1/test_dataset_service.py b/tests/unit/gapic/aiplatform_v1beta1/test_dataset_service.py
index 6951f7cb3f..bb8b8c8caa 100644
--- a/tests/unit/gapic/aiplatform_v1beta1/test_dataset_service.py
+++ b/tests/unit/gapic/aiplatform_v1beta1/test_dataset_service.py
@@ -564,35 +564,6 @@ def test_dataset_service_client_client_options_credentials_file(
always_use_jwt_access=True,
)
- if "grpc" in transport_name:
- # test that the credentials from file are saved and used as the credentials.
- with mock.patch.object(
- google.auth, "load_credentials_from_file", autospec=True
- ) as load_creds, mock.patch.object(
- google.auth, "default", autospec=True
- ) as adc, mock.patch.object(
- grpc_helpers, "create_channel"
- ) as create_channel:
- creds = ga_credentials.AnonymousCredentials()
- file_creds = ga_credentials.AnonymousCredentials()
- load_creds.return_value = (file_creds, None)
- adc.return_value = (creds, None)
- client = client_class(client_options=options, transport=transport_name)
- create_channel.assert_called_with(
- "aiplatform.googleapis.com:443",
- credentials=file_creds,
- credentials_file=None,
- quota_project_id=None,
- default_scopes=("https://www.googleapis.com/auth/cloud-platform",),
- scopes=None,
- default_host="aiplatform.googleapis.com",
- ssl_credentials=None,
- options=[
- ("grpc.max_send_message_length", -1),
- ("grpc.max_receive_message_length", -1),
- ],
- )
-
def test_dataset_service_client_client_options_from_dict():
with mock.patch(
@@ -614,6 +585,72 @@ def test_dataset_service_client_client_options_from_dict():
)
+@pytest.mark.parametrize(
+ "client_class,transport_class,transport_name,grpc_helpers",
+ [
+ (
+ DatasetServiceClient,
+ transports.DatasetServiceGrpcTransport,
+ "grpc",
+ grpc_helpers,
+ ),
+ (
+ DatasetServiceAsyncClient,
+ transports.DatasetServiceGrpcAsyncIOTransport,
+ "grpc_asyncio",
+ grpc_helpers_async,
+ ),
+ ],
+)
+def test_dataset_service_client_create_channel_credentials_file(
+ client_class, transport_class, transport_name, grpc_helpers
+):
+ # Check the case credentials file is provided.
+ options = client_options.ClientOptions(credentials_file="credentials.json")
+
+ with mock.patch.object(transport_class, "__init__") as patched:
+ patched.return_value = None
+ client = client_class(client_options=options, transport=transport_name)
+ patched.assert_called_once_with(
+ credentials=None,
+ credentials_file="credentials.json",
+ host=client.DEFAULT_ENDPOINT,
+ scopes=None,
+ client_cert_source_for_mtls=None,
+ quota_project_id=None,
+ client_info=transports.base.DEFAULT_CLIENT_INFO,
+ always_use_jwt_access=True,
+ )
+
+ # test that the credentials from file are saved and used as the credentials.
+ with mock.patch.object(
+ google.auth, "load_credentials_from_file", autospec=True
+ ) as load_creds, mock.patch.object(
+ google.auth, "default", autospec=True
+ ) as adc, mock.patch.object(
+ grpc_helpers, "create_channel"
+ ) as create_channel:
+ creds = ga_credentials.AnonymousCredentials()
+ file_creds = ga_credentials.AnonymousCredentials()
+ load_creds.return_value = (file_creds, None)
+ adc.return_value = (creds, None)
+ client = client_class(client_options=options, transport=transport_name)
+ create_channel.assert_called_with(
+ "aiplatform.googleapis.com:443",
+ credentials=file_creds,
+ credentials_file=None,
+ quota_project_id=None,
+ default_scopes=("https://www.googleapis.com/auth/cloud-platform",),
+ scopes=None,
+ default_host="aiplatform.googleapis.com",
+ ssl_credentials=None,
+ options=[
+ ("grpc.max_send_message_length", -1),
+ ("grpc.max_receive_message_length", -1),
+ ],
+ )
+
+
@pytest.mark.parametrize("request_type", [dataset_service.CreateDatasetRequest, dict,])
def test_create_dataset(request_type, transport: str = "grpc"):
client = DatasetServiceClient(
diff --git a/tests/unit/gapic/aiplatform_v1beta1/test_endpoint_service.py b/tests/unit/gapic/aiplatform_v1beta1/test_endpoint_service.py
index 504fd06f2f..10ce05bc17 100644
--- a/tests/unit/gapic/aiplatform_v1beta1/test_endpoint_service.py
+++ b/tests/unit/gapic/aiplatform_v1beta1/test_endpoint_service.py
@@ -575,35 +575,6 @@ def test_endpoint_service_client_client_options_credentials_file(
always_use_jwt_access=True,
)
- if "grpc" in transport_name:
- # test that the credentials from file are saved and used as the credentials.
- with mock.patch.object(
- google.auth, "load_credentials_from_file", autospec=True
- ) as load_creds, mock.patch.object(
- google.auth, "default", autospec=True
- ) as adc, mock.patch.object(
- grpc_helpers, "create_channel"
- ) as create_channel:
- creds = ga_credentials.AnonymousCredentials()
- file_creds = ga_credentials.AnonymousCredentials()
- load_creds.return_value = (file_creds, None)
- adc.return_value = (creds, None)
- client = client_class(client_options=options, transport=transport_name)
- create_channel.assert_called_with(
- "aiplatform.googleapis.com:443",
- credentials=file_creds,
- credentials_file=None,
- quota_project_id=None,
- default_scopes=("https://www.googleapis.com/auth/cloud-platform",),
- scopes=None,
- default_host="aiplatform.googleapis.com",
- ssl_credentials=None,
- options=[
- ("grpc.max_send_message_length", -1),
- ("grpc.max_receive_message_length", -1),
- ],
- )
-
def test_endpoint_service_client_client_options_from_dict():
with mock.patch(
@@ -625,6 +596,72 @@ def test_endpoint_service_client_client_options_from_dict():
)
+@pytest.mark.parametrize(
+ "client_class,transport_class,transport_name,grpc_helpers",
+ [
+ (
+ EndpointServiceClient,
+ transports.EndpointServiceGrpcTransport,
+ "grpc",
+ grpc_helpers,
+ ),
+ (
+ EndpointServiceAsyncClient,
+ transports.EndpointServiceGrpcAsyncIOTransport,
+ "grpc_asyncio",
+ grpc_helpers_async,
+ ),
+ ],
+)
+def test_endpoint_service_client_create_channel_credentials_file(
+ client_class, transport_class, transport_name, grpc_helpers
+):
+ # Check the case credentials file is provided.
+ options = client_options.ClientOptions(credentials_file="credentials.json")
+
+ with mock.patch.object(transport_class, "__init__") as patched:
+ patched.return_value = None
+ client = client_class(client_options=options, transport=transport_name)
+ patched.assert_called_once_with(
+ credentials=None,
+ credentials_file="credentials.json",
+ host=client.DEFAULT_ENDPOINT,
+ scopes=None,
+ client_cert_source_for_mtls=None,
+ quota_project_id=None,
+ client_info=transports.base.DEFAULT_CLIENT_INFO,
+ always_use_jwt_access=True,
+ )
+
+ # test that the credentials from file are saved and used as the credentials.
+ with mock.patch.object(
+ google.auth, "load_credentials_from_file", autospec=True
+ ) as load_creds, mock.patch.object(
+ google.auth, "default", autospec=True
+ ) as adc, mock.patch.object(
+ grpc_helpers, "create_channel"
+ ) as create_channel:
+ creds = ga_credentials.AnonymousCredentials()
+ file_creds = ga_credentials.AnonymousCredentials()
+ load_creds.return_value = (file_creds, None)
+ adc.return_value = (creds, None)
+ client = client_class(client_options=options, transport=transport_name)
+ create_channel.assert_called_with(
+ "aiplatform.googleapis.com:443",
+ credentials=file_creds,
+ credentials_file=None,
+ quota_project_id=None,
+ default_scopes=("https://www.googleapis.com/auth/cloud-platform",),
+ scopes=None,
+ default_host="aiplatform.googleapis.com",
+ ssl_credentials=None,
+ options=[
+ ("grpc.max_send_message_length", -1),
+ ("grpc.max_receive_message_length", -1),
+ ],
+ )
+
+
@pytest.mark.parametrize(
"request_type", [endpoint_service.CreateEndpointRequest, dict,]
)
diff --git a/tests/unit/gapic/aiplatform_v1beta1/test_featurestore_online_serving_service.py b/tests/unit/gapic/aiplatform_v1beta1/test_featurestore_online_serving_service.py
index 1b74c43563..eb8c878974 100644
--- a/tests/unit/gapic/aiplatform_v1beta1/test_featurestore_online_serving_service.py
+++ b/tests/unit/gapic/aiplatform_v1beta1/test_featurestore_online_serving_service.py
@@ -602,35 +602,6 @@ def test_featurestore_online_serving_service_client_client_options_credentials_f
always_use_jwt_access=True,
)
- if "grpc" in transport_name:
- # test that the credentials from file are saved and used as the credentials.
- with mock.patch.object(
- google.auth, "load_credentials_from_file", autospec=True
- ) as load_creds, mock.patch.object(
- google.auth, "default", autospec=True
- ) as adc, mock.patch.object(
- grpc_helpers, "create_channel"
- ) as create_channel:
- creds = ga_credentials.AnonymousCredentials()
- file_creds = ga_credentials.AnonymousCredentials()
- load_creds.return_value = (file_creds, None)
- adc.return_value = (creds, None)
- client = client_class(client_options=options, transport=transport_name)
- create_channel.assert_called_with(
- "aiplatform.googleapis.com:443",
- credentials=file_creds,
- credentials_file=None,
- quota_project_id=None,
- default_scopes=("https://www.googleapis.com/auth/cloud-platform",),
- scopes=None,
- default_host="aiplatform.googleapis.com",
- ssl_credentials=None,
- options=[
- ("grpc.max_send_message_length", -1),
- ("grpc.max_receive_message_length", -1),
- ],
- )
-
def test_featurestore_online_serving_service_client_client_options_from_dict():
with mock.patch(
@@ -652,6 +623,72 @@ def test_featurestore_online_serving_service_client_client_options_from_dict():
)
+@pytest.mark.parametrize(
+ "client_class,transport_class,transport_name,grpc_helpers",
+ [
+ (
+ FeaturestoreOnlineServingServiceClient,
+ transports.FeaturestoreOnlineServingServiceGrpcTransport,
+ "grpc",
+ grpc_helpers,
+ ),
+ (
+ FeaturestoreOnlineServingServiceAsyncClient,
+ transports.FeaturestoreOnlineServingServiceGrpcAsyncIOTransport,
+ "grpc_asyncio",
+ grpc_helpers_async,
+ ),
+ ],
+)
+def test_featurestore_online_serving_service_client_create_channel_credentials_file(
+ client_class, transport_class, transport_name, grpc_helpers
+):
+ # Check the case credentials file is provided.
+ options = client_options.ClientOptions(credentials_file="credentials.json")
+
+ with mock.patch.object(transport_class, "__init__") as patched:
+ patched.return_value = None
+ client = client_class(client_options=options, transport=transport_name)
+ patched.assert_called_once_with(
+ credentials=None,
+ credentials_file="credentials.json",
+ host=client.DEFAULT_ENDPOINT,
+ scopes=None,
+ client_cert_source_for_mtls=None,
+ quota_project_id=None,
+ client_info=transports.base.DEFAULT_CLIENT_INFO,
+ always_use_jwt_access=True,
+ )
+
+ # test that the credentials from file are saved and used as the credentials.
+ with mock.patch.object(
+ google.auth, "load_credentials_from_file", autospec=True
+ ) as load_creds, mock.patch.object(
+ google.auth, "default", autospec=True
+ ) as adc, mock.patch.object(
+ grpc_helpers, "create_channel"
+ ) as create_channel:
+ creds = ga_credentials.AnonymousCredentials()
+ file_creds = ga_credentials.AnonymousCredentials()
+ load_creds.return_value = (file_creds, None)
+ adc.return_value = (creds, None)
+ client = client_class(client_options=options, transport=transport_name)
+ create_channel.assert_called_with(
+ "aiplatform.googleapis.com:443",
+ credentials=file_creds,
+ credentials_file=None,
+ quota_project_id=None,
+ default_scopes=("https://www.googleapis.com/auth/cloud-platform",),
+ scopes=None,
+ default_host="aiplatform.googleapis.com",
+ ssl_credentials=None,
+ options=[
+ ("grpc.max_send_message_length", -1),
+ ("grpc.max_receive_message_length", -1),
+ ],
+ )
+
+
@pytest.mark.parametrize(
"request_type", [featurestore_online_service.ReadFeatureValuesRequest, dict,]
)
diff --git a/tests/unit/gapic/aiplatform_v1beta1/test_featurestore_service.py b/tests/unit/gapic/aiplatform_v1beta1/test_featurestore_service.py
index f1c3dbc2b7..0ce2e0d356 100644
--- a/tests/unit/gapic/aiplatform_v1beta1/test_featurestore_service.py
+++ b/tests/unit/gapic/aiplatform_v1beta1/test_featurestore_service.py
@@ -587,35 +587,6 @@ def test_featurestore_service_client_client_options_credentials_file(
always_use_jwt_access=True,
)
- if "grpc" in transport_name:
- # test that the credentials from file are saved and used as the credentials.
- with mock.patch.object(
- google.auth, "load_credentials_from_file", autospec=True
- ) as load_creds, mock.patch.object(
- google.auth, "default", autospec=True
- ) as adc, mock.patch.object(
- grpc_helpers, "create_channel"
- ) as create_channel:
- creds = ga_credentials.AnonymousCredentials()
- file_creds = ga_credentials.AnonymousCredentials()
- load_creds.return_value = (file_creds, None)
- adc.return_value = (creds, None)
- client = client_class(client_options=options, transport=transport_name)
- create_channel.assert_called_with(
- "aiplatform.googleapis.com:443",
- credentials=file_creds,
- credentials_file=None,
- quota_project_id=None,
- default_scopes=("https://www.googleapis.com/auth/cloud-platform",),
- scopes=None,
- default_host="aiplatform.googleapis.com",
- ssl_credentials=None,
- options=[
- ("grpc.max_send_message_length", -1),
- ("grpc.max_receive_message_length", -1),
- ],
- )
-
def test_featurestore_service_client_client_options_from_dict():
with mock.patch(
@@ -637,6 +608,72 @@ def test_featurestore_service_client_client_options_from_dict():
)
+@pytest.mark.parametrize(
+ "client_class,transport_class,transport_name,grpc_helpers",
+ [
+ (
+ FeaturestoreServiceClient,
+ transports.FeaturestoreServiceGrpcTransport,
+ "grpc",
+ grpc_helpers,
+ ),
+ (
+ FeaturestoreServiceAsyncClient,
+ transports.FeaturestoreServiceGrpcAsyncIOTransport,
+ "grpc_asyncio",
+ grpc_helpers_async,
+ ),
+ ],
+)
+def test_featurestore_service_client_create_channel_credentials_file(
+ client_class, transport_class, transport_name, grpc_helpers
+):
+ # Check the case credentials file is provided.
+ options = client_options.ClientOptions(credentials_file="credentials.json")
+
+ with mock.patch.object(transport_class, "__init__") as patched:
+ patched.return_value = None
+ client = client_class(client_options=options, transport=transport_name)
+ patched.assert_called_once_with(
+ credentials=None,
+ credentials_file="credentials.json",
+ host=client.DEFAULT_ENDPOINT,
+ scopes=None,
+ client_cert_source_for_mtls=None,
+ quota_project_id=None,
+ client_info=transports.base.DEFAULT_CLIENT_INFO,
+ always_use_jwt_access=True,
+ )
+
+ # test that the credentials from file are saved and used as the credentials.
+ with mock.patch.object(
+ google.auth, "load_credentials_from_file", autospec=True
+ ) as load_creds, mock.patch.object(
+ google.auth, "default", autospec=True
+ ) as adc, mock.patch.object(
+ grpc_helpers, "create_channel"
+ ) as create_channel:
+ creds = ga_credentials.AnonymousCredentials()
+ file_creds = ga_credentials.AnonymousCredentials()
+ load_creds.return_value = (file_creds, None)
+ adc.return_value = (creds, None)
+ client = client_class(client_options=options, transport=transport_name)
+ create_channel.assert_called_with(
+ "aiplatform.googleapis.com:443",
+ credentials=file_creds,
+ credentials_file=None,
+ quota_project_id=None,
+ default_scopes=("https://www.googleapis.com/auth/cloud-platform",),
+ scopes=None,
+ default_host="aiplatform.googleapis.com",
+ ssl_credentials=None,
+ options=[
+ ("grpc.max_send_message_length", -1),
+ ("grpc.max_receive_message_length", -1),
+ ],
+ )
+
+
@pytest.mark.parametrize(
"request_type", [featurestore_service.CreateFeaturestoreRequest, dict,]
)
diff --git a/tests/unit/gapic/aiplatform_v1beta1/test_index_endpoint_service.py b/tests/unit/gapic/aiplatform_v1beta1/test_index_endpoint_service.py
index c90d804f64..14b92cb278 100644
--- a/tests/unit/gapic/aiplatform_v1beta1/test_index_endpoint_service.py
+++ b/tests/unit/gapic/aiplatform_v1beta1/test_index_endpoint_service.py
@@ -579,35 +579,6 @@ def test_index_endpoint_service_client_client_options_credentials_file(
always_use_jwt_access=True,
)
- if "grpc" in transport_name:
- # test that the credentials from file are saved and used as the credentials.
- with mock.patch.object(
- google.auth, "load_credentials_from_file", autospec=True
- ) as load_creds, mock.patch.object(
- google.auth, "default", autospec=True
- ) as adc, mock.patch.object(
- grpc_helpers, "create_channel"
- ) as create_channel:
- creds = ga_credentials.AnonymousCredentials()
- file_creds = ga_credentials.AnonymousCredentials()
- load_creds.return_value = (file_creds, None)
- adc.return_value = (creds, None)
- client = client_class(client_options=options, transport=transport_name)
- create_channel.assert_called_with(
- "aiplatform.googleapis.com:443",
- credentials=file_creds,
- credentials_file=None,
- quota_project_id=None,
- default_scopes=("https://www.googleapis.com/auth/cloud-platform",),
- scopes=None,
- default_host="aiplatform.googleapis.com",
- ssl_credentials=None,
- options=[
- ("grpc.max_send_message_length", -1),
- ("grpc.max_receive_message_length", -1),
- ],
- )
-
def test_index_endpoint_service_client_client_options_from_dict():
with mock.patch(
@@ -629,6 +600,72 @@ def test_index_endpoint_service_client_client_options_from_dict():
)
+@pytest.mark.parametrize(
+ "client_class,transport_class,transport_name,grpc_helpers",
+ [
+ (
+ IndexEndpointServiceClient,
+ transports.IndexEndpointServiceGrpcTransport,
+ "grpc",
+ grpc_helpers,
+ ),
+ (
+ IndexEndpointServiceAsyncClient,
+ transports.IndexEndpointServiceGrpcAsyncIOTransport,
+ "grpc_asyncio",
+ grpc_helpers_async,
+ ),
+ ],
+)
+def test_index_endpoint_service_client_create_channel_credentials_file(
+ client_class, transport_class, transport_name, grpc_helpers
+):
+ # Check the case credentials file is provided.
+ options = client_options.ClientOptions(credentials_file="credentials.json")
+
+ with mock.patch.object(transport_class, "__init__") as patched:
+ patched.return_value = None
+ client = client_class(client_options=options, transport=transport_name)
+ patched.assert_called_once_with(
+ credentials=None,
+ credentials_file="credentials.json",
+ host=client.DEFAULT_ENDPOINT,
+ scopes=None,
+ client_cert_source_for_mtls=None,
+ quota_project_id=None,
+ client_info=transports.base.DEFAULT_CLIENT_INFO,
+ always_use_jwt_access=True,
+ )
+
+ # test that the credentials from file are saved and used as the credentials.
+ with mock.patch.object(
+ google.auth, "load_credentials_from_file", autospec=True
+ ) as load_creds, mock.patch.object(
+ google.auth, "default", autospec=True
+ ) as adc, mock.patch.object(
+ grpc_helpers, "create_channel"
+ ) as create_channel:
+ creds = ga_credentials.AnonymousCredentials()
+ file_creds = ga_credentials.AnonymousCredentials()
+ load_creds.return_value = (file_creds, None)
+ adc.return_value = (creds, None)
+ client = client_class(client_options=options, transport=transport_name)
+ create_channel.assert_called_with(
+ "aiplatform.googleapis.com:443",
+ credentials=file_creds,
+ credentials_file=None,
+ quota_project_id=None,
+ default_scopes=("https://www.googleapis.com/auth/cloud-platform",),
+ scopes=None,
+ default_host="aiplatform.googleapis.com",
+ ssl_credentials=None,
+ options=[
+ ("grpc.max_send_message_length", -1),
+ ("grpc.max_receive_message_length", -1),
+ ],
+ )
+
+
@pytest.mark.parametrize(
"request_type", [index_endpoint_service.CreateIndexEndpointRequest, dict,]
)
diff --git a/tests/unit/gapic/aiplatform_v1beta1/test_index_service.py b/tests/unit/gapic/aiplatform_v1beta1/test_index_service.py
index 4babf78b58..199f562eff 100644
--- a/tests/unit/gapic/aiplatform_v1beta1/test_index_service.py
+++ b/tests/unit/gapic/aiplatform_v1beta1/test_index_service.py
@@ -543,35 +543,6 @@ def test_index_service_client_client_options_credentials_file(
always_use_jwt_access=True,
)
- if "grpc" in transport_name:
- # test that the credentials from file are saved and used as the credentials.
- with mock.patch.object(
- google.auth, "load_credentials_from_file", autospec=True
- ) as load_creds, mock.patch.object(
- google.auth, "default", autospec=True
- ) as adc, mock.patch.object(
- grpc_helpers, "create_channel"
- ) as create_channel:
- creds = ga_credentials.AnonymousCredentials()
- file_creds = ga_credentials.AnonymousCredentials()
- load_creds.return_value = (file_creds, None)
- adc.return_value = (creds, None)
- client = client_class(client_options=options, transport=transport_name)
- create_channel.assert_called_with(
- "aiplatform.googleapis.com:443",
- credentials=file_creds,
- credentials_file=None,
- quota_project_id=None,
- default_scopes=("https://www.googleapis.com/auth/cloud-platform",),
- scopes=None,
- default_host="aiplatform.googleapis.com",
- ssl_credentials=None,
- options=[
- ("grpc.max_send_message_length", -1),
- ("grpc.max_receive_message_length", -1),
- ],
- )
-
def test_index_service_client_client_options_from_dict():
with mock.patch(
@@ -591,6 +562,72 @@ def test_index_service_client_client_options_from_dict():
)
+@pytest.mark.parametrize(
+ "client_class,transport_class,transport_name,grpc_helpers",
+ [
+ (
+ IndexServiceClient,
+ transports.IndexServiceGrpcTransport,
+ "grpc",
+ grpc_helpers,
+ ),
+ (
+ IndexServiceAsyncClient,
+ transports.IndexServiceGrpcAsyncIOTransport,
+ "grpc_asyncio",
+ grpc_helpers_async,
+ ),
+ ],
+)
+def test_index_service_client_create_channel_credentials_file(
+ client_class, transport_class, transport_name, grpc_helpers
+):
+ # Check the case credentials file is provided.
+ options = client_options.ClientOptions(credentials_file="credentials.json")
+
+ with mock.patch.object(transport_class, "__init__") as patched:
+ patched.return_value = None
+ client = client_class(client_options=options, transport=transport_name)
+ patched.assert_called_once_with(
+ credentials=None,
+ credentials_file="credentials.json",
+ host=client.DEFAULT_ENDPOINT,
+ scopes=None,
+ client_cert_source_for_mtls=None,
+ quota_project_id=None,
+ client_info=transports.base.DEFAULT_CLIENT_INFO,
+ always_use_jwt_access=True,
+ )
+
+ # test that the credentials from file are saved and used as the credentials.
+ with mock.patch.object(
+ google.auth, "load_credentials_from_file", autospec=True
+ ) as load_creds, mock.patch.object(
+ google.auth, "default", autospec=True
+ ) as adc, mock.patch.object(
+ grpc_helpers, "create_channel"
+ ) as create_channel:
+ creds = ga_credentials.AnonymousCredentials()
+ file_creds = ga_credentials.AnonymousCredentials()
+ load_creds.return_value = (file_creds, None)
+ adc.return_value = (creds, None)
+ client = client_class(client_options=options, transport=transport_name)
+ create_channel.assert_called_with(
+ "aiplatform.googleapis.com:443",
+ credentials=file_creds,
+ credentials_file=None,
+ quota_project_id=None,
+ default_scopes=("https://www.googleapis.com/auth/cloud-platform",),
+ scopes=None,
+ default_host="aiplatform.googleapis.com",
+ ssl_credentials=None,
+ options=[
+ ("grpc.max_send_message_length", -1),
+ ("grpc.max_receive_message_length", -1),
+ ],
+ )
+
+
@pytest.mark.parametrize("request_type", [index_service.CreateIndexRequest, dict,])
def test_create_index(request_type, transport: str = "grpc"):
client = IndexServiceClient(
diff --git a/tests/unit/gapic/aiplatform_v1beta1/test_job_service.py b/tests/unit/gapic/aiplatform_v1beta1/test_job_service.py
index 1190c19757..e19bf12473 100644
--- a/tests/unit/gapic/aiplatform_v1beta1/test_job_service.py
+++ b/tests/unit/gapic/aiplatform_v1beta1/test_job_service.py
@@ -569,35 +569,6 @@ def test_job_service_client_client_options_credentials_file(
always_use_jwt_access=True,
)
- if "grpc" in transport_name:
- # test that the credentials from file are saved and used as the credentials.
- with mock.patch.object(
- google.auth, "load_credentials_from_file", autospec=True
- ) as load_creds, mock.patch.object(
- google.auth, "default", autospec=True
- ) as adc, mock.patch.object(
- grpc_helpers, "create_channel"
- ) as create_channel:
- creds = ga_credentials.AnonymousCredentials()
- file_creds = ga_credentials.AnonymousCredentials()
- load_creds.return_value = (file_creds, None)
- adc.return_value = (creds, None)
- client = client_class(client_options=options, transport=transport_name)
- create_channel.assert_called_with(
- "aiplatform.googleapis.com:443",
- credentials=file_creds,
- credentials_file=None,
- quota_project_id=None,
- default_scopes=("https://www.googleapis.com/auth/cloud-platform",),
- scopes=None,
- default_host="aiplatform.googleapis.com",
- ssl_credentials=None,
- options=[
- ("grpc.max_send_message_length", -1),
- ("grpc.max_receive_message_length", -1),
- ],
- )
-
def test_job_service_client_client_options_from_dict():
with mock.patch(
@@ -617,6 +588,67 @@ def test_job_service_client_client_options_from_dict():
)
+@pytest.mark.parametrize(
+ "client_class,transport_class,transport_name,grpc_helpers",
+ [
+ (JobServiceClient, transports.JobServiceGrpcTransport, "grpc", grpc_helpers),
+ (
+ JobServiceAsyncClient,
+ transports.JobServiceGrpcAsyncIOTransport,
+ "grpc_asyncio",
+ grpc_helpers_async,
+ ),
+ ],
+)
+def test_job_service_client_create_channel_credentials_file(
+ client_class, transport_class, transport_name, grpc_helpers
+):
+ # Check the case credentials file is provided.
+ options = client_options.ClientOptions(credentials_file="credentials.json")
+
+ with mock.patch.object(transport_class, "__init__") as patched:
+ patched.return_value = None
+ client = client_class(client_options=options, transport=transport_name)
+ patched.assert_called_once_with(
+ credentials=None,
+ credentials_file="credentials.json",
+ host=client.DEFAULT_ENDPOINT,
+ scopes=None,
+ client_cert_source_for_mtls=None,
+ quota_project_id=None,
+ client_info=transports.base.DEFAULT_CLIENT_INFO,
+ always_use_jwt_access=True,
+ )
+
+ # test that the credentials from file are saved and used as the credentials.
+ with mock.patch.object(
+ google.auth, "load_credentials_from_file", autospec=True
+ ) as load_creds, mock.patch.object(
+ google.auth, "default", autospec=True
+ ) as adc, mock.patch.object(
+ grpc_helpers, "create_channel"
+ ) as create_channel:
+ creds = ga_credentials.AnonymousCredentials()
+ file_creds = ga_credentials.AnonymousCredentials()
+ load_creds.return_value = (file_creds, None)
+ adc.return_value = (creds, None)
+ client = client_class(client_options=options, transport=transport_name)
+ create_channel.assert_called_with(
+ "aiplatform.googleapis.com:443",
+ credentials=file_creds,
+ credentials_file=None,
+ quota_project_id=None,
+ default_scopes=("https://www.googleapis.com/auth/cloud-platform",),
+ scopes=None,
+ default_host="aiplatform.googleapis.com",
+ ssl_credentials=None,
+ options=[
+ ("grpc.max_send_message_length", -1),
+ ("grpc.max_receive_message_length", -1),
+ ],
+ )
+
+
@pytest.mark.parametrize("request_type", [job_service.CreateCustomJobRequest, dict,])
def test_create_custom_job(request_type, transport: str = "grpc"):
client = JobServiceClient(
diff --git a/tests/unit/gapic/aiplatform_v1beta1/test_metadata_service.py b/tests/unit/gapic/aiplatform_v1beta1/test_metadata_service.py
index 9768bec18b..1ad7f4f193 100644
--- a/tests/unit/gapic/aiplatform_v1beta1/test_metadata_service.py
+++ b/tests/unit/gapic/aiplatform_v1beta1/test_metadata_service.py
@@ -580,35 +580,6 @@ def test_metadata_service_client_client_options_credentials_file(
always_use_jwt_access=True,
)
- if "grpc" in transport_name:
- # test that the credentials from file are saved and used as the credentials.
- with mock.patch.object(
- google.auth, "load_credentials_from_file", autospec=True
- ) as load_creds, mock.patch.object(
- google.auth, "default", autospec=True
- ) as adc, mock.patch.object(
- grpc_helpers, "create_channel"
- ) as create_channel:
- creds = ga_credentials.AnonymousCredentials()
- file_creds = ga_credentials.AnonymousCredentials()
- load_creds.return_value = (file_creds, None)
- adc.return_value = (creds, None)
- client = client_class(client_options=options, transport=transport_name)
- create_channel.assert_called_with(
- "aiplatform.googleapis.com:443",
- credentials=file_creds,
- credentials_file=None,
- quota_project_id=None,
- default_scopes=("https://www.googleapis.com/auth/cloud-platform",),
- scopes=None,
- default_host="aiplatform.googleapis.com",
- ssl_credentials=None,
- options=[
- ("grpc.max_send_message_length", -1),
- ("grpc.max_receive_message_length", -1),
- ],
- )
-
def test_metadata_service_client_client_options_from_dict():
with mock.patch(
@@ -630,6 +601,72 @@ def test_metadata_service_client_client_options_from_dict():
)
+@pytest.mark.parametrize(
+ "client_class,transport_class,transport_name,grpc_helpers",
+ [
+ (
+ MetadataServiceClient,
+ transports.MetadataServiceGrpcTransport,
+ "grpc",
+ grpc_helpers,
+ ),
+ (
+ MetadataServiceAsyncClient,
+ transports.MetadataServiceGrpcAsyncIOTransport,
+ "grpc_asyncio",
+ grpc_helpers_async,
+ ),
+ ],
+)
+def test_metadata_service_client_create_channel_credentials_file(
+ client_class, transport_class, transport_name, grpc_helpers
+):
+ # Check the case credentials file is provided.
+ options = client_options.ClientOptions(credentials_file="credentials.json")
+
+ with mock.patch.object(transport_class, "__init__") as patched:
+ patched.return_value = None
+ client = client_class(client_options=options, transport=transport_name)
+ patched.assert_called_once_with(
+ credentials=None,
+ credentials_file="credentials.json",
+ host=client.DEFAULT_ENDPOINT,
+ scopes=None,
+ client_cert_source_for_mtls=None,
+ quota_project_id=None,
+ client_info=transports.base.DEFAULT_CLIENT_INFO,
+ always_use_jwt_access=True,
+ )
+
+ # test that the credentials from file are saved and used as the credentials.
+ with mock.patch.object(
+ google.auth, "load_credentials_from_file", autospec=True
+ ) as load_creds, mock.patch.object(
+ google.auth, "default", autospec=True
+ ) as adc, mock.patch.object(
+ grpc_helpers, "create_channel"
+ ) as create_channel:
+ creds = ga_credentials.AnonymousCredentials()
+ file_creds = ga_credentials.AnonymousCredentials()
+ load_creds.return_value = (file_creds, None)
+ adc.return_value = (creds, None)
+ client = client_class(client_options=options, transport=transport_name)
+ create_channel.assert_called_with(
+ "aiplatform.googleapis.com:443",
+ credentials=file_creds,
+ credentials_file=None,
+ quota_project_id=None,
+ default_scopes=("https://www.googleapis.com/auth/cloud-platform",),
+ scopes=None,
+ default_host="aiplatform.googleapis.com",
+ ssl_credentials=None,
+ options=[
+ ("grpc.max_send_message_length", -1),
+ ("grpc.max_receive_message_length", -1),
+ ],
+ )
+
+
@pytest.mark.parametrize(
"request_type", [metadata_service.CreateMetadataStoreRequest, dict,]
)
diff --git a/tests/unit/gapic/aiplatform_v1beta1/test_migration_service.py b/tests/unit/gapic/aiplatform_v1beta1/test_migration_service.py
index 7f4b3a2060..a932008054 100644
--- a/tests/unit/gapic/aiplatform_v1beta1/test_migration_service.py
+++ b/tests/unit/gapic/aiplatform_v1beta1/test_migration_service.py
@@ -565,35 +565,6 @@ def test_migration_service_client_client_options_credentials_file(
always_use_jwt_access=True,
)
- if "grpc" in transport_name:
- # test that the credentials from file are saved and used as the credentials.
- with mock.patch.object(
- google.auth, "load_credentials_from_file", autospec=True
- ) as load_creds, mock.patch.object(
- google.auth, "default", autospec=True
- ) as adc, mock.patch.object(
- grpc_helpers, "create_channel"
- ) as create_channel:
- creds = ga_credentials.AnonymousCredentials()
- file_creds = ga_credentials.AnonymousCredentials()
- load_creds.return_value = (file_creds, None)
- adc.return_value = (creds, None)
- client = client_class(client_options=options, transport=transport_name)
- create_channel.assert_called_with(
- "aiplatform.googleapis.com:443",
- credentials=file_creds,
- credentials_file=None,
- quota_project_id=None,
- default_scopes=("https://www.googleapis.com/auth/cloud-platform",),
- scopes=None,
- default_host="aiplatform.googleapis.com",
- ssl_credentials=None,
- options=[
- ("grpc.max_send_message_length", -1),
- ("grpc.max_receive_message_length", -1),
- ],
- )
-
def test_migration_service_client_client_options_from_dict():
with mock.patch(
@@ -615,6 +586,72 @@ def test_migration_service_client_client_options_from_dict():
)
+@pytest.mark.parametrize(
+ "client_class,transport_class,transport_name,grpc_helpers",
+ [
+ (
+ MigrationServiceClient,
+ transports.MigrationServiceGrpcTransport,
+ "grpc",
+ grpc_helpers,
+ ),
+ (
+ MigrationServiceAsyncClient,
+ transports.MigrationServiceGrpcAsyncIOTransport,
+ "grpc_asyncio",
+ grpc_helpers_async,
+ ),
+ ],
+)
+def test_migration_service_client_create_channel_credentials_file(
+ client_class, transport_class, transport_name, grpc_helpers
+):
+ # Check the case credentials file is provided.
+ options = client_options.ClientOptions(credentials_file="credentials.json")
+
+ with mock.patch.object(transport_class, "__init__") as patched:
+ patched.return_value = None
+ client = client_class(client_options=options, transport=transport_name)
+ patched.assert_called_once_with(
+ credentials=None,
+ credentials_file="credentials.json",
+ host=client.DEFAULT_ENDPOINT,
+ scopes=None,
+ client_cert_source_for_mtls=None,
+ quota_project_id=None,
+ client_info=transports.base.DEFAULT_CLIENT_INFO,
+ always_use_jwt_access=True,
+ )
+
+ # test that the credentials from file are saved and used as the credentials.
+ with mock.patch.object(
+ google.auth, "load_credentials_from_file", autospec=True
+ ) as load_creds, mock.patch.object(
+ google.auth, "default", autospec=True
+ ) as adc, mock.patch.object(
+ grpc_helpers, "create_channel"
+ ) as create_channel:
+ creds = ga_credentials.AnonymousCredentials()
+ file_creds = ga_credentials.AnonymousCredentials()
+ load_creds.return_value = (file_creds, None)
+ adc.return_value = (creds, None)
+ client = client_class(client_options=options, transport=transport_name)
+ create_channel.assert_called_with(
+ "aiplatform.googleapis.com:443",
+ credentials=file_creds,
+ credentials_file=None,
+ quota_project_id=None,
+ default_scopes=("https://www.googleapis.com/auth/cloud-platform",),
+ scopes=None,
+ default_host="aiplatform.googleapis.com",
+ ssl_credentials=None,
+ options=[
+ ("grpc.max_send_message_length", -1),
+ ("grpc.max_receive_message_length", -1),
+ ],
+ )
+
+
@pytest.mark.parametrize(
"request_type", [migration_service.SearchMigratableResourcesRequest, dict,]
)
diff --git a/tests/unit/gapic/aiplatform_v1beta1/test_model_service.py b/tests/unit/gapic/aiplatform_v1beta1/test_model_service.py
index 0e20af1d5f..5e80198696 100644
--- a/tests/unit/gapic/aiplatform_v1beta1/test_model_service.py
+++ b/tests/unit/gapic/aiplatform_v1beta1/test_model_service.py
@@ -550,35 +550,6 @@ def test_model_service_client_client_options_credentials_file(
always_use_jwt_access=True,
)
- if "grpc" in transport_name:
- # test that the credentials from file are saved and used as the credentials.
- with mock.patch.object(
- google.auth, "load_credentials_from_file", autospec=True
- ) as load_creds, mock.patch.object(
- google.auth, "default", autospec=True
- ) as adc, mock.patch.object(
- grpc_helpers, "create_channel"
- ) as create_channel:
- creds = ga_credentials.AnonymousCredentials()
- file_creds = ga_credentials.AnonymousCredentials()
- load_creds.return_value = (file_creds, None)
- adc.return_value = (creds, None)
- client = client_class(client_options=options, transport=transport_name)
- create_channel.assert_called_with(
- "aiplatform.googleapis.com:443",
- credentials=file_creds,
- credentials_file=None,
- quota_project_id=None,
- default_scopes=("https://www.googleapis.com/auth/cloud-platform",),
- scopes=None,
- default_host="aiplatform.googleapis.com",
- ssl_credentials=None,
- options=[
- ("grpc.max_send_message_length", -1),
- ("grpc.max_receive_message_length", -1),
- ],
- )
-
def test_model_service_client_client_options_from_dict():
with mock.patch(
@@ -598,6 +569,72 @@ def test_model_service_client_client_options_from_dict():
)
+@pytest.mark.parametrize(
+ "client_class,transport_class,transport_name,grpc_helpers",
+ [
+ (
+ ModelServiceClient,
+ transports.ModelServiceGrpcTransport,
+ "grpc",
+ grpc_helpers,
+ ),
+ (
+ ModelServiceAsyncClient,
+ transports.ModelServiceGrpcAsyncIOTransport,
+ "grpc_asyncio",
+ grpc_helpers_async,
+ ),
+ ],
+)
+def test_model_service_client_create_channel_credentials_file(
+ client_class, transport_class, transport_name, grpc_helpers
+):
+ # Check the case credentials file is provided.
+ options = client_options.ClientOptions(credentials_file="credentials.json")
+
+ with mock.patch.object(transport_class, "__init__") as patched:
+ patched.return_value = None
+ client = client_class(client_options=options, transport=transport_name)
+ patched.assert_called_once_with(
+ credentials=None,
+ credentials_file="credentials.json",
+ host=client.DEFAULT_ENDPOINT,
+ scopes=None,
+ client_cert_source_for_mtls=None,
+ quota_project_id=None,
+ client_info=transports.base.DEFAULT_CLIENT_INFO,
+ always_use_jwt_access=True,
+ )
+
+ # test that the credentials from file are saved and used as the credentials.
+ with mock.patch.object(
+ google.auth, "load_credentials_from_file", autospec=True
+ ) as load_creds, mock.patch.object(
+ google.auth, "default", autospec=True
+ ) as adc, mock.patch.object(
+ grpc_helpers, "create_channel"
+ ) as create_channel:
+ creds = ga_credentials.AnonymousCredentials()
+ file_creds = ga_credentials.AnonymousCredentials()
+ load_creds.return_value = (file_creds, None)
+ adc.return_value = (creds, None)
+ client = client_class(client_options=options, transport=transport_name)
+ create_channel.assert_called_with(
+ "aiplatform.googleapis.com:443",
+ credentials=file_creds,
+ credentials_file=None,
+ quota_project_id=None,
+ default_scopes=("https://www.googleapis.com/auth/cloud-platform",),
+ scopes=None,
+ default_host="aiplatform.googleapis.com",
+ ssl_credentials=None,
+ options=[
+ ("grpc.max_send_message_length", -1),
+ ("grpc.max_receive_message_length", -1),
+ ],
+ )
+
+
@pytest.mark.parametrize("request_type", [model_service.UploadModelRequest, dict,])
def test_upload_model(request_type, transport: str = "grpc"):
client = ModelServiceClient(
diff --git a/tests/unit/gapic/aiplatform_v1beta1/test_pipeline_service.py b/tests/unit/gapic/aiplatform_v1beta1/test_pipeline_service.py
index 5d42510a2a..a24c62397b 100644
--- a/tests/unit/gapic/aiplatform_v1beta1/test_pipeline_service.py
+++ b/tests/unit/gapic/aiplatform_v1beta1/test_pipeline_service.py
@@ -587,35 +587,6 @@ def test_pipeline_service_client_client_options_credentials_file(
always_use_jwt_access=True,
)
- if "grpc" in transport_name:
- # test that the credentials from file are saved and used as the credentials.
- with mock.patch.object(
- google.auth, "load_credentials_from_file", autospec=True
- ) as load_creds, mock.patch.object(
- google.auth, "default", autospec=True
- ) as adc, mock.patch.object(
- grpc_helpers, "create_channel"
- ) as create_channel:
- creds = ga_credentials.AnonymousCredentials()
- file_creds = ga_credentials.AnonymousCredentials()
- load_creds.return_value = (file_creds, None)
- adc.return_value = (creds, None)
- client = client_class(client_options=options, transport=transport_name)
- create_channel.assert_called_with(
- "aiplatform.googleapis.com:443",
- credentials=file_creds,
- credentials_file=None,
- quota_project_id=None,
- default_scopes=("https://www.googleapis.com/auth/cloud-platform",),
- scopes=None,
- default_host="aiplatform.googleapis.com",
- ssl_credentials=None,
- options=[
- ("grpc.max_send_message_length", -1),
- ("grpc.max_receive_message_length", -1),
- ],
- )
-
def test_pipeline_service_client_client_options_from_dict():
with mock.patch(
@@ -637,6 +608,72 @@ def test_pipeline_service_client_client_options_from_dict():
)
+@pytest.mark.parametrize(
+ "client_class,transport_class,transport_name,grpc_helpers",
+ [
+ (
+ PipelineServiceClient,
+ transports.PipelineServiceGrpcTransport,
+ "grpc",
+ grpc_helpers,
+ ),
+ (
+ PipelineServiceAsyncClient,
+ transports.PipelineServiceGrpcAsyncIOTransport,
+ "grpc_asyncio",
+ grpc_helpers_async,
+ ),
+ ],
+)
+def test_pipeline_service_client_create_channel_credentials_file(
+ client_class, transport_class, transport_name, grpc_helpers
+):
+ # Check the case credentials file is provided.
+ options = client_options.ClientOptions(credentials_file="credentials.json")
+
+ with mock.patch.object(transport_class, "__init__") as patched:
+ patched.return_value = None
+ client = client_class(client_options=options, transport=transport_name)
+ patched.assert_called_once_with(
+ credentials=None,
+ credentials_file="credentials.json",
+ host=client.DEFAULT_ENDPOINT,
+ scopes=None,
+ client_cert_source_for_mtls=None,
+ quota_project_id=None,
+ client_info=transports.base.DEFAULT_CLIENT_INFO,
+ always_use_jwt_access=True,
+ )
+
+ # test that the credentials from file are saved and used as the credentials.
+ with mock.patch.object(
+ google.auth, "load_credentials_from_file", autospec=True
+ ) as load_creds, mock.patch.object(
+ google.auth, "default", autospec=True
+ ) as adc, mock.patch.object(
+ grpc_helpers, "create_channel"
+ ) as create_channel:
+ creds = ga_credentials.AnonymousCredentials()
+ file_creds = ga_credentials.AnonymousCredentials()
+ load_creds.return_value = (file_creds, None)
+ adc.return_value = (creds, None)
+ client = client_class(client_options=options, transport=transport_name)
+ create_channel.assert_called_with(
+ "aiplatform.googleapis.com:443",
+ credentials=file_creds,
+ credentials_file=None,
+ quota_project_id=None,
+ default_scopes=("https://www.googleapis.com/auth/cloud-platform",),
+ scopes=None,
+ default_host="aiplatform.googleapis.com",
+ ssl_credentials=None,
+ options=[
+ ("grpc.max_send_message_length", -1),
+ ("grpc.max_receive_message_length", -1),
+ ],
+ )
+
+
@pytest.mark.parametrize(
"request_type", [pipeline_service.CreateTrainingPipelineRequest, dict,]
)
diff --git a/tests/unit/gapic/aiplatform_v1beta1/test_prediction_service.py b/tests/unit/gapic/aiplatform_v1beta1/test_prediction_service.py
index fa04aed2a7..77636c11f4 100644
--- a/tests/unit/gapic/aiplatform_v1beta1/test_prediction_service.py
+++ b/tests/unit/gapic/aiplatform_v1beta1/test_prediction_service.py
@@ -563,35 +563,6 @@ def test_prediction_service_client_client_options_credentials_file(
always_use_jwt_access=True,
)
- if "grpc" in transport_name:
- # test that the credentials from file are saved and used as the credentials.
- with mock.patch.object(
- google.auth, "load_credentials_from_file", autospec=True
- ) as load_creds, mock.patch.object(
- google.auth, "default", autospec=True
- ) as adc, mock.patch.object(
- grpc_helpers, "create_channel"
- ) as create_channel:
- creds = ga_credentials.AnonymousCredentials()
- file_creds = ga_credentials.AnonymousCredentials()
- load_creds.return_value = (file_creds, None)
- adc.return_value = (creds, None)
- client = client_class(client_options=options, transport=transport_name)
- create_channel.assert_called_with(
- "aiplatform.googleapis.com:443",
- credentials=file_creds,
- credentials_file=None,
- quota_project_id=None,
- default_scopes=("https://www.googleapis.com/auth/cloud-platform",),
- scopes=None,
- default_host="aiplatform.googleapis.com",
- ssl_credentials=None,
- options=[
- ("grpc.max_send_message_length", -1),
- ("grpc.max_receive_message_length", -1),
- ],
- )
-
def test_prediction_service_client_client_options_from_dict():
with mock.patch(
@@ -613,6 +584,72 @@ def test_prediction_service_client_client_options_from_dict():
)
+@pytest.mark.parametrize(
+ "client_class,transport_class,transport_name,grpc_helpers",
+ [
+ (
+ PredictionServiceClient,
+ transports.PredictionServiceGrpcTransport,
+ "grpc",
+ grpc_helpers,
+ ),
+ (
+ PredictionServiceAsyncClient,
+ transports.PredictionServiceGrpcAsyncIOTransport,
+ "grpc_asyncio",
+ grpc_helpers_async,
+ ),
+ ],
+)
+def test_prediction_service_client_create_channel_credentials_file(
+ client_class, transport_class, transport_name, grpc_helpers
+):
+ # Check the case credentials file is provided.
+ options = client_options.ClientOptions(credentials_file="credentials.json")
+
+ with mock.patch.object(transport_class, "__init__") as patched:
+ patched.return_value = None
+ client = client_class(client_options=options, transport=transport_name)
+ patched.assert_called_once_with(
+ credentials=None,
+ credentials_file="credentials.json",
+ host=client.DEFAULT_ENDPOINT,
+ scopes=None,
+ client_cert_source_for_mtls=None,
+ quota_project_id=None,
+ client_info=transports.base.DEFAULT_CLIENT_INFO,
+ always_use_jwt_access=True,
+ )
+
+ # test that the credentials from file are saved and used as the credentials.
+ with mock.patch.object(
+ google.auth, "load_credentials_from_file", autospec=True
+ ) as load_creds, mock.patch.object(
+ google.auth, "default", autospec=True
+ ) as adc, mock.patch.object(
+ grpc_helpers, "create_channel"
+ ) as create_channel:
+ creds = ga_credentials.AnonymousCredentials()
+ file_creds = ga_credentials.AnonymousCredentials()
+ load_creds.return_value = (file_creds, None)
+ adc.return_value = (creds, None)
+ client = client_class(client_options=options, transport=transport_name)
+ create_channel.assert_called_with(
+ "aiplatform.googleapis.com:443",
+ credentials=file_creds,
+ credentials_file=None,
+ quota_project_id=None,
+ default_scopes=("https://www.googleapis.com/auth/cloud-platform",),
+ scopes=None,
+ default_host="aiplatform.googleapis.com",
+ ssl_credentials=None,
+ options=[
+ ("grpc.max_send_message_length", -1),
+ ("grpc.max_receive_message_length", -1),
+ ],
+ )
+
+
@pytest.mark.parametrize("request_type", [prediction_service.PredictRequest, dict,])
def test_predict(request_type, transport: str = "grpc"):
client = PredictionServiceClient(
diff --git a/tests/unit/gapic/aiplatform_v1beta1/test_specialist_pool_service.py b/tests/unit/gapic/aiplatform_v1beta1/test_specialist_pool_service.py
index c55119cdd4..affb44893b 100644
--- a/tests/unit/gapic/aiplatform_v1beta1/test_specialist_pool_service.py
+++ b/tests/unit/gapic/aiplatform_v1beta1/test_specialist_pool_service.py
@@ -576,35 +576,6 @@ def test_specialist_pool_service_client_client_options_credentials_file(
always_use_jwt_access=True,
)
- if "grpc" in transport_name:
- # test that the credentials from file are saved and used as the credentials.
- with mock.patch.object(
- google.auth, "load_credentials_from_file", autospec=True
- ) as load_creds, mock.patch.object(
- google.auth, "default", autospec=True
- ) as adc, mock.patch.object(
- grpc_helpers, "create_channel"
- ) as create_channel:
- creds = ga_credentials.AnonymousCredentials()
- file_creds = ga_credentials.AnonymousCredentials()
- load_creds.return_value = (file_creds, None)
- adc.return_value = (creds, None)
- client = client_class(client_options=options, transport=transport_name)
- create_channel.assert_called_with(
- "aiplatform.googleapis.com:443",
- credentials=file_creds,
- credentials_file=None,
- quota_project_id=None,
- default_scopes=("https://www.googleapis.com/auth/cloud-platform",),
- scopes=None,
- default_host="aiplatform.googleapis.com",
- ssl_credentials=None,
- options=[
- ("grpc.max_send_message_length", -1),
- ("grpc.max_receive_message_length", -1),
- ],
- )
-
def test_specialist_pool_service_client_client_options_from_dict():
with mock.patch(
@@ -626,6 +597,72 @@ def test_specialist_pool_service_client_client_options_from_dict():
)
+@pytest.mark.parametrize(
+ "client_class,transport_class,transport_name,grpc_helpers",
+ [
+ (
+ SpecialistPoolServiceClient,
+ transports.SpecialistPoolServiceGrpcTransport,
+ "grpc",
+ grpc_helpers,
+ ),
+ (
+ SpecialistPoolServiceAsyncClient,
+ transports.SpecialistPoolServiceGrpcAsyncIOTransport,
+ "grpc_asyncio",
+ grpc_helpers_async,
+ ),
+ ],
+)
+def test_specialist_pool_service_client_create_channel_credentials_file(
+ client_class, transport_class, transport_name, grpc_helpers
+):
+ # Check the case credentials file is provided.
+ options = client_options.ClientOptions(credentials_file="credentials.json")
+
+ with mock.patch.object(transport_class, "__init__") as patched:
+ patched.return_value = None
+ client = client_class(client_options=options, transport=transport_name)
+ patched.assert_called_once_with(
+ credentials=None,
+ credentials_file="credentials.json",
+ host=client.DEFAULT_ENDPOINT,
+ scopes=None,
+ client_cert_source_for_mtls=None,
+ quota_project_id=None,
+ client_info=transports.base.DEFAULT_CLIENT_INFO,
+ always_use_jwt_access=True,
+ )
+
+ # test that the credentials from file are saved and used as the credentials.
+ with mock.patch.object(
+ google.auth, "load_credentials_from_file", autospec=True
+ ) as load_creds, mock.patch.object(
+ google.auth, "default", autospec=True
+ ) as adc, mock.patch.object(
+ grpc_helpers, "create_channel"
+ ) as create_channel:
+ creds = ga_credentials.AnonymousCredentials()
+ file_creds = ga_credentials.AnonymousCredentials()
+ load_creds.return_value = (file_creds, None)
+ adc.return_value = (creds, None)
+ client = client_class(client_options=options, transport=transport_name)
+ create_channel.assert_called_with(
+ "aiplatform.googleapis.com:443",
+ credentials=file_creds,
+ credentials_file=None,
+ quota_project_id=None,
+ default_scopes=("https://www.googleapis.com/auth/cloud-platform",),
+ scopes=None,
+ default_host="aiplatform.googleapis.com",
+ ssl_credentials=None,
+ options=[
+ ("grpc.max_send_message_length", -1),
+ ("grpc.max_receive_message_length", -1),
+ ],
+ )
+
+
@pytest.mark.parametrize(
"request_type", [specialist_pool_service.CreateSpecialistPoolRequest, dict,]
)
diff --git a/tests/unit/gapic/aiplatform_v1beta1/test_tensorboard_service.py b/tests/unit/gapic/aiplatform_v1beta1/test_tensorboard_service.py
index 2fb4c03afc..d73c3ebe17 100644
--- a/tests/unit/gapic/aiplatform_v1beta1/test_tensorboard_service.py
+++ b/tests/unit/gapic/aiplatform_v1beta1/test_tensorboard_service.py
@@ -581,35 +581,6 @@ def test_tensorboard_service_client_client_options_credentials_file(
always_use_jwt_access=True,
)
- if "grpc" in transport_name:
- # test that the credentials from file are saved and used as the credentials.
- with mock.patch.object(
- google.auth, "load_credentials_from_file", autospec=True
- ) as load_creds, mock.patch.object(
- google.auth, "default", autospec=True
- ) as adc, mock.patch.object(
- grpc_helpers, "create_channel"
- ) as create_channel:
- creds = ga_credentials.AnonymousCredentials()
- file_creds = ga_credentials.AnonymousCredentials()
- load_creds.return_value = (file_creds, None)
- adc.return_value = (creds, None)
- client = client_class(client_options=options, transport=transport_name)
- create_channel.assert_called_with(
- "aiplatform.googleapis.com:443",
- credentials=file_creds,
- credentials_file=None,
- quota_project_id=None,
- default_scopes=("https://www.googleapis.com/auth/cloud-platform",),
- scopes=None,
- default_host="aiplatform.googleapis.com",
- ssl_credentials=None,
- options=[
- ("grpc.max_send_message_length", -1),
- ("grpc.max_receive_message_length", -1),
- ],
- )
-
def test_tensorboard_service_client_client_options_from_dict():
with mock.patch(
@@ -631,6 +602,72 @@ def test_tensorboard_service_client_client_options_from_dict():
)
+@pytest.mark.parametrize(
+ "client_class,transport_class,transport_name,grpc_helpers",
+ [
+ (
+ TensorboardServiceClient,
+ transports.TensorboardServiceGrpcTransport,
+ "grpc",
+ grpc_helpers,
+ ),
+ (
+ TensorboardServiceAsyncClient,
+ transports.TensorboardServiceGrpcAsyncIOTransport,
+ "grpc_asyncio",
+ grpc_helpers_async,
+ ),
+ ],
+)
+def test_tensorboard_service_client_create_channel_credentials_file(
+ client_class, transport_class, transport_name, grpc_helpers
+):
+ # Check the case credentials file is provided.
+ options = client_options.ClientOptions(credentials_file="credentials.json")
+
+ with mock.patch.object(transport_class, "__init__") as patched:
+ patched.return_value = None
+ client = client_class(client_options=options, transport=transport_name)
+ patched.assert_called_once_with(
+ credentials=None,
+ credentials_file="credentials.json",
+ host=client.DEFAULT_ENDPOINT,
+ scopes=None,
+ client_cert_source_for_mtls=None,
+ quota_project_id=None,
+ client_info=transports.base.DEFAULT_CLIENT_INFO,
+ always_use_jwt_access=True,
+ )
+
+ # test that the credentials from file are saved and used as the credentials.
+ with mock.patch.object(
+ google.auth, "load_credentials_from_file", autospec=True
+ ) as load_creds, mock.patch.object(
+ google.auth, "default", autospec=True
+ ) as adc, mock.patch.object(
+ grpc_helpers, "create_channel"
+ ) as create_channel:
+ creds = ga_credentials.AnonymousCredentials()
+ file_creds = ga_credentials.AnonymousCredentials()
+ load_creds.return_value = (file_creds, None)
+ adc.return_value = (creds, None)
+ client = client_class(client_options=options, transport=transport_name)
+ create_channel.assert_called_with(
+ "aiplatform.googleapis.com:443",
+ credentials=file_creds,
+ credentials_file=None,
+ quota_project_id=None,
+ default_scopes=("https://www.googleapis.com/auth/cloud-platform",),
+ scopes=None,
+ default_host="aiplatform.googleapis.com",
+ ssl_credentials=None,
+ options=[
+ ("grpc.max_send_message_length", -1),
+ ("grpc.max_receive_message_length", -1),
+ ],
+ )
+
+
@pytest.mark.parametrize(
"request_type", [tensorboard_service.CreateTensorboardRequest, dict,]
)
diff --git a/tests/unit/gapic/aiplatform_v1beta1/test_vizier_service.py b/tests/unit/gapic/aiplatform_v1beta1/test_vizier_service.py
index a0331ed3fc..c800d0a837 100644
--- a/tests/unit/gapic/aiplatform_v1beta1/test_vizier_service.py
+++ b/tests/unit/gapic/aiplatform_v1beta1/test_vizier_service.py
@@ -556,35 +556,6 @@ def test_vizier_service_client_client_options_credentials_file(
always_use_jwt_access=True,
)
- if "grpc" in transport_name:
- # test that the credentials from file are saved and used as the credentials.
- with mock.patch.object(
- google.auth, "load_credentials_from_file", autospec=True
- ) as load_creds, mock.patch.object(
- google.auth, "default", autospec=True
- ) as adc, mock.patch.object(
- grpc_helpers, "create_channel"
- ) as create_channel:
- creds = ga_credentials.AnonymousCredentials()
- file_creds = ga_credentials.AnonymousCredentials()
- load_creds.return_value = (file_creds, None)
- adc.return_value = (creds, None)
- client = client_class(client_options=options, transport=transport_name)
- create_channel.assert_called_with(
- "aiplatform.googleapis.com:443",
- credentials=file_creds,
- credentials_file=None,
- quota_project_id=None,
- default_scopes=("https://www.googleapis.com/auth/cloud-platform",),
- scopes=None,
- default_host="aiplatform.googleapis.com",
- ssl_credentials=None,
- options=[
- ("grpc.max_send_message_length", -1),
- ("grpc.max_receive_message_length", -1),
- ],
- )
-
def test_vizier_service_client_client_options_from_dict():
with mock.patch(
@@ -606,6 +577,72 @@ def test_vizier_service_client_client_options_from_dict():
)
+@pytest.mark.parametrize(
+ "client_class,transport_class,transport_name,grpc_helpers",
+ [
+ (
+ VizierServiceClient,
+ transports.VizierServiceGrpcTransport,
+ "grpc",
+ grpc_helpers,
+ ),
+ (
+ VizierServiceAsyncClient,
+ transports.VizierServiceGrpcAsyncIOTransport,
+ "grpc_asyncio",
+ grpc_helpers_async,
+ ),
+ ],
+)
+def test_vizier_service_client_create_channel_credentials_file(
+ client_class, transport_class, transport_name, grpc_helpers
+):
+ # Check the case credentials file is provided.
+ options = client_options.ClientOptions(credentials_file="credentials.json")
+
+ with mock.patch.object(transport_class, "__init__") as patched:
+ patched.return_value = None
+ client = client_class(client_options=options, transport=transport_name)
+ patched.assert_called_once_with(
+ credentials=None,
+ credentials_file="credentials.json",
+ host=client.DEFAULT_ENDPOINT,
+ scopes=None,
+ client_cert_source_for_mtls=None,
+ quota_project_id=None,
+ client_info=transports.base.DEFAULT_CLIENT_INFO,
+ always_use_jwt_access=True,
+ )
+
+ # test that the credentials from file are saved and used as the credentials.
+ with mock.patch.object(
+ google.auth, "load_credentials_from_file", autospec=True
+ ) as load_creds, mock.patch.object(
+ google.auth, "default", autospec=True
+ ) as adc, mock.patch.object(
+ grpc_helpers, "create_channel"
+ ) as create_channel:
+ creds = ga_credentials.AnonymousCredentials()
+ file_creds = ga_credentials.AnonymousCredentials()
+ load_creds.return_value = (file_creds, None)
+ adc.return_value = (creds, None)
+ client = client_class(client_options=options, transport=transport_name)
+ create_channel.assert_called_with(
+ "aiplatform.googleapis.com:443",
+ credentials=file_creds,
+ credentials_file=None,
+ quota_project_id=None,
+ default_scopes=("https://www.googleapis.com/auth/cloud-platform",),
+ scopes=None,
+ default_host="aiplatform.googleapis.com",
+ ssl_credentials=None,
+ options=[
+ ("grpc.max_send_message_length", -1),
+ ("grpc.max_receive_message_length", -1),
+ ],
+ )
+
+
@pytest.mark.parametrize("request_type", [vizier_service.CreateStudyRequest, dict,])
def test_create_study(request_type, transport: str = "grpc"):
client = VizierServiceClient(
From 095bea23bc15a490ddbb1a8edac7f5db626bc659 Mon Sep 17 00:00:00 2001
From: Morgan Du
Date: Wed, 23 Feb 2022 21:13:58 -0800
Subject: [PATCH 06/15] fix: enforce bq SchemaField field_type and mode using
feature value_type (#1019)
* samples: add feature store samples
* fix: ensure bq has a data type for the temp table before ingestion
* Revert "samples: add feature store samples"
This reverts commit 24ece4d4991e0fb5bc0380f552209f902714609a.
* fix: double to float64
* fix: add job_config for repeated data type
* fix: remove print
* fix: bq_schema and tests
* fix: add unit tests for get_bq_schema and integration tests for string array ingestion validation
* fix: compat services __init__ had featurestore client versions misplaced
* fix: unit tests by adding assert for bq schema field mock
---
.../aiplatform/compat/services/__init__.py | 8 +-
.../aiplatform/featurestore/entity_type.py | 51 +++++++++-
.../aiplatform/utils/featurestore_utils.py | 12 +++
tests/system/aiplatform/test_featurestore.py | 14 +--
tests/unit/aiplatform/test_featurestores.py | 98 ++++++++++++++++---
5 files changed, 155 insertions(+), 28 deletions(-)
diff --git a/google/cloud/aiplatform/compat/services/__init__.py b/google/cloud/aiplatform/compat/services/__init__.py
index f8545a688c..10b02c483c 100644
--- a/google/cloud/aiplatform/compat/services/__init__.py
+++ b/google/cloud/aiplatform/compat/services/__init__.py
@@ -87,8 +87,8 @@
# v1
dataset_service_client_v1,
endpoint_service_client_v1,
- featurestore_online_serving_service_client_v1beta1,
- featurestore_service_client_v1beta1,
+ featurestore_online_serving_service_client_v1,
+ featurestore_service_client_v1,
job_service_client_v1,
metadata_service_client_v1,
model_service_client_v1,
@@ -99,8 +99,8 @@
# v1beta1
dataset_service_client_v1beta1,
endpoint_service_client_v1beta1,
- featurestore_online_serving_service_client_v1,
- featurestore_service_client_v1,
+ featurestore_online_serving_service_client_v1beta1,
+ featurestore_service_client_v1beta1,
job_service_client_v1beta1,
model_service_client_v1beta1,
pipeline_service_client_v1beta1,
diff --git a/google/cloud/aiplatform/featurestore/entity_type.py b/google/cloud/aiplatform/featurestore/entity_type.py
index 274f89d2aa..7fc0a13965 100644
--- a/google/cloud/aiplatform/featurestore/entity_type.py
+++ b/google/cloud/aiplatform/featurestore/entity_type.py
@@ -1238,6 +1238,17 @@ def ingest_from_df(
)
self.wait()
+
+ feature_source_fields = feature_source_fields or {}
+ bq_schema = []
+ for feature_id in feature_ids:
+ feature_field_name = feature_source_fields.get(feature_id, feature_id)
+ feature_value_type = self.get_feature(feature_id).to_dict()["valueType"]
+ bq_schema_field = self._get_bq_schema_field(
+ feature_field_name, feature_value_type
+ )
+ bq_schema.append(bq_schema_field)
+
entity_type_name_components = self._parse_resource_name(self.resource_name)
featurestore_id, entity_type_id = (
entity_type_name_components["featurestore"],
@@ -1260,8 +1271,20 @@ def ingest_from_df(
temp_bq_dataset = bigquery_client.create_dataset(temp_bq_dataset)
try:
+
+ parquet_options = bigquery.format_options.ParquetOptions()
+ parquet_options.enable_list_inference = True
+
+ job_config = bigquery.LoadJobConfig(
+ schema=bq_schema,
+ source_format=bigquery.SourceFormat.PARQUET,
+ parquet_options=parquet_options,
+ )
+
job = bigquery_client.load_table_from_dataframe(
- dataframe=df_source, destination=temp_bq_table_id
+ dataframe=df_source,
+ destination=temp_bq_table_id,
+ job_config=job_config,
)
job.result()
@@ -1281,6 +1304,32 @@ def ingest_from_df(
return entity_type_obj
+ @staticmethod
+ def _get_bq_schema_field(
+ name: str, feature_value_type: str
+ ) -> bigquery.SchemaField:
+ """Helper method to get BigQuery Schema Field.
+
+ Args:
+ name (str):
+ Required. The name of the schema field, which can be either the feature_id,
+ or the field_name in BigQuery for the feature if different than the feature_id.
+ feature_value_type (str):
+ Required. The feature value_type.
+
+ Returns:
+ bigquery.SchemaField: bigquery.SchemaField
+ """
+ bq_data_type = utils.featurestore_utils.FEATURE_STORE_VALUE_TYPE_TO_BQ_DATA_TYPE_MAP[
+ feature_value_type
+ ]
+ bq_schema_field = bigquery.SchemaField(
+ name=name,
+ field_type=bq_data_type["field_type"],
+ mode=bq_data_type.get("mode") or "NULLABLE",
+ )
+ return bq_schema_field
+
@staticmethod
def _instantiate_featurestore_online_client(
location: Optional[str] = None,
diff --git a/google/cloud/aiplatform/utils/featurestore_utils.py b/google/cloud/aiplatform/utils/featurestore_utils.py
index 45dbbbf44f..392773661e 100644
--- a/google/cloud/aiplatform/utils/featurestore_utils.py
+++ b/google/cloud/aiplatform/utils/featurestore_utils.py
@@ -33,6 +33,18 @@
_FEATURE_VALUE_TYPE_UNSPECIFIED = "VALUE_TYPE_UNSPECIFIED"
+FEATURE_STORE_VALUE_TYPE_TO_BQ_DATA_TYPE_MAP = {
+ "BOOL": {"field_type": "BOOL"},
+ "BOOL_ARRAY": {"field_type": "BOOL", "mode": "REPEATED"},
+ "DOUBLE": {"field_type": "FLOAT64"},
+ "DOUBLE_ARRAY": {"field_type": "FLOAT64", "mode": "REPEATED"},
+ "INT64": {"field_type": "INT64"},
+ "INT64_ARRAY": {"field_type": "INT64", "mode": "REPEATED"},
+ "STRING": {"field_type": "STRING"},
+ "STRING_ARRAY": {"field_type": "STRING", "mode": "REPEATED"},
+ "BYTES": {"field_type": "BYTES"},
+}
+
def validate_id(resource_id: str) -> None:
"""Validates feature store resource ID pattern.
diff --git a/tests/system/aiplatform/test_featurestore.py b/tests/system/aiplatform/test_featurestore.py
index cbbfd82efb..9adabcaf3b 100644
--- a/tests/system/aiplatform/test_featurestore.py
+++ b/tests/system/aiplatform/test_featurestore.py
@@ -219,7 +219,7 @@ def test_batch_create_features(self, shared_state):
movie_feature_configs = {
_TEST_MOVIE_TITLE_FEATURE_ID: {"value_type": "STRING"},
- _TEST_MOVIE_GENRES_FEATURE_ID: {"value_type": "STRING"},
+ _TEST_MOVIE_GENRES_FEATURE_ID: {"value_type": "STRING_ARRAY"},
_TEST_MOVIE_AVERAGE_RATING_FEATURE_ID: {"value_type": "DOUBLE"},
}
@@ -277,14 +277,14 @@ def test_ingest_feature_values_from_df_using_feature_time_column_and_online_read
"movie_id": "movie_01",
"average_rating": 4.9,
"title": "The Shawshank Redemption",
- "genres": "Drama",
+ "genres": ["Drama"],
"update_time": "2021-08-20 20:44:11.094375+00:00",
},
{
"movie_id": "movie_02",
"average_rating": 4.2,
"title": "The Shining",
- "genres": "Horror",
+ "genres": ["Horror"],
"update_time": "2021-08-20 20:44:11.094375+00:00",
},
],
@@ -312,13 +312,13 @@ def test_ingest_feature_values_from_df_using_feature_time_column_and_online_read
"movie_id": "movie_01",
"average_rating": 4.9,
"title": "The Shawshank Redemption",
- "genres": "Drama",
+ "genres": ["Drama"],
},
{
"movie_id": "movie_02",
"average_rating": 4.2,
"title": "The Shining",
- "genres": "Horror",
+ "genres": ["Horror"],
},
]
expected_movie_entity_views_df_after_ingest = pd.DataFrame(
@@ -350,13 +350,13 @@ def test_ingest_feature_values_from_df_using_feature_time_datetime_and_online_re
"movie_id": "movie_03",
"average_rating": 4.5,
"title": "Cinema Paradiso",
- "genres": "Romance",
+ "genres": ["Romance"],
},
{
"movie_id": "movie_04",
"average_rating": 4.6,
"title": "The Dark Knight",
- "genres": "Action",
+ "genres": ["Action"],
},
],
columns=["movie_id", "average_rating", "title", "genres"],
diff --git a/tests/unit/aiplatform/test_featurestores.py b/tests/unit/aiplatform/test_featurestores.py
index df7d544d95..92c5be0f81 100644
--- a/tests/unit/aiplatform/test_featurestores.py
+++ b/tests/unit/aiplatform/test_featurestores.py
@@ -114,6 +114,8 @@
}
_TEST_FEATURE_VALUE_TYPE = _TEST_INT_TYPE
+_TEST_FEATURE_VALUE_TYPE_BQ_FIELD_TYPE = "INT64"
+_TEST_FEATURE_VALUE_TYPE_BQ_MODE = "NULLABLE"
_ARRAY_FEATURE_VALUE_TYPE_TO_GCA_TYPE_MAP = {
_TEST_BOOL_ARR_TYPE: gca_types.BoolArray,
@@ -211,6 +213,9 @@
"my_feature_id_1": {"value_type": _TEST_FEATURE_VALUE_TYPE_STR},
}
+_TEST_IMPORTING_FEATURE_ID = "my_feature_id_1"
+_TEST_IMPORTING_FEATURE_SOURCE_FIELD = "my_feature_id_1_source_field"
+
_TEST_IMPORTING_FEATURE_IDS = ["my_feature_id_1"]
_TEST_IMPORTING_FEATURE_SOURCE_FIELDS = {
@@ -363,22 +368,22 @@ def bq_init_dataset_mock(bq_dataset_mock):
@pytest.fixture
-def bq_create_dataset_mock(bq_init_client_mock):
- with patch.object(bigquery.Client, "create_dataset") as bq_create_dataset_mock:
+def bq_create_dataset_mock(bq_client_mock):
+ with patch.object(bq_client_mock, "create_dataset") as bq_create_dataset_mock:
yield bq_create_dataset_mock
@pytest.fixture
-def bq_load_table_from_dataframe_mock(bq_init_client_mock):
+def bq_load_table_from_dataframe_mock(bq_client_mock):
with patch.object(
- bigquery.Client, "load_table_from_dataframe"
+ bq_client_mock, "load_table_from_dataframe"
) as bq_load_table_from_dataframe_mock:
yield bq_load_table_from_dataframe_mock
@pytest.fixture
-def bq_delete_dataset_mock(bq_init_client_mock):
- with patch.object(bigquery.Client, "delete_dataset") as bq_delete_dataset_mock:
+def bq_delete_dataset_mock(bq_client_mock):
+ with patch.object(bq_client_mock, "delete_dataset") as bq_delete_dataset_mock:
yield bq_delete_dataset_mock
@@ -396,9 +401,9 @@ def bqs_init_client_mock(bqs_client_mock):
@pytest.fixture
-def bqs_create_read_session(bqs_init_client_mock):
+def bqs_create_read_session(bqs_client_mock):
with patch.object(
- bigquery_storage.BigQueryReadClient, "create_read_session"
+ bqs_client_mock, "create_read_session"
) as bqs_create_read_session:
read_session_proto = gcbqs_stream.ReadSession()
read_session_proto.streams = [gcbqs_stream.ReadStream()]
@@ -406,6 +411,19 @@ def bqs_create_read_session(bqs_init_client_mock):
yield bqs_create_read_session
+@pytest.fixture
+def bq_schema_field_mock():
+ mock = MagicMock(bigquery.SchemaField)
+ yield mock
+
+
+@pytest.fixture
+def bq_init_schema_field_mock(bq_schema_field_mock):
+ with patch.object(bigquery, "SchemaField") as bq_init_schema_field_mock:
+ bq_init_schema_field_mock.return_value = bq_schema_field_mock
+ yield bq_init_schema_field_mock
+
+
# All Featurestore Mocks
@pytest.fixture
def get_featurestore_mock():
@@ -1672,14 +1690,19 @@ def test_ingest_from_gcs_with_invalid_gcs_source_type(self):
@pytest.mark.usefixtures(
"get_entity_type_mock",
+ "get_feature_mock",
"bq_init_client_mock",
"bq_init_dataset_mock",
"bq_create_dataset_mock",
- "bq_load_table_from_dataframe_mock",
"bq_delete_dataset_mock",
)
@patch("uuid.uuid4", uuid_mock)
- def test_ingest_from_df_using_column(self, import_feature_values_mock):
+ def test_ingest_from_df_using_column(
+ self,
+ import_feature_values_mock,
+ bq_load_table_from_dataframe_mock,
+ bq_init_schema_field_mock,
+ ):
aiplatform.init(project=_TEST_PROJECT)
@@ -1701,7 +1724,7 @@ def test_ingest_from_df_using_column(self, import_feature_values_mock):
f"{expecte_temp_bq_dataset_id}.{_TEST_ENTITY_TYPE_ID}"
)
- true_import_feature_values_request = gca_featurestore_service.ImportFeatureValuesRequest(
+ expected_import_feature_values_request = gca_featurestore_service.ImportFeatureValuesRequest(
entity_type=_TEST_ENTITY_TYPE_NAME,
feature_specs=[
gca_featurestore_service.ImportFeatureValuesRequest.FeatureSpec(
@@ -1714,20 +1737,32 @@ def test_ingest_from_df_using_column(self, import_feature_values_mock):
feature_time_field=_TEST_FEATURE_TIME_FIELD,
)
+ bq_init_schema_field_mock.assert_called_once_with(
+ name=_TEST_IMPORTING_FEATURE_SOURCE_FIELD,
+ field_type=_TEST_FEATURE_VALUE_TYPE_BQ_FIELD_TYPE,
+ mode=_TEST_FEATURE_VALUE_TYPE_BQ_MODE,
+ )
+
import_feature_values_mock.assert_called_once_with(
- request=true_import_feature_values_request, metadata=_TEST_REQUEST_METADATA,
+ request=expected_import_feature_values_request,
+ metadata=_TEST_REQUEST_METADATA,
)
@pytest.mark.usefixtures(
"get_entity_type_mock",
+ "get_feature_mock",
"bq_init_client_mock",
"bq_init_dataset_mock",
"bq_create_dataset_mock",
- "bq_load_table_from_dataframe_mock",
"bq_delete_dataset_mock",
)
@patch("uuid.uuid4", uuid_mock)
- def test_ingest_from_df_using_datetime(self, import_feature_values_mock):
+ def test_ingest_from_df_using_datetime(
+ self,
+ import_feature_values_mock,
+ bq_load_table_from_dataframe_mock,
+ bq_init_schema_field_mock,
+ ):
aiplatform.init(project=_TEST_PROJECT)
my_entity_type = aiplatform.EntityType(entity_type_name=_TEST_ENTITY_TYPE_NAME)
@@ -1752,7 +1787,7 @@ def test_ingest_from_df_using_datetime(self, import_feature_values_mock):
timestamp_proto = timestamp_pb2.Timestamp()
timestamp_proto.FromDatetime(_TEST_FEATURE_TIME_DATETIME)
- true_import_feature_values_request = gca_featurestore_service.ImportFeatureValuesRequest(
+ expected_import_feature_values_request = gca_featurestore_service.ImportFeatureValuesRequest(
entity_type=_TEST_ENTITY_TYPE_NAME,
feature_specs=[
gca_featurestore_service.ImportFeatureValuesRequest.FeatureSpec(
@@ -1765,8 +1800,39 @@ def test_ingest_from_df_using_datetime(self, import_feature_values_mock):
feature_time=timestamp_proto,
)
+ bq_init_schema_field_mock.assert_called_once_with(
+ name=_TEST_IMPORTING_FEATURE_SOURCE_FIELD,
+ field_type=_TEST_FEATURE_VALUE_TYPE_BQ_FIELD_TYPE,
+ mode=_TEST_FEATURE_VALUE_TYPE_BQ_MODE,
+ )
+
import_feature_values_mock.assert_called_once_with(
- request=true_import_feature_values_request, metadata=_TEST_REQUEST_METADATA,
+ request=expected_import_feature_values_request,
+ metadata=_TEST_REQUEST_METADATA,
+ )
+
+ @pytest.mark.parametrize(
+ "feature_value_type, expected_field_type, expected_mode",
+ [
+ ("BOOL", "BOOL", "NULLABLE"),
+ ("BOOL_ARRAY", "BOOL", "REPEATED"),
+ ("DOUBLE", "FLOAT64", "NULLABLE"),
+ ("DOUBLE_ARRAY", "FLOAT64", "REPEATED"),
+ ("INT64", "INT64", "NULLABLE"),
+ ("INT64_ARRAY", "INT64", "REPEATED"),
+ ("STRING", "STRING", "NULLABLE"),
+ ("STRING_ARRAY", "STRING", "REPEATED"),
+ ("BYTES", "BYTES", "NULLABLE"),
+ ],
+ )
+ def test_get_bq_schema_field(
+ self, feature_value_type, expected_field_type, expected_mode
+ ):
+ expected_bq_schema_field = bigquery.SchemaField(
+ name=_TEST_FEATURE_ID, field_type=expected_field_type, mode=expected_mode,
+ )
+ assert expected_bq_schema_field == aiplatform.EntityType._get_bq_schema_field(
+ name=_TEST_FEATURE_ID, feature_value_type=feature_value_type
)
@pytest.mark.usefixtures("get_entity_type_mock", "get_feature_mock")
From 5ee6354a12c6422015acb81caef32d6d2f52c838 Mon Sep 17 00:00:00 2001
From: nayaknishant
Date: Thu, 24 Feb 2022 13:20:25 -0800
Subject: [PATCH 07/15] docs(samples): add samples to create/delete
featurestore (#980)
* feat: SDK feature store samples (create/delete fs)
* feat: adding to conftest.py
* docs(samples): fixed testing
* docs(samples): fixed testing
* docs(samples): fixed testing
* docs(samples) added changes
* docs(samples): style issues
* Update samples/model-builder/create_featurestore_sample_test.py
Co-authored-by: Morgan Du
* Update samples/model-builder/test_constants.py
Co-authored-by: Morgan Du
* Update samples/model-builder/create_featurestore_sample_test.py
Co-authored-by: Morgan Du
Co-authored-by: Morgan Du
---
samples/model-builder/conftest.py | 35 ++++++++++++++++
.../create_featurestore_sample.py | 41 +++++++++++++++++++
.../create_featurestore_sample_test.py | 37 +++++++++++++++++
.../delete_featurestore_sample.py | 34 +++++++++++++++
.../delete_featurestore_sample_test.py | 41 +++++++++++++++++++
samples/model-builder/test_constants.py | 7 ++++
6 files changed, 195 insertions(+)
create mode 100644 samples/model-builder/create_featurestore_sample.py
create mode 100644 samples/model-builder/create_featurestore_sample_test.py
create mode 100644 samples/model-builder/delete_featurestore_sample.py
create mode 100644 samples/model-builder/delete_featurestore_sample_test.py
diff --git a/samples/model-builder/conftest.py b/samples/model-builder/conftest.py
index c6bbd30fc0..d18fca064d 100644
--- a/samples/model-builder/conftest.py
+++ b/samples/model-builder/conftest.py
@@ -364,3 +364,38 @@ def mock_endpoint_explain(mock_endpoint):
with patch.object(mock_endpoint, "explain") as mock_endpoint_explain:
mock_get_endpoint.return_value = mock_endpoint
yield mock_endpoint_explain
+
+
+"""
+----------------------------------------------------------------------------
+FeatureStore Fixtures
+----------------------------------------------------------------------------
+"""
+
+
+@pytest.fixture
+def mock_featurestore():
+ mock = MagicMock(aiplatform.featurestore.Featurestore)
+ yield mock
+
+
+@pytest.fixture
+def mock_get_featurestore(mock_featurestore):
+ with patch.object(aiplatform.featurestore, "Featurestore") as mock_get_featurestore:
+ mock_get_featurestore.return_value = mock_featurestore
+ yield mock_get_featurestore
+
+
+@pytest.fixture
+def mock_create_featurestore(mock_featurestore):
+ with patch.object(
+ aiplatform.featurestore.Featurestore, "create"
+ ) as mock_create_featurestore:
+ mock_create_featurestore.return_value = mock_featurestore
+ yield mock_create_featurestore
+
+
+@pytest.fixture
+def mock_delete_featurestore(mock_featurestore):
+ with patch.object(mock_featurestore, "delete") as mock_delete_featurestore:
+ yield mock_delete_featurestore
diff --git a/samples/model-builder/create_featurestore_sample.py b/samples/model-builder/create_featurestore_sample.py
new file mode 100644
index 0000000000..48a143aa11
--- /dev/null
+++ b/samples/model-builder/create_featurestore_sample.py
@@ -0,0 +1,41 @@
+# Copyright 2022 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# https://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+
+# [START aiplatform_sdk_create_featurestore_sample]
+from google.cloud import aiplatform
+
+
+def create_featurestore_sample(
+ project: str,
+ location: str,
+ featurestore_id: str,
+ online_store_fixed_node_count: int = 1,
+ sync: bool = True,
+):
+
+ aiplatform.init(project=project, location=location)
+
+ fs = aiplatform.Featurestore.create(
+ featurestore_id=featurestore_id,
+ online_store_fixed_node_count=online_store_fixed_node_count,
+ sync=sync,
+ )
+
+ fs.wait()
+
+ return fs
+
+
+# [END aiplatform_sdk_create_featurestore_sample]
diff --git a/samples/model-builder/create_featurestore_sample_test.py b/samples/model-builder/create_featurestore_sample_test.py
new file mode 100644
index 0000000000..e133f4582b
--- /dev/null
+++ b/samples/model-builder/create_featurestore_sample_test.py
@@ -0,0 +1,37 @@
+# Copyright 2022 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# https://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import create_featurestore_sample
+import test_constants as constants
+
+
+def test_create_featurestore_sample(mock_sdk_init, mock_create_featurestore):
+
+ create_featurestore_sample.create_featurestore_sample(
+ project=constants.PROJECT,
+ location=constants.LOCATION,
+ featurestore_id=constants.FEAUTURESTORE_ID,
+ online_store_fixed_node_count=constants.ONLINE_STORE_FIXED_NODE_COUNT,
+ sync=constants.SYNC,
+ )
+
+ mock_sdk_init.assert_called_once_with(
+ project=constants.PROJECT, location=constants.LOCATION
+ )
+
+ mock_create_featurestore.assert_called_once_with(
+ featurestore_id=constants.FEAUTURESTORE_ID,
+ online_store_fixed_node_count=constants.ONLINE_STORE_FIXED_NODE_COUNT,
+ sync=constants.SYNC,
+ )
diff --git a/samples/model-builder/delete_featurestore_sample.py b/samples/model-builder/delete_featurestore_sample.py
new file mode 100644
index 0000000000..a03a6d1d5a
--- /dev/null
+++ b/samples/model-builder/delete_featurestore_sample.py
@@ -0,0 +1,34 @@
+# Copyright 2022 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# https://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+
+# [START aiplatform_sdk_delete_featurestore_sample]
+from google.cloud import aiplatform
+
+
+def delete_featurestore_sample(
+ project: str,
+ location: str,
+ featurestore_name: str,
+ sync: bool = True,
+ force: bool = True,
+):
+
+ aiplatform.init(project=project, location=location)
+
+ fs = aiplatform.featurestore.Featurestore(featurestore_name=featurestore_name)
+ fs.delete(sync=sync, force=force)
+
+
+# [END aiplatform_sdk_delete_featurestore_sample]
diff --git a/samples/model-builder/delete_featurestore_sample_test.py b/samples/model-builder/delete_featurestore_sample_test.py
new file mode 100644
index 0000000000..5361c8c881
--- /dev/null
+++ b/samples/model-builder/delete_featurestore_sample_test.py
@@ -0,0 +1,41 @@
+# Copyright 2022 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# https://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import delete_featurestore_sample
+import test_constants as constants
+
+
+def test_delete_featurestore_sample(
+ mock_sdk_init, mock_get_featurestore, mock_delete_featurestore
+):
+
+ delete_featurestore_sample.delete_featurestore_sample(
+ project=constants.PROJECT,
+ location=constants.LOCATION,
+ featurestore_name=constants.FEAUTURESTORE_NAME,
+ sync=constants.SYNC,
+ force=constants.FORCE,
+ )
+
+ mock_sdk_init.assert_called_once_with(
+ project=constants.PROJECT, location=constants.LOCATION
+ )
+
+ mock_get_featurestore.assert_called_once_with(
+ featurestore_name=constants.FEAUTURESTORE_NAME
+ )
+
+ mock_delete_featurestore.assert_called_once_with(
+ sync=constants.SYNC, force=constants.FORCE
+ )
diff --git a/samples/model-builder/test_constants.py b/samples/model-builder/test_constants.py
index 0e4e0f5273..ebd65211c3 100644
--- a/samples/model-builder/test_constants.py
+++ b/samples/model-builder/test_constants.py
@@ -197,3 +197,10 @@
)
PYTHON_MODULE_NAME = "trainer.task"
MODEL_TYPE = "CLOUD"
+
+# Feature store constants
+FEAUTURESTORE_NAME = "projects/123/locations/us-central1/featurestores/featurestore_id"
+FEAUTURESTORE_ID = "featurestore_id"
+ONLINE_STORE_FIXED_NODE_COUNT = 1
+SYNC = True
+FORCE = True
From 5fe59a4015882d56c22f9973aff888966dd53a2e Mon Sep 17 00:00:00 2001
From: dwkk-google <98289452+dwkk-google@users.noreply.github.com>
Date: Fri, 25 Feb 2022 19:01:43 +0000
Subject: [PATCH 08/15] feat: add additional_experiement flag in the tables and
forecasting training job (#979)
* Update training_jobs.py
* Update test_automl_forecasting_training_jobs.py
* Update training_jobs.py
* Update test_automl_tabular_training_jobs.py
* Update test_automl_forecasting_training_jobs.py
* Update test_automl_tabular_training_jobs.py
* Update google/cloud/aiplatform/training_jobs.py
Co-authored-by: sasha-gitg <44654632+sasha-gitg@users.noreply.github.com>
* Update google/cloud/aiplatform/training_jobs.py
Co-authored-by: sasha-gitg <44654632+sasha-gitg@users.noreply.github.com>
* Update test_automl_forecasting_training_jobs.py
* Update test_automl_tabular_training_jobs.py
* Update training_jobs.py
* Update training_jobs.py
Co-authored-by: sasha-gitg <44654632+sasha-gitg@users.noreply.github.com>
---
google/cloud/aiplatform/training_jobs.py | 208 +-----------------
.../test_automl_forecasting_training_jobs.py | 74 +------
.../test_automl_tabular_training_jobs.py | 3 +-
3 files changed, 21 insertions(+), 264 deletions(-)
diff --git a/google/cloud/aiplatform/training_jobs.py b/google/cloud/aiplatform/training_jobs.py
index a0e8ed8125..52a844b248 100644
--- a/google/cloud/aiplatform/training_jobs.py
+++ b/google/cloud/aiplatform/training_jobs.py
@@ -3371,6 +3371,7 @@ def run(
export_evaluated_data_items: bool = False,
export_evaluated_data_items_bigquery_destination_uri: Optional[str] = None,
export_evaluated_data_items_override_destination: bool = False,
+ additional_experiments: Optional[List[str]] = None,
sync: bool = True,
) -> models.Model:
"""Runs the training job and returns a model.
@@ -3497,6 +3498,8 @@ def run(
Applies only if [export_evaluated_data_items] is True and
[export_evaluated_data_items_bigquery_destination_uri] is specified.
+ additional_experiments (List[str]):
+ Optional. Additional experiment flags for the automl tables training.
sync (bool):
Whether to execute this method synchronously. If False, this method
will be executed in concurrent Future and any downstream object will
@@ -3519,6 +3522,9 @@ def run(
if self._has_run:
raise RuntimeError("AutoML Tabular Training has already run.")
+ if additional_experiments:
+ self._add_additional_experiments(additional_experiments)
+
return self._run(
dataset=dataset,
target_column=target_column,
@@ -3961,6 +3967,7 @@ def run(
budget_milli_node_hours: int = 1000,
model_display_name: Optional[str] = None,
model_labels: Optional[Dict[str, str]] = None,
+ additional_experiments: Optional[List[str]] = None,
sync: bool = True,
) -> models.Model:
"""Runs the training job and returns a model.
@@ -4107,6 +4114,8 @@ def run(
are allowed.
See https://goo.gl/xmQnxf for more information
and examples of labels.
+ additional_experiments (List[str]):
+ Optional. Additional experiment flags for the time series forecasting training.
sync (bool):
Whether to execute this method synchronously. If False, this method
will be executed in concurrent Future and any downstream object will
@@ -4132,6 +4141,9 @@ def run(
if self._has_run:
raise RuntimeError("AutoML Forecasting Training has already run.")
+ if additional_experiments:
+ self._add_additional_experiments(additional_experiments)
+
return self._run(
dataset=dataset,
target_column=target_column,
@@ -4160,202 +4172,6 @@ def run(
sync=sync,
)
- def _run_with_experiments(
- self,
- dataset: datasets.TimeSeriesDataset,
- target_column: str,
- time_column: str,
- time_series_identifier_column: str,
- unavailable_at_forecast_columns: List[str],
- available_at_forecast_columns: List[str],
- forecast_horizon: int,
- data_granularity_unit: str,
- data_granularity_count: int,
- predefined_split_column_name: Optional[str] = None,
- weight_column: Optional[str] = None,
- time_series_attribute_columns: Optional[List[str]] = None,
- context_window: Optional[int] = None,
- export_evaluated_data_items: bool = False,
- export_evaluated_data_items_bigquery_destination_uri: Optional[str] = None,
- export_evaluated_data_items_override_destination: bool = False,
- quantiles: Optional[List[float]] = None,
- validation_options: Optional[str] = None,
- budget_milli_node_hours: int = 1000,
- model_display_name: Optional[str] = None,
- model_labels: Optional[Dict[str, str]] = None,
- sync: bool = True,
- additional_experiments: Optional[List[str]] = None,
- ) -> models.Model:
- """Runs the training job with experiment flags and returns a model.
-
- The training data splits are set by default: Roughly 80% will be used for training,
- 10% for validation, and 10% for test.
-
- Args:
- dataset (datasets.TimeSeriesDataset):
- Required. The dataset within the same Project from which data will be used to train the Model. The
- Dataset must use schema compatible with Model being trained,
- and what is compatible should be described in the used
- TrainingPipeline's [training_task_definition]
- [google.cloud.aiplatform.v1beta1.TrainingPipeline.training_task_definition].
- For time series Datasets, all their data is exported to
- training, to pick and choose from.
- target_column (str):
- Required. Name of the column that the Model is to predict values for.
- time_column (str):
- Required. Name of the column that identifies time order in the time series.
- time_series_identifier_column (str):
- Required. Name of the column that identifies the time series.
- unavailable_at_forecast_columns (List[str]):
- Required. Column names of columns that are unavailable at forecast.
- Each column contains information for the given entity (identified by the
- [time_series_identifier_column]) that is unknown before the forecast
- (e.g. population of a city in a given year, or weather on a given day).
- available_at_forecast_columns (List[str]):
- Required. Column names of columns that are available at forecast.
- Each column contains information for the given entity (identified by the
- [time_series_identifier_column]) that is known at forecast.
- forecast_horizon: (int):
- Required. The amount of time into the future for which forecasted values for the target are
- returned. Expressed in number of units defined by the [data_granularity_unit] and
- [data_granularity_count] field. Inclusive.
- data_granularity_unit (str):
- Required. The data granularity unit. Accepted values are ``minute``,
- ``hour``, ``day``, ``week``, ``month``, ``year``.
- data_granularity_count (int):
- Required. The number of data granularity units between data points in the training
- data. If [data_granularity_unit] is `minute`, can be 1, 5, 10, 15, or 30. For all other
- values of [data_granularity_unit], must be 1.
- predefined_split_column_name (str):
- Optional. The key is a name of one of the Dataset's data
- columns. The value of the key (either the label's value or
- value in the column) must be one of {``TRAIN``,
- ``VALIDATE``, ``TEST``}, and it defines to which set the
- given piece of data is assigned. If for a piece of data the
- key is not present or has an invalid value, that piece is
- ignored by the pipeline.
-
- Supported only for tabular and time series Datasets.
- weight_column (str):
- Optional. Name of the column that should be used as the weight column.
- Higher values in this column give more importance to the row
- during Model training. The column must have numeric values between 0 and
- 10000 inclusively, and 0 value means that the row is ignored.
- If the weight column field is not set, then all rows are assumed to have
- equal weight of 1.
- time_series_attribute_columns (List[str]):
- Optional. Column names that should be used as attribute columns.
- Each column is constant within a time series.
- context_window (int):
- Optional. The amount of time into the past training and prediction data is used for
- model training and prediction respectively. Expressed in number of units defined by the
- [data_granularity_unit] and [data_granularity_count] fields. When not provided uses the
- default value of 0 which means the model sets each series context window to be 0 (also
- known as "cold start"). Inclusive.
- export_evaluated_data_items (bool):
- Whether to export the test set predictions to a BigQuery table.
- If False, then the export is not performed.
- export_evaluated_data_items_bigquery_destination_uri (string):
- Optional. URI of desired destination BigQuery table for exported test set predictions.
-
- Expected format:
- ``bq://::``
-
- If not specified, then results are exported to the following auto-created BigQuery
- table:
- ``:export_evaluated_examples__.evaluated_examples``
-
- Applies only if [export_evaluated_data_items] is True.
- export_evaluated_data_items_override_destination (bool):
- Whether to override the contents of [export_evaluated_data_items_bigquery_destination_uri],
- if the table exists, for exported test set predictions. If False, and the
- table exists, then the training job will fail.
-
- Applies only if [export_evaluated_data_items] is True and
- [export_evaluated_data_items_bigquery_destination_uri] is specified.
- quantiles (List[float]):
- Quantiles to use for the `minizmize-quantile-loss`
- [AutoMLForecastingTrainingJob.optimization_objective]. This argument is required in
- this case.
-
- Accepts up to 5 quantiles in the form of a double from 0 to 1, exclusive.
- Each quantile must be unique.
- validation_options (str):
- Validation options for the data validation component. The available options are:
- "fail-pipeline" - (default), will validate against the validation and fail the pipeline
- if it fails.
- "ignore-validation" - ignore the results of the validation and continue the pipeline
- budget_milli_node_hours (int):
- Optional. The train budget of creating this Model, expressed in milli node
- hours i.e. 1,000 value in this field means 1 node hour.
- The training cost of the model will not exceed this budget. The final
- cost will be attempted to be close to the budget, though may end up
- being (even) noticeably smaller - at the backend's discretion. This
- especially may happen when further model training ceases to provide
- any improvements.
- If the budget is set to a value known to be insufficient to train a
- Model for the given training set, the training won't be attempted and
- will error.
- The minimum value is 1000 and the maximum is 72000.
- model_display_name (str):
- Optional. If the script produces a managed Vertex AI Model. The display name of
- the Model. The name can be up to 128 characters long and can be consist
- of any UTF-8 characters.
-
- If not provided upon creation, the job's display_name is used.
- model_labels (Dict[str, str]):
- Optional. The labels with user-defined metadata to
- organize your Models.
- Label keys and values can be no longer than 64
- characters (Unicode codepoints), can only
- contain lowercase letters, numeric characters,
- underscores and dashes. International characters
- are allowed.
- See https://goo.gl/xmQnxf for more information
- and examples of labels.
- sync (bool):
- Whether to execute this method synchronously. If False, this method
- will be executed in concurrent Future and any downstream object will
- be immediately returned and synced when the Future has completed.
- additional_experiments (List[str]):
- Additional experiment flags for the time series forcasting training.
-
- Returns:
- model: The trained Vertex AI Model resource or None if training did not
- produce a Vertex AI Model.
-
- Raises:
- RuntimeError: If Training job has already been run or is waiting to run.
- """
-
- if additional_experiments:
- self._add_additional_experiments(additional_experiments)
-
- return self.run(
- dataset=dataset,
- target_column=target_column,
- time_column=time_column,
- time_series_identifier_column=time_series_identifier_column,
- unavailable_at_forecast_columns=unavailable_at_forecast_columns,
- available_at_forecast_columns=available_at_forecast_columns,
- forecast_horizon=forecast_horizon,
- data_granularity_unit=data_granularity_unit,
- data_granularity_count=data_granularity_count,
- predefined_split_column_name=predefined_split_column_name,
- weight_column=weight_column,
- time_series_attribute_columns=time_series_attribute_columns,
- context_window=context_window,
- budget_milli_node_hours=budget_milli_node_hours,
- export_evaluated_data_items=export_evaluated_data_items,
- export_evaluated_data_items_bigquery_destination_uri=export_evaluated_data_items_bigquery_destination_uri,
- export_evaluated_data_items_override_destination=export_evaluated_data_items_override_destination,
- quantiles=quantiles,
- validation_options=validation_options,
- model_display_name=model_display_name,
- model_labels=model_labels,
- sync=sync,
- )
-
@base.optional_sync()
def _run(
self,
diff --git a/tests/unit/aiplatform/test_automl_forecasting_training_jobs.py b/tests/unit/aiplatform/test_automl_forecasting_training_jobs.py
index 9f758987bc..3ca54a8ad6 100644
--- a/tests/unit/aiplatform/test_automl_forecasting_training_jobs.py
+++ b/tests/unit/aiplatform/test_automl_forecasting_training_jobs.py
@@ -91,9 +91,7 @@
"validationOptions": _TEST_TRAINING_VALIDATION_OPTIONS,
"optimizationObjective": _TEST_TRAINING_OPTIMIZATION_OBJECTIVE_NAME,
}
-_TEST_TRAINING_TASK_INPUTS = json_format.ParseDict(
- _TEST_TRAINING_TASK_INPUTS_DICT, struct_pb2.Value(),
-)
+
_TEST_TRAINING_TASK_INPUTS_WITH_ADDITIONAL_EXPERIMENTS = json_format.ParseDict(
{
**_TEST_TRAINING_TASK_INPUTS_DICT,
@@ -102,6 +100,10 @@
struct_pb2.Value(),
)
+_TEST_TRAINING_TASK_INPUTS = json_format.ParseDict(
+ _TEST_TRAINING_TASK_INPUTS_DICT, struct_pb2.Value(),
+)
+
_TEST_DATASET_NAME = "test-dataset-name"
_TEST_MODEL_DISPLAY_NAME = "model-display-name"
@@ -269,6 +271,7 @@ def test_run_call_pipeline_service_create(
export_evaluated_data_items_override_destination=_TEST_TRAINING_EXPORT_EVALUATED_DATA_ITEMS_OVERRIDE_DESTINATION,
quantiles=_TEST_TRAINING_QUANTILES,
validation_options=_TEST_TRAINING_VALIDATION_OPTIONS,
+ additional_experiments=_TEST_ADDITIONAL_EXPERIMENTS,
sync=sync,
)
@@ -290,7 +293,7 @@ def test_run_call_pipeline_service_create(
display_name=_TEST_DISPLAY_NAME,
labels=_TEST_LABELS,
training_task_definition=schema.training_job.definition.automl_forecasting,
- training_task_inputs=_TEST_TRAINING_TASK_INPUTS,
+ training_task_inputs=_TEST_TRAINING_TASK_INPUTS_WITH_ADDITIONAL_EXPERIMENTS,
model_to_upload=true_managed_model,
input_data_config=true_input_data_config,
)
@@ -380,69 +383,6 @@ def test_run_call_pipeline_if_no_model_display_name_nor_model_labels(
training_pipeline=true_training_pipeline,
)
- @pytest.mark.usefixtures("mock_pipeline_service_get")
- @pytest.mark.parametrize("sync", [True, False])
- def test_run_with_experiments(
- self,
- mock_pipeline_service_create,
- mock_dataset_time_series,
- mock_model_service_get,
- sync,
- ):
- aiplatform.init(project=_TEST_PROJECT, staging_bucket=_TEST_BUCKET_NAME)
-
- job = AutoMLForecastingTrainingJob(
- display_name=_TEST_DISPLAY_NAME,
- optimization_objective=_TEST_TRAINING_OPTIMIZATION_OBJECTIVE_NAME,
- column_transformations=_TEST_TRAINING_COLUMN_TRANSFORMATIONS,
- )
-
- model_from_job = job._run_with_experiments(
- dataset=mock_dataset_time_series,
- target_column=_TEST_TRAINING_TARGET_COLUMN,
- time_column=_TEST_TRAINING_TIME_COLUMN,
- time_series_identifier_column=_TEST_TRAINING_TIME_SERIES_IDENTIFIER_COLUMN,
- unavailable_at_forecast_columns=_TEST_TRAINING_UNAVAILABLE_AT_FORECAST_COLUMNS,
- available_at_forecast_columns=_TEST_TRAINING_AVAILABLE_AT_FORECAST_COLUMNS,
- forecast_horizon=_TEST_TRAINING_FORECAST_HORIZON,
- data_granularity_unit=_TEST_TRAINING_DATA_GRANULARITY_UNIT,
- data_granularity_count=_TEST_TRAINING_DATA_GRANULARITY_COUNT,
- weight_column=_TEST_TRAINING_WEIGHT_COLUMN,
- time_series_attribute_columns=_TEST_TRAINING_TIME_SERIES_ATTRIBUTE_COLUMNS,
- context_window=_TEST_TRAINING_CONTEXT_WINDOW,
- budget_milli_node_hours=_TEST_TRAINING_BUDGET_MILLI_NODE_HOURS,
- export_evaluated_data_items=_TEST_TRAINING_EXPORT_EVALUATED_DATA_ITEMS,
- export_evaluated_data_items_bigquery_destination_uri=_TEST_TRAINING_EXPORT_EVALUATED_DATA_ITEMS_BIGQUERY_DESTINATION_URI,
- export_evaluated_data_items_override_destination=_TEST_TRAINING_EXPORT_EVALUATED_DATA_ITEMS_OVERRIDE_DESTINATION,
- quantiles=_TEST_TRAINING_QUANTILES,
- validation_options=_TEST_TRAINING_VALIDATION_OPTIONS,
- sync=sync,
- additional_experiments=_TEST_ADDITIONAL_EXPERIMENTS,
- )
-
- if not sync:
- model_from_job.wait()
-
- # Test that if defaults to the job display name
- true_managed_model = gca_model.Model(display_name=_TEST_DISPLAY_NAME)
-
- true_input_data_config = gca_training_pipeline.InputDataConfig(
- dataset_id=mock_dataset_time_series.name,
- )
-
- true_training_pipeline = gca_training_pipeline.TrainingPipeline(
- display_name=_TEST_DISPLAY_NAME,
- training_task_definition=schema.training_job.definition.automl_forecasting,
- training_task_inputs=_TEST_TRAINING_TASK_INPUTS_WITH_ADDITIONAL_EXPERIMENTS,
- model_to_upload=true_managed_model,
- input_data_config=true_input_data_config,
- )
-
- mock_pipeline_service_create.assert_called_once_with(
- parent=initializer.global_config.common_location_path(),
- training_pipeline=true_training_pipeline,
- )
-
@pytest.mark.usefixtures("mock_pipeline_service_get")
@pytest.mark.parametrize("sync", [True, False])
def test_run_call_pipeline_if_set_additional_experiments(
diff --git a/tests/unit/aiplatform/test_automl_tabular_training_jobs.py b/tests/unit/aiplatform/test_automl_tabular_training_jobs.py
index 8b00792944..7dc3c64acf 100644
--- a/tests/unit/aiplatform/test_automl_tabular_training_jobs.py
+++ b/tests/unit/aiplatform/test_automl_tabular_training_jobs.py
@@ -330,6 +330,7 @@ def test_run_call_pipeline_service_create(
weight_column=_TEST_TRAINING_WEIGHT_COLUMN,
budget_milli_node_hours=_TEST_TRAINING_BUDGET_MILLI_NODE_HOURS,
disable_early_stopping=_TEST_TRAINING_DISABLE_EARLY_STOPPING,
+ additional_experiments=_TEST_ADDITIONAL_EXPERIMENTS,
sync=sync,
)
@@ -354,7 +355,7 @@ def test_run_call_pipeline_service_create(
display_name=_TEST_DISPLAY_NAME,
labels=_TEST_LABELS,
training_task_definition=schema.training_job.definition.automl_tabular,
- training_task_inputs=_TEST_TRAINING_TASK_INPUTS,
+ training_task_inputs=_TEST_TRAINING_TASK_INPUTS_WITH_ADDITIONAL_EXPERIMENTS,
model_to_upload=true_managed_model,
input_data_config=true_input_data_config,
encryption_spec=_TEST_DEFAULT_ENCRYPTION_SPEC,
From d221e6bebd7fb98a8c6e3f3b8ae507f2f214128f Mon Sep 17 00:00:00 2001
From: nayaknishant
Date: Fri, 25 Feb 2022 13:55:32 -0800
Subject: [PATCH 09/15] docs(samples): added create feature and create entity
type samples and tests (#984)
* feat: SDK feature store samples (create/delete fs)
* feat: adding to conftest.py
* docs(samples): fixed testing
* docs(samples): fixed testing
* docs(samples): fixed testing
* docs(samples): added changes
* docs(samples): style issues
* adding create entity
* docs(samples): added create feature and entity type
* docs(samples): edited test
* docs(samples): edited style
* moving constants
* fixed dates
* Update samples/model-builder/create_featurestore_sample_test.py
Co-authored-by: Morgan Du
* Update samples/model-builder/test_constants.py
Co-authored-by: Morgan Du
* Update samples/model-builder/create_featurestore_sample_test.py
Co-authored-by: Morgan Du
* docs(samples): add samples to create/delete featurestore (#980)
* feat: SDK feature store samples (create/delete fs)
* feat: adding to conftest.py
* docs(samples): fixed testing
* docs(samples): fixed testing
* docs(samples): fixed testing
* docs(samples) added changes
* docs(samples): style issues
* Update samples/model-builder/create_featurestore_sample_test.py
Co-authored-by: Morgan Du
* Update samples/model-builder/test_constants.py
Co-authored-by: Morgan Du
* Update samples/model-builder/create_featurestore_sample_test.py
Co-authored-by: Morgan Du
Co-authored-by: Morgan Du
* Update samples/model-builder/test_constants.py
Co-authored-by: Morgan Du
* moving constants
* added variables, made fixes, fixed spelling
Co-authored-by: Morgan Du
---
samples/model-builder/conftest.py | 30 +++++++++++++
.../create_entity_type_sample.py | 35 +++++++++++++++
.../create_entity_type_sample_test.py | 35 +++++++++++++++
.../model-builder/create_feature_sample.py | 43 +++++++++++++++++++
.../create_feature_sample_test.py | 39 +++++++++++++++++
.../create_featurestore_sample_test.py | 4 +-
.../delete_featurestore_sample_test.py | 4 +-
samples/model-builder/test_constants.py | 9 +++-
8 files changed, 193 insertions(+), 6 deletions(-)
create mode 100644 samples/model-builder/create_entity_type_sample.py
create mode 100644 samples/model-builder/create_entity_type_sample_test.py
create mode 100644 samples/model-builder/create_feature_sample.py
create mode 100644 samples/model-builder/create_feature_sample_test.py
diff --git a/samples/model-builder/conftest.py b/samples/model-builder/conftest.py
index d18fca064d..64cfc85902 100644
--- a/samples/model-builder/conftest.py
+++ b/samples/model-builder/conftest.py
@@ -379,6 +379,18 @@ def mock_featurestore():
yield mock
+@pytest.fixture
+def mock_entity_type():
+ mock = MagicMock(aiplatform.featurestore.EntityType)
+ yield mock
+
+
+@pytest.fixture
+def mock_feature():
+ mock = MagicMock(aiplatform.featurestore.Feature)
+ yield mock
+
+
@pytest.fixture
def mock_get_featurestore(mock_featurestore):
with patch.object(aiplatform.featurestore, "Featurestore") as mock_get_featurestore:
@@ -395,6 +407,24 @@ def mock_create_featurestore(mock_featurestore):
yield mock_create_featurestore
+@pytest.fixture
+def mock_create_entity_type(mock_entity_type):
+ with patch.object(
+ aiplatform.featurestore.EntityType, "create"
+ ) as mock_create_entity_type:
+ mock_create_entity_type.return_value = mock_entity_type
+ yield mock_create_entity_type
+
+
+@pytest.fixture
+def mock_create_feature(mock_feature):
+ with patch.object(
+ aiplatform.featurestore.Feature, "create"
+ ) as mock_create_feature:
+ mock_create_feature.return_value = mock_feature
+ yield mock_create_feature
+
+
@pytest.fixture
def mock_delete_featurestore(mock_featurestore):
with patch.object(mock_featurestore, "delete") as mock_delete_featurestore:
diff --git a/samples/model-builder/create_entity_type_sample.py b/samples/model-builder/create_entity_type_sample.py
new file mode 100644
index 0000000000..eee0ddb8f5
--- /dev/null
+++ b/samples/model-builder/create_entity_type_sample.py
@@ -0,0 +1,35 @@
+# Copyright 2022 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# https://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+
+# [START aiplatform_sdk_create_entity_type_sample]
+from google.cloud import aiplatform
+
+
+def create_entity_type_sample(
+ project: str, location: str, entity_type_id: str, featurestore_name: str,
+):
+
+ aiplatform.init(project=project, location=location)
+
+ my_entity_type = aiplatform.EntityType.create(
+ entity_type_id=entity_type_id, featurestore_name=featurestore_name
+ )
+
+ my_entity_type.wait()
+
+ return my_entity_type
+
+
+# [END aiplatform_sdk_create_entity_type_sample]
diff --git a/samples/model-builder/create_entity_type_sample_test.py b/samples/model-builder/create_entity_type_sample_test.py
new file mode 100644
index 0000000000..fe7dfd3f77
--- /dev/null
+++ b/samples/model-builder/create_entity_type_sample_test.py
@@ -0,0 +1,35 @@
+# Copyright 2022 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# https://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import create_entity_type_sample
+import test_constants as constants
+
+
+def test_create_entity_type_sample(mock_sdk_init, mock_create_entity_type):
+
+ create_entity_type_sample.create_entity_type_sample(
+ project=constants.PROJECT,
+ location=constants.LOCATION,
+ entity_type_id=constants.ENTITY_TYPE_ID,
+ featurestore_name=constants.FEATURESTORE_NAME,
+ )
+
+ mock_sdk_init.assert_called_once_with(
+ project=constants.PROJECT, location=constants.LOCATION
+ )
+
+ mock_create_entity_type.assert_called_once_with(
+ entity_type_id=constants.ENTITY_TYPE_ID,
+ featurestore_name=constants.FEATURESTORE_NAME,
+ )
diff --git a/samples/model-builder/create_feature_sample.py b/samples/model-builder/create_feature_sample.py
new file mode 100644
index 0000000000..b9b2328ae8
--- /dev/null
+++ b/samples/model-builder/create_feature_sample.py
@@ -0,0 +1,43 @@
+# Copyright 2022 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# https://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+
+# [START aiplatform_sdk_create_feature_sample]
+from google.cloud import aiplatform
+
+
+def create_feature_sample(
+ project: str,
+ location: str,
+ feature_id: str,
+ value_type: str,
+ entity_type_id: str,
+ featurestore_id: str,
+):
+
+ aiplatform.init(project=project, location=location)
+
+ my_feature = aiplatform.Feature.create(
+ feature_id=feature_id,
+ value_type=value_type,
+ entity_type_name=entity_type_id,
+ featurestore_id=featurestore_id,
+ )
+
+ my_feature.wait()
+
+ return my_feature
+
+
+# [END aiplatform_sdk_create_feature_sample]
diff --git a/samples/model-builder/create_feature_sample_test.py b/samples/model-builder/create_feature_sample_test.py
new file mode 100644
index 0000000000..673f475691
--- /dev/null
+++ b/samples/model-builder/create_feature_sample_test.py
@@ -0,0 +1,39 @@
+# Copyright 2022 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# https://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import create_feature_sample
+import test_constants as constants
+
+
+def test_create_feature_sample(mock_sdk_init, mock_create_feature):
+
+ create_feature_sample.create_feature_sample(
+ project=constants.PROJECT,
+ location=constants.LOCATION,
+ feature_id=constants.FEATURE_ID,
+ value_type=constants.FEATURE_VALUE_TYPE,
+ entity_type_id=constants.ENTITY_TYPE_ID,
+ featurestore_id=constants.FEATURESTORE_ID,
+ )
+
+ mock_sdk_init.assert_called_once_with(
+ project=constants.PROJECT, location=constants.LOCATION
+ )
+
+ mock_create_feature.assert_called_once_with(
+ feature_id=constants.FEATURE_ID,
+ value_type=constants.FEATURE_VALUE_TYPE,
+ entity_type_name=constants.ENTITY_TYPE_ID,
+ featurestore_id=constants.FEATURESTORE_ID,
+ )
diff --git a/samples/model-builder/create_featurestore_sample_test.py b/samples/model-builder/create_featurestore_sample_test.py
index e133f4582b..2531f58c6d 100644
--- a/samples/model-builder/create_featurestore_sample_test.py
+++ b/samples/model-builder/create_featurestore_sample_test.py
@@ -21,7 +21,7 @@ def test_create_featurestore_sample(mock_sdk_init, mock_create_featurestore):
create_featurestore_sample.create_featurestore_sample(
project=constants.PROJECT,
location=constants.LOCATION,
- featurestore_id=constants.FEAUTURESTORE_ID,
+ featurestore_id=constants.FEATURESTORE_ID,
online_store_fixed_node_count=constants.ONLINE_STORE_FIXED_NODE_COUNT,
sync=constants.SYNC,
)
@@ -31,7 +31,7 @@ def test_create_featurestore_sample(mock_sdk_init, mock_create_featurestore):
)
mock_create_featurestore.assert_called_once_with(
- featurestore_id=constants.FEAUTURESTORE_ID,
+ featurestore_id=constants.FEATURESTORE_ID,
online_store_fixed_node_count=constants.ONLINE_STORE_FIXED_NODE_COUNT,
sync=constants.SYNC,
)
diff --git a/samples/model-builder/delete_featurestore_sample_test.py b/samples/model-builder/delete_featurestore_sample_test.py
index 5361c8c881..21b7cba84c 100644
--- a/samples/model-builder/delete_featurestore_sample_test.py
+++ b/samples/model-builder/delete_featurestore_sample_test.py
@@ -23,7 +23,7 @@ def test_delete_featurestore_sample(
delete_featurestore_sample.delete_featurestore_sample(
project=constants.PROJECT,
location=constants.LOCATION,
- featurestore_name=constants.FEAUTURESTORE_NAME,
+ featurestore_name=constants.FEATURESTORE_NAME,
sync=constants.SYNC,
force=constants.FORCE,
)
@@ -33,7 +33,7 @@ def test_delete_featurestore_sample(
)
mock_get_featurestore.assert_called_once_with(
- featurestore_name=constants.FEAUTURESTORE_NAME
+ featurestore_name=constants.FEATURESTORE_NAME
)
mock_delete_featurestore.assert_called_once_with(
diff --git a/samples/model-builder/test_constants.py b/samples/model-builder/test_constants.py
index ebd65211c3..50d02ec7a0 100644
--- a/samples/model-builder/test_constants.py
+++ b/samples/model-builder/test_constants.py
@@ -199,8 +199,13 @@
MODEL_TYPE = "CLOUD"
# Feature store constants
-FEAUTURESTORE_NAME = "projects/123/locations/us-central1/featurestores/featurestore_id"
-FEAUTURESTORE_ID = "featurestore_id"
+FEATURESTORE_ID = "featurestore_id"
+FEATURESTORE_NAME = "projects/123/locations/us-central1/featurestores/featurestore_id"
+ENTITY_TYPE_ID = "entity_type_id"
+ENTITY_TYPE_NAME = "projects/123/locations/us-central1/featurestores/featurestore_id/entityTypes/entity_type_id"
+FEATURE_ID = "feature_id"
+FEATURE_NAME = "projects/123/locations/us-central1/featurestores/featurestore_id/entityTypes/entity_type_id/features/feature_id"
+FEATURE_VALUE_TYPE = "INT64"
ONLINE_STORE_FIXED_NODE_COUNT = 1
SYNC = True
FORCE = True
From 2ba404f8bfbccd7a18ef613417912ed94882c4bd Mon Sep 17 00:00:00 2001
From: Morgan Du
Date: Mon, 28 Feb 2022 14:58:30 -0800
Subject: [PATCH 10/15] fix: loosen assertions for system test featurestore
(#1040)
b/221238283
---
tests/system/aiplatform/test_featurestore.py | 54 +++++++++++++-------
1 file changed, 36 insertions(+), 18 deletions(-)
diff --git a/tests/system/aiplatform/test_featurestore.py b/tests/system/aiplatform/test_featurestore.py
index 9adabcaf3b..0ed8e8131b 100644
--- a/tests/system/aiplatform/test_featurestore.py
+++ b/tests/system/aiplatform/test_featurestore.py
@@ -61,9 +61,6 @@ def test_create_get_list_featurestore(self, shared_state):
project=e2e_base._PROJECT, location=e2e_base._LOCATION,
)
- base_list_featurestores = len(aiplatform.Featurestore.list())
- shared_state["base_list_searched_features"] = len(aiplatform.Feature.search())
-
featurestore_id = self._make_display_name(key=_TEST_FEATURESTORE_ID).replace(
"-", "_"
)[:60]
@@ -79,7 +76,9 @@ def test_create_get_list_featurestore(self, shared_state):
assert featurestore.resource_name == get_featurestore.resource_name
list_featurestores = aiplatform.Featurestore.list()
- assert (len(list_featurestores) - base_list_featurestores) == 1
+ assert get_featurestore.resource_name in [
+ featurestore.resource_name for featurestore in list_featurestores
+ ]
def test_create_get_list_entity_types(self, shared_state):
@@ -121,7 +120,9 @@ def test_create_get_list_entity_types(self, shared_state):
list_entity_types = aiplatform.EntityType.list(
featurestore_name=featurestore_name
)
- assert len(list_entity_types) == 2
+ assert get_movie_entity_type.resource_name in [
+ entity_type.resource_name for entity_type in list_entity_types
+ ]
def test_create_get_list_features(self, shared_state):
@@ -134,9 +135,6 @@ def test_create_get_list_features(self, shared_state):
project=e2e_base._PROJECT, location=e2e_base._LOCATION,
)
- list_user_features = user_entity_type.list_features()
- assert len(list_user_features) == 0
-
# User Features
user_age_feature = user_entity_type.create_feature(
feature_id=_TEST_USER_AGE_FEATURE_ID, value_type="INT64"
@@ -179,7 +177,16 @@ def test_create_get_list_features(self, shared_state):
)
list_user_features = user_entity_type.list_features()
- assert len(list_user_features) == 3
+ list_user_feature_resource_names = [
+ feature.resource_name for feature in list_user_features
+ ]
+
+ assert get_user_age_feature.resource_name in list_user_feature_resource_names
+ assert get_user_gender_feature.resource_name in list_user_feature_resource_names
+ assert (
+ get_user_liked_genres_feature.resource_name
+ in list_user_feature_resource_names
+ )
def test_ingest_feature_values(self, shared_state, caplog):
@@ -223,13 +230,28 @@ def test_batch_create_features(self, shared_state):
_TEST_MOVIE_AVERAGE_RATING_FEATURE_ID: {"value_type": "DOUBLE"},
}
- list_movie_features = movie_entity_type.list_features()
- assert len(list_movie_features) == 0
-
movie_entity_type.batch_create_features(feature_configs=movie_feature_configs)
+ get_movie_title_feature = movie_entity_type.get_feature(
+ feature_id=_TEST_MOVIE_TITLE_FEATURE_ID
+ )
+ get_movie_genres_feature = movie_entity_type.get_feature(
+ feature_id=_TEST_MOVIE_GENRES_FEATURE_ID
+ )
+ get_movie_avg_rating_feature = movie_entity_type.get_feature(
+ feature_id=_TEST_MOVIE_AVERAGE_RATING_FEATURE_ID
+ )
+
list_movie_features = movie_entity_type.list_features()
- assert len(list_movie_features) == 3
+ movie_feature_resource_names = [
+ feature.resource_name for feature in list_movie_features
+ ]
+
+ assert get_movie_title_feature.resource_name in movie_feature_resource_names
+ assert get_movie_genres_feature.resource_name in movie_feature_resource_names
+ assert (
+ get_movie_avg_rating_feature.resource_name in movie_feature_resource_names
+ )
def test_ingest_feature_values_from_df_using_feature_time_column_and_online_read_multiple_entities(
self, shared_state, caplog
@@ -400,16 +422,12 @@ def test_ingest_feature_values_from_df_using_feature_time_datetime_and_online_re
def test_search_features(self, shared_state):
- assert shared_state["base_list_searched_features"] is not None
-
aiplatform.init(
project=e2e_base._PROJECT, location=e2e_base._LOCATION,
)
list_searched_features = aiplatform.Feature.search()
- assert (
- len(list_searched_features) - shared_state["base_list_searched_features"]
- ) == 6
+ assert len(list_searched_features) >= 1
def test_batch_serve_to_df(self, shared_state, caplog):
From e7d27193f323f88f4238206ecb380d746d98df31 Mon Sep 17 00:00:00 2001
From: Morgan Du
Date: Tue, 1 Mar 2022 12:54:24 -0800
Subject: [PATCH 11/15] fix: update system test_model_upload to use
BUILD_SPECIFIC_GCP_PROJECT (#1043)
Fixes #972, #877
---
tests/system/aiplatform/test_model_upload.py | 13 ++++++-------
1 file changed, 6 insertions(+), 7 deletions(-)
diff --git a/tests/system/aiplatform/test_model_upload.py b/tests/system/aiplatform/test_model_upload.py
index 76fe679021..ecc70b0c36 100644
--- a/tests/system/aiplatform/test_model_upload.py
+++ b/tests/system/aiplatform/test_model_upload.py
@@ -19,29 +19,28 @@
import pytest
-from google import auth as google_auth
from google.cloud import aiplatform
from google.cloud import storage
from tests.system.aiplatform import e2e_base
-# TODO(vinnys): Replace with env var `BUILD_SPECIFIC_GCP_PROJECT` once supported
-_, _TEST_PROJECT = google_auth.default()
-_TEST_LOCATION = "us-central1"
_XGBOOST_MODEL_URI = "gs://cloud-samples-data-us-central1/vertex-ai/google-cloud-aiplatform-ci-artifacts/models/iris_xgboost/model.bst"
@pytest.mark.usefixtures("delete_staging_bucket")
class TestModel(e2e_base.TestEndToEnd):
- _temp_prefix = f"{_TEST_PROJECT}-vertex-staging-{_TEST_LOCATION}"
+
+ _temp_prefix = "temp_vertex_sdk_e2e_model_upload_test"
def test_upload_and_deploy_xgboost_model(self, shared_state):
"""Upload XGBoost model from local file and deploy it for prediction. Additionally, update model name, description and labels"""
- aiplatform.init(project=_TEST_PROJECT, location=_TEST_LOCATION)
+ aiplatform.init(
+ project=e2e_base._PROJECT, location=e2e_base._LOCATION,
+ )
- storage_client = storage.Client(project=_TEST_PROJECT)
+ storage_client = storage.Client(project=e2e_base._PROJECT)
model_blob = storage.Blob.from_string(
uri=_XGBOOST_MODEL_URI, client=storage_client
)
From b9a057d001deb8727cb725d44bb5528dce330653 Mon Sep 17 00:00:00 2001
From: Taisei Klasen
Date: Wed, 2 Mar 2022 10:54:23 -0800
Subject: [PATCH 12/15] fix: Fix create_lit_model_from_endpoint not accepting
models that don't return a dictionary. (#1020)
Some models, like Keras sequential models, don't return a dictionary for their prediction. We need to support these models as they are commonly used.
Fixes b/220167889
---
google/cloud/aiplatform/explain/lit.py | 9 +-
tests/unit/aiplatform/test_explain_lit.py | 159 ++++++++++++++++++++--
2 files changed, 153 insertions(+), 15 deletions(-)
diff --git a/google/cloud/aiplatform/explain/lit.py b/google/cloud/aiplatform/explain/lit.py
index 5032055801..635ebb1ce8 100644
--- a/google/cloud/aiplatform/explain/lit.py
+++ b/google/cloud/aiplatform/explain/lit.py
@@ -18,7 +18,7 @@
import os
from google.cloud import aiplatform
-from typing import Dict, List, Optional, Tuple, Union
+from typing import Dict, List, Mapping, Optional, Tuple, Union
try:
from lit_nlp.api import dataset as lit_dataset
@@ -154,7 +154,12 @@ def predict_minibatch(
prediction_object = self._endpoint.predict(instances)
outputs = []
for prediction in prediction_object.predictions:
- outputs.append({key: prediction[key] for key in self._output_types})
+ if isinstance(prediction, Mapping):
+ outputs.append({key: prediction[key] for key in self._output_types})
+ else:
+ outputs.append(
+ {key: prediction[i] for i, key in enumerate(self._output_types)}
+ )
if self._explanation_enabled:
for i, explanation in enumerate(prediction_object.explanations):
attributions = explanation.attributions
diff --git a/tests/unit/aiplatform/test_explain_lit.py b/tests/unit/aiplatform/test_explain_lit.py
index c8092b1742..fe9b269610 100644
--- a/tests/unit/aiplatform/test_explain_lit.py
+++ b/tests/unit/aiplatform/test_explain_lit.py
@@ -105,7 +105,8 @@
),
]
_TEST_TRAFFIC_SPLIT = {_TEST_ID: 0, _TEST_ID_2: 100, _TEST_ID_3: 0}
-_TEST_PREDICTION = [{"label": 1.0}]
+_TEST_DICT_PREDICTION = [{"label": 1.0}]
+_TEST_LIST_PREDICTION = [[1.0]]
_TEST_EXPLANATIONS = [gca_prediction_service.explanation.Explanation(attributions=[])]
_TEST_ATTRIBUTIONS = [
gca_prediction_service.explanation.Attribution(
@@ -218,26 +219,54 @@ def get_endpoint_with_models_with_explanation_mock():
@pytest.fixture
-def predict_client_predict_mock():
+def predict_client_predict_dict_mock():
with mock.patch.object(
prediction_service_client.PredictionServiceClient, "predict"
) as predict_mock:
predict_mock.return_value = gca_prediction_service.PredictResponse(
deployed_model_id=_TEST_ID
)
- predict_mock.return_value.predictions.extend(_TEST_PREDICTION)
+ predict_mock.return_value.predictions.extend(_TEST_DICT_PREDICTION)
yield predict_mock
@pytest.fixture
-def predict_client_explain_mock():
+def predict_client_explain_dict_mock():
with mock.patch.object(
prediction_service_client.PredictionServiceClient, "explain"
) as predict_mock:
predict_mock.return_value = gca_prediction_service.ExplainResponse(
deployed_model_id=_TEST_ID,
)
- predict_mock.return_value.predictions.extend(_TEST_PREDICTION)
+ predict_mock.return_value.predictions.extend(_TEST_DICT_PREDICTION)
+ predict_mock.return_value.explanations.extend(_TEST_EXPLANATIONS)
+ predict_mock.return_value.explanations[0].attributions.extend(
+ _TEST_ATTRIBUTIONS
+ )
+ yield predict_mock
+
+
+@pytest.fixture
+def predict_client_predict_list_mock():
+ with mock.patch.object(
+ prediction_service_client.PredictionServiceClient, "predict"
+ ) as predict_mock:
+ predict_mock.return_value = gca_prediction_service.PredictResponse(
+ deployed_model_id=_TEST_ID
+ )
+ predict_mock.return_value.predictions.extend(_TEST_LIST_PREDICTION)
+ yield predict_mock
+
+
+@pytest.fixture
+def predict_client_explain_list_mock():
+ with mock.patch.object(
+ prediction_service_client.PredictionServiceClient, "explain"
+ ) as predict_mock:
+ predict_mock.return_value = gca_prediction_service.ExplainResponse(
+ deployed_model_id=_TEST_ID,
+ )
+ predict_mock.return_value.predictions.extend(_TEST_LIST_PREDICTION)
predict_mock.return_value.explanations.extend(_TEST_EXPLANATIONS)
predict_mock.return_value.explanations[0].attributions.extend(
_TEST_ATTRIBUTIONS
@@ -312,10 +341,112 @@ def test_create_lit_model_from_tensorflow_with_xai_returns_model(
assert len(item.values()) == 2
@pytest.mark.usefixtures(
- "predict_client_predict_mock", "get_endpoint_with_models_mock"
+ "predict_client_predict_dict_mock", "get_endpoint_with_models_mock"
+ )
+ @pytest.mark.parametrize("model_id", [None, _TEST_ID])
+ def test_create_lit_model_from_dict_endpoint_returns_model(
+ self, feature_types, label_types, model_id
+ ):
+ endpoint = aiplatform.Endpoint(_TEST_ENDPOINT_NAME)
+ lit_model = create_lit_model_from_endpoint(
+ endpoint, feature_types, label_types, model_id
+ )
+ test_inputs = [
+ {"feature_1": 1.0, "feature_2": 2.0},
+ ]
+ outputs = lit_model.predict_minibatch(test_inputs)
+
+ assert lit_model.input_spec() == dict(feature_types)
+ assert lit_model.output_spec() == dict(label_types)
+ assert len(outputs) == 1
+ for item in outputs:
+ assert item.keys() == {"label"}
+ assert len(item.values()) == 1
+
+ @pytest.mark.usefixtures(
+ "predict_client_explain_dict_mock",
+ "get_endpoint_with_models_with_explanation_mock",
+ )
+ @pytest.mark.parametrize("model_id", [None, _TEST_ID])
+ def test_create_lit_model_from_dict_endpoint_with_xai_returns_model(
+ self, feature_types, label_types, model_id
+ ):
+ endpoint = aiplatform.Endpoint(_TEST_ENDPOINT_NAME)
+ lit_model = create_lit_model_from_endpoint(
+ endpoint, feature_types, label_types, model_id
+ )
+ test_inputs = [
+ {"feature_1": 1.0, "feature_2": 2.0},
+ ]
+ outputs = lit_model.predict_minibatch(test_inputs)
+
+ assert lit_model.input_spec() == dict(feature_types)
+ assert lit_model.output_spec() == dict(
+ {
+ **label_types,
+ "feature_attribution": lit_types.FeatureSalience(signed=True),
+ }
+ )
+ assert len(outputs) == 1
+ for item in outputs:
+ assert item.keys() == {"label", "feature_attribution"}
+ assert len(item.values()) == 2
+
+ @pytest.mark.usefixtures(
+ "predict_client_predict_dict_mock", "get_endpoint_with_models_mock"
+ )
+ @pytest.mark.parametrize("model_id", [None, _TEST_ID])
+ def test_create_lit_model_from_dict_endpoint_name_returns_model(
+ self, feature_types, label_types, model_id
+ ):
+ lit_model = create_lit_model_from_endpoint(
+ _TEST_ENDPOINT_NAME, feature_types, label_types, model_id
+ )
+ test_inputs = [
+ {"feature_1": 1.0, "feature_2": 2.0},
+ ]
+ outputs = lit_model.predict_minibatch(test_inputs)
+
+ assert lit_model.input_spec() == dict(feature_types)
+ assert lit_model.output_spec() == dict(label_types)
+ assert len(outputs) == 1
+ for item in outputs:
+ assert item.keys() == {"label"}
+ assert len(item.values()) == 1
+
+ @pytest.mark.usefixtures(
+ "predict_client_explain_dict_mock",
+ "get_endpoint_with_models_with_explanation_mock",
+ )
+ @pytest.mark.parametrize("model_id", [None, _TEST_ID])
+ def test_create_lit_model_from_dict_endpoint_name_with_xai_returns_model(
+ self, feature_types, label_types, model_id
+ ):
+ lit_model = create_lit_model_from_endpoint(
+ _TEST_ENDPOINT_NAME, feature_types, label_types, model_id
+ )
+ test_inputs = [
+ {"feature_1": 1.0, "feature_2": 2.0},
+ ]
+ outputs = lit_model.predict_minibatch(test_inputs)
+
+ assert lit_model.input_spec() == dict(feature_types)
+ assert lit_model.output_spec() == dict(
+ {
+ **label_types,
+ "feature_attribution": lit_types.FeatureSalience(signed=True),
+ }
+ )
+ assert len(outputs) == 1
+ for item in outputs:
+ assert item.keys() == {"label", "feature_attribution"}
+ assert len(item.values()) == 2
+
+ @pytest.mark.usefixtures(
+ "predict_client_predict_list_mock", "get_endpoint_with_models_mock"
)
@pytest.mark.parametrize("model_id", [None, _TEST_ID])
- def test_create_lit_model_from_endpoint_returns_model(
+ def test_create_lit_model_from_list_endpoint_returns_model(
self, feature_types, label_types, model_id
):
endpoint = aiplatform.Endpoint(_TEST_ENDPOINT_NAME)
@@ -335,10 +466,11 @@ def test_create_lit_model_from_endpoint_returns_model(
assert len(item.values()) == 1
@pytest.mark.usefixtures(
- "predict_client_explain_mock", "get_endpoint_with_models_with_explanation_mock"
+ "predict_client_explain_list_mock",
+ "get_endpoint_with_models_with_explanation_mock",
)
@pytest.mark.parametrize("model_id", [None, _TEST_ID])
- def test_create_lit_model_from_endpoint_with_xai_returns_model(
+ def test_create_lit_model_from_list_endpoint_with_xai_returns_model(
self, feature_types, label_types, model_id
):
endpoint = aiplatform.Endpoint(_TEST_ENDPOINT_NAME)
@@ -363,10 +495,10 @@ def test_create_lit_model_from_endpoint_with_xai_returns_model(
assert len(item.values()) == 2
@pytest.mark.usefixtures(
- "predict_client_predict_mock", "get_endpoint_with_models_mock"
+ "predict_client_predict_list_mock", "get_endpoint_with_models_mock"
)
@pytest.mark.parametrize("model_id", [None, _TEST_ID])
- def test_create_lit_model_from_endpoint_name_returns_model(
+ def test_create_lit_model_from_list_endpoint_name_returns_model(
self, feature_types, label_types, model_id
):
lit_model = create_lit_model_from_endpoint(
@@ -385,10 +517,11 @@ def test_create_lit_model_from_endpoint_name_returns_model(
assert len(item.values()) == 1
@pytest.mark.usefixtures(
- "predict_client_explain_mock", "get_endpoint_with_models_with_explanation_mock"
+ "predict_client_explain_list_mock",
+ "get_endpoint_with_models_with_explanation_mock",
)
@pytest.mark.parametrize("model_id", [None, _TEST_ID])
- def test_create_lit_model_from_endpoint_name_with_xai_returns_model(
+ def test_create_lit_model_from_list_endpoint_name_with_xai_returns_model(
self, feature_types, label_types, model_id
):
lit_model = create_lit_model_from_endpoint(
From 7f72aabc46f86b7f5d975080f60439b8022db1d9 Mon Sep 17 00:00:00 2001
From: Johan Euphrosine
Date: Thu, 3 Mar 2022 09:10:53 +0900
Subject: [PATCH 13/15] chore: fix typo in README (#1035)
Co-authored-by: Karl Weinmeister <11586922+kweinmeister@users.noreply.github.com>
Co-authored-by: Anthonios Partheniou
Co-authored-by: Morgan Du
---
README.rst | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/README.rst b/README.rst
index 733348cc46..21411c486c 100644
--- a/README.rst
+++ b/README.rst
@@ -123,7 +123,7 @@ Initialize the SDK to store common configurations that you use with the SDK.
experiment='my-experiment',
# description of the experiment above
- experiment_description='my experiment decsription'
+ experiment_description='my experiment description'
)
Datasets
From dfbd68a79f1c892c4380405dd900deb6ac6574a6 Mon Sep 17 00:00:00 2001
From: Anthonios Partheniou
Date: Thu, 3 Mar 2022 12:23:02 -0500
Subject: [PATCH 14/15] fix(deps): require google-api-core>=1.31.5, >=2.3.2
(#1050)
fix(deps): require proto-plus>=1.15.0
---
setup.py | 4 ++--
testing/constraints-3.6.txt | 4 ++--
2 files changed, 4 insertions(+), 4 deletions(-)
diff --git a/setup.py b/setup.py
index 01aa6686f8..967149d98c 100644
--- a/setup.py
+++ b/setup.py
@@ -95,8 +95,8 @@
# NOTE: Maintainers, please do not require google-api-core>=2.x.x
# Until this issue is closed
# https://github.com/googleapis/google-cloud-python/issues/10566
- "google-api-core[grpc] >= 1.26.0, <3.0.0dev",
- "proto-plus >= 1.10.1",
+ "google-api-core[grpc] >= 1.31.5, <3.0.0dev,!=2.0.*,!=2.1.*,!=2.2.*,!=2.3.0",
+ "proto-plus >= 1.15.0",
"packaging >= 14.3",
"google-cloud-storage >= 1.32.0, < 3.0.0dev",
"google-cloud-bigquery >= 1.15.0, < 3.0.0dev",
diff --git a/testing/constraints-3.6.txt b/testing/constraints-3.6.txt
index fc7d641771..0380c54f59 100644
--- a/testing/constraints-3.6.txt
+++ b/testing/constraints-3.6.txt
@@ -5,9 +5,9 @@
#
# e.g., if setup.py has "foo >= 1.14.0, < 2.0.0dev",
# Then this file should have foo==1.14.0
-google-api-core==1.26.0
+google-api-core==1.31.5
libcst==0.2.5
-proto-plus==1.10.1
+proto-plus==1.15.0
mock==4.0.2
google-cloud-storage==1.32.0
google-auth==1.25.0 # TODO: Remove when google-api-core >= 1.26.0 is required
From d8a5e0b2765af7a106e94b1d0207736ac91a0b43 Mon Sep 17 00:00:00 2001
From: "release-please[bot]"
<55107282+release-please[bot]@users.noreply.github.com>
Date: Thu, 3 Mar 2022 14:26:20 -0800
Subject: [PATCH 15/15] chore(main): release 1.11.0 (#1011)
Co-authored-by: release-please[bot] <55107282+release-please[bot]@users.noreply.github.com>
---
CHANGELOG.md | 28 ++++++++++++++++++++++++++++
google/cloud/aiplatform/version.py | 2 +-
2 files changed, 29 insertions(+), 1 deletion(-)
diff --git a/CHANGELOG.md b/CHANGELOG.md
index c59ab328f4..530da59301 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -1,5 +1,33 @@
# Changelog
+## [1.11.0](https://github.com/googleapis/python-aiplatform/compare/v1.10.0...v1.11.0) (2022-03-03)
+
+
+### Features
+
+* add additional_experiment flag in the tables and forecasting training job ([#979](https://github.com/googleapis/python-aiplatform/issues/979)) ([5fe59a4](https://github.com/googleapis/python-aiplatform/commit/5fe59a4015882d56c22f9973aff888966dd53a2e))
+* add TPU_V2 & TPU_V3 values to AcceleratorType in aiplatform v1/v1beta1 accelerator_type.proto ([#1010](https://github.com/googleapis/python-aiplatform/issues/1010)) ([09c2e8a](https://github.com/googleapis/python-aiplatform/commit/09c2e8a368c6d265d99acfb12addd5ba6f1a50e6))
+* Added scheduling to CustomTrainingJob, CustomPythonPackageTrainingJob, CustomContainerTrainingJob ([#970](https://github.com/googleapis/python-aiplatform/issues/970)) ([89078e0](https://github.com/googleapis/python-aiplatform/commit/89078e0d2a719e2b0d25ae36ecd06c356a5a33c9))
+
+
+### Bug Fixes
+
+* **deps:** allow google-cloud-storage < 3.0.0dev ([#1008](https://github.com/googleapis/python-aiplatform/issues/1008)) ([1c34154](https://github.com/googleapis/python-aiplatform/commit/1c341544e9bd94c6ff0ee41177565c8c078673a3))
+* **deps:** require google-api-core>=1.31.5, >=2.3.2 ([#1050](https://github.com/googleapis/python-aiplatform/issues/1050)) ([dfbd68a](https://github.com/googleapis/python-aiplatform/commit/dfbd68a79f1c892c4380405dd900deb6ac6574a6))
+* **deps:** require proto-plus>=1.15.0 ([dfbd68a](https://github.com/googleapis/python-aiplatform/commit/dfbd68a79f1c892c4380405dd900deb6ac6574a6))
+* enforce bq SchemaField field_type and mode using feature value_type ([#1019](https://github.com/googleapis/python-aiplatform/issues/1019)) ([095bea2](https://github.com/googleapis/python-aiplatform/commit/095bea23bc15a490ddbb1a8edac7f5db626bc659))
+* Fix create_lit_model_from_endpoint not accepting models that don't return a dictionary. ([#1020](https://github.com/googleapis/python-aiplatform/issues/1020)) ([b9a057d](https://github.com/googleapis/python-aiplatform/commit/b9a057d001deb8727cb725d44bb5528dce330653))
+* loosen assertions for system test featurestore ([#1040](https://github.com/googleapis/python-aiplatform/issues/1040)) ([2ba404f](https://github.com/googleapis/python-aiplatform/commit/2ba404f8bfbccd7a18ef613417912ed94882c4bd))
+* remove empty scripts kwarg in setup.py ([#1014](https://github.com/googleapis/python-aiplatform/issues/1014)) ([ef3fcc8](https://github.com/googleapis/python-aiplatform/commit/ef3fcc86fb3808b37706470c8c49903ec3a302fb))
+* show logs when TFX pipelines are submitted ([#976](https://github.com/googleapis/python-aiplatform/issues/976)) ([c10923b](https://github.com/googleapis/python-aiplatform/commit/c10923b47b9b9941d14ae2c5398348d971a23f9d))
+* update system test_model_upload to use BUILD_SPECIFIC_GCP_PROJECT ([#1043](https://github.com/googleapis/python-aiplatform/issues/1043)) ([e7d2719](https://github.com/googleapis/python-aiplatform/commit/e7d27193f323f88f4238206ecb380d746d98df31))
+
+
+### Documentation
+
+* **samples:** add samples to create/delete featurestore ([#980](https://github.com/googleapis/python-aiplatform/issues/980)) ([5ee6354](https://github.com/googleapis/python-aiplatform/commit/5ee6354a12c6422015acb81caef32d6d2f52c838))
+* **samples:** added create feature and create entity type samples and tests ([#984](https://github.com/googleapis/python-aiplatform/issues/984)) ([d221e6b](https://github.com/googleapis/python-aiplatform/commit/d221e6bebd7fb98a8c6e3f3b8ae507f2f214128f))
+
## [1.10.0](https://github.com/googleapis/python-aiplatform/compare/v1.9.0...v1.10.0) (2022-02-07)
diff --git a/google/cloud/aiplatform/version.py b/google/cloud/aiplatform/version.py
index f15161a089..b8e5d4d7f3 100644
--- a/google/cloud/aiplatform/version.py
+++ b/google/cloud/aiplatform/version.py
@@ -15,4 +15,4 @@
# limitations under the License.
#
-__version__ = "1.10.0"
+__version__ = "1.11.0"