diff --git a/.coveragerc b/.coveragerc
new file mode 100644
index 0000000000..5b3f287a0f
--- /dev/null
+++ b/.coveragerc
@@ -0,0 +1,18 @@
+[run]
+branch = True
+
+[report]
+fail_under = 100
+show_missing = True
+omit =
+ google/cloud/aiplatform/v1/schema/trainingjob/definition/__init__.py
+exclude_lines =
+ # Re-enable the standard pragma
+ pragma: NO COVER
+ # Ignore debug-only repr
+ def __repr__
+ # Ignore pkg_resources exceptions.
+ # This is added at the module level as a safeguard in case someone
+ # generates the code and tries to run it without pip installing it. This
+ # makes it virtually impossible to test properly.
+ except pkg_resources.DistributionNotFound
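The [report] settings above are the coverage gate the CI enforces. A minimal sketch of exercising the same configuration programmatically, assuming the coverage and pytest packages are installed and the unit tests live under tests/unit (a hypothetical path; in CI the noxfile drives this rather than a hand-written script):

import coverage
import pytest

# Coverage() reads .coveragerc from the working directory, picking up
# branch=True, the omit list, and the exclude_lines patterns above.
cov = coverage.Coverage(config_file=".coveragerc")
cov.start()
status = pytest.main(["tests/unit"])   # hypothetical test path
cov.stop()
cov.save()
total = cov.report(show_missing=True)  # returns the total percentage as a float
assert status == 0 and total == 100.0, f"coverage {total:.1f}% below fail_under=100"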
diff --git a/.flake8 b/.flake8
index ed9316381c..29227d4cf4 100644
--- a/.flake8
+++ b/.flake8
@@ -26,6 +26,7 @@ exclude =
*_pb2.py
# Standard linting exemptions.
+ **/.nox/**
__pycache__,
.git,
*.pyc,
diff --git a/.github/CODEOWNERS b/.github/CODEOWNERS
index 4910f51865..8de778714c 100644
--- a/.github/CODEOWNERS
+++ b/.github/CODEOWNERS
@@ -12,6 +12,7 @@
# The AI Platform SDK is owned by Model Builder SDK Dev team
/google/cloud/aiplatform/* @googleapis/cloud-aiplatform-model-builder-sdk
+/tests/unit/aiplatform/* @googleapis/cloud-aiplatform-model-builder-sdk
# The python-samples-owners team is the default owner for samples
/samples/**/*.py @dizcology @googleapis/python-samples-owners
diff --git a/.github/header-checker-lint.yml b/.github/header-checker-lint.yml
new file mode 100644
index 0000000000..fc281c05bd
--- /dev/null
+++ b/.github/header-checker-lint.yml
@@ -0,0 +1,15 @@
+{"allowedCopyrightHolders": ["Google LLC"],
+ "allowedLicenses": ["Apache-2.0", "MIT", "BSD-3"],
+ "ignoreFiles": ["**/requirements.txt", "**/requirements-test.txt"],
+ "sourceFileExtensions": [
+ "ts",
+ "js",
+ "java",
+ "sh",
+ "Dockerfile",
+ "yaml",
+ "py",
+ "html",
+ "txt"
+ ]
+}
\ No newline at end of file
diff --git a/.kokoro/build.sh b/.kokoro/build.sh
index 4c37ecad0c..ef4eb9c094 100755
--- a/.kokoro/build.sh
+++ b/.kokoro/build.sh
@@ -15,7 +15,11 @@
set -eo pipefail
-cd github/python-aiplatform
+if [[ -z "${PROJECT_ROOT:-}" ]]; then
+ PROJECT_ROOT="github/python-aiplatform"
+fi
+
+cd "${PROJECT_ROOT}"
# Disable buffering, so that the logs stream through.
export PYTHONUNBUFFERED=1
@@ -30,16 +34,16 @@ export GOOGLE_APPLICATION_CREDENTIALS=${KOKORO_GFILE_DIR}/service-account.json
export PROJECT_ID=$(cat "${KOKORO_GFILE_DIR}/project-id.json")
# Remove old nox
-python3.6 -m pip uninstall --yes --quiet nox-automation
+python3 -m pip uninstall --yes --quiet nox-automation
# Install nox
-python3.6 -m pip install --upgrade --quiet nox
-python3.6 -m nox --version
+python3 -m pip install --upgrade --quiet nox
+python3 -m nox --version
# If NOX_SESSION is set, it only runs the specified session,
# otherwise run all the sessions.
if [[ -n "${NOX_SESSION:-}" ]]; then
- python3.6 -m nox -s "${NOX_SESSION:-}"
+ python3 -m nox -s ${NOX_SESSION:-}
else
- python3.6 -m nox
+ python3 -m nox
fi
diff --git a/.kokoro/docs/docs-presubmit.cfg b/.kokoro/docs/docs-presubmit.cfg
index 1118107829..85c4e08775 100644
--- a/.kokoro/docs/docs-presubmit.cfg
+++ b/.kokoro/docs/docs-presubmit.cfg
@@ -15,3 +15,14 @@ env_vars: {
key: "TRAMPOLINE_IMAGE_UPLOAD"
value: "false"
}
+
+env_vars: {
+ key: "TRAMPOLINE_BUILD_FILE"
+ value: "github/python-aiplatform/.kokoro/build.sh"
+}
+
+# Only run this nox session.
+env_vars: {
+ key: "NOX_SESSION"
+ value: "docs docfx"
+}
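Because build.sh now leaves ${NOX_SESSION:-} unquoted, the value "docs docfx" word-splits into two session names for nox -s. A hypothetical sketch of what those two sessions might look like in a noxfile; the repository's own noxfile.py is authoritative, and the package pins and sphinx-build flags here are illustrative only:

import nox

@nox.session(python="3.8")
def docs(session):
    session.install("-e", ".")
    session.install("sphinx", "alabaster", "recommonmark")
    session.run("sphinx-build", "-W", "-b", "html", "docs/", "docs/_build/html/")

@nox.session(python="3.8")
def docfx(session):
    session.install("-e", ".")
    session.install("sphinx", "gcp-sphinx-docfx-yaml")
    session.run(
        "sphinx-build",
        "-D", "extensions=docfx_yaml.extension",
        "-b", "html",
        "docs/", "docs/_build/docfx/",
    )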
diff --git a/.kokoro/test-samples.sh b/.kokoro/test-samples.sh
index aed13be6d4..4c034fa7c7 100755
--- a/.kokoro/test-samples.sh
+++ b/.kokoro/test-samples.sh
@@ -87,11 +87,11 @@ for file in samples/**/requirements.txt; do
python3.6 -m nox -s "$RUN_TESTS_SESSION"
EXIT=$?
- # If this is a periodic build, send the test log to the Build Cop Bot.
- # See https://github.com/googleapis/repo-automation-bots/tree/master/packages/buildcop.
+ # If this is a periodic build, send the test log to the FlakyBot.
+ # See https://github.com/googleapis/repo-automation-bots/tree/master/packages/flakybot.
if [[ $KOKORO_BUILD_ARTIFACTS_SUBDIR = *"periodic"* ]]; then
- chmod +x $KOKORO_GFILE_DIR/linux_amd64/buildcop
- $KOKORO_GFILE_DIR/linux_amd64/buildcop
+ chmod +x $KOKORO_GFILE_DIR/linux_amd64/flakybot
+ $KOKORO_GFILE_DIR/linux_amd64/flakybot
fi
if [[ $EXIT -ne 0 ]]; then
diff --git a/.kokoro/trampoline_v2.sh b/.kokoro/trampoline_v2.sh
index 719bcd5ba8..4af6cdc26d 100755
--- a/.kokoro/trampoline_v2.sh
+++ b/.kokoro/trampoline_v2.sh
@@ -159,7 +159,7 @@ if [[ -n "${KOKORO_BUILD_ID:-}" ]]; then
"KOKORO_GITHUB_COMMIT"
"KOKORO_GITHUB_PULL_REQUEST_NUMBER"
"KOKORO_GITHUB_PULL_REQUEST_COMMIT"
- # For Build Cop Bot
+ # For FlakyBot
"KOKORO_GITHUB_COMMIT_URL"
"KOKORO_GITHUB_PULL_REQUEST_URL"
)
diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml
new file mode 100644
index 0000000000..a9024b15d7
--- /dev/null
+++ b/.pre-commit-config.yaml
@@ -0,0 +1,17 @@
+# See https://pre-commit.com for more information
+# See https://pre-commit.com/hooks.html for more hooks
+repos:
+- repo: https://github.com/pre-commit/pre-commit-hooks
+ rev: v3.4.0
+ hooks:
+ - id: trailing-whitespace
+ - id: end-of-file-fixer
+ - id: check-yaml
+- repo: https://github.com/psf/black
+ rev: 19.10b0
+ hooks:
+ - id: black
+- repo: https://gitlab.com/pycqa/flake8
+ rev: 3.8.4
+ hooks:
+ - id: flake8
diff --git a/.sample_configs/param_handlers/create_training_pipeline_text_sentiment_analysis_sample.py b/.sample_configs/param_handlers/create_training_pipeline_text_sentiment_analysis_sample.py
index db597c6611..744f463361 100644
--- a/.sample_configs/param_handlers/create_training_pipeline_text_sentiment_analysis_sample.py
+++ b/.sample_configs/param_handlers/create_training_pipeline_text_sentiment_analysis_sample.py
@@ -18,7 +18,7 @@ def make_parent(parent: str) -> str:
def make_training_pipeline(display_name: str, dataset_id: str, model_display_name: str) -> google.cloud.aiplatform_v1alpha1.types.training_pipeline.TrainingPipeline:
# Use sentiment_max of 4
- training_task_inputs_dict = {"sentiment_max": 4}
+ training_task_inputs_dict = {"sentiment_max": 10}
training_task_inputs = to_protobuf_value(training_task_inputs_dict)
training_pipeline = {
diff --git a/.sample_configs/param_handlers/upload_model_explain_image_managed_container_sample.py b/.sample_configs/param_handlers/upload_model_explain_image_managed_container_sample.py
index 6710c55080..10995aaed6 100644
--- a/.sample_configs/param_handlers/upload_model_explain_image_managed_container_sample.py
+++ b/.sample_configs/param_handlers/upload_model_explain_image_managed_container_sample.py
@@ -30,11 +30,11 @@ def make_model(
container_spec = {"image_uri": container_spec_image_uri, "command": [], "args": []}
# The explainabilty method and corresponding parameters
- parameters = aiplatform.gapic.ExplanationParameters({"xrai_attribution": { "step_count": 1}})
+ parameters = aiplatform_v1beta1.ExplanationParameters({"xrai_attribution": { "step_count": 1}})
# The input tensor for feature attribution to the output
# For single input model, y = f(x), this will be the serving input layer.
- input_metadata = aiplatform.gapic.ExplanationMetadata.InputMetadata({
+ input_metadata = aiplatform_v1beta1.ExplanationMetadata.InputMetadata({
"input_tensor_name": input_tensor_name,
# Input is image data
"modality": "image",
@@ -42,23 +42,23 @@ def make_model(
# The output tensor to explain
# For single output model, y = f(x), this will be the serving output layer.
- output_metadata = aiplatform.gapic.ExplanationMetadata.OutputMetadata({
+ output_metadata = aiplatform_v1beta1.ExplanationMetadata.OutputMetadata({
"output_tensor_name": output_tensor_name
})
# Assemble the explanation metadata
- metadata = aiplatform.gapic.ExplanationMetadata(
+ metadata = aiplatform_v1beta1.ExplanationMetadata(
inputs={'image': input_metadata},
outputs={'prediction' : output_metadata}
)
# Assemble the explanation specification
- explanation_spec = aiplatform.gapic.ExplanationSpec(
+ explanation_spec = aiplatform_v1beta1.ExplanationSpec(
parameters=parameters,
metadata=metadata
)
- model = aiplatform.gapic.Model(display_name=display_name,
+ model = aiplatform_v1beta1.Model(display_name=display_name,
# The Cloud Storage location of the custom model
artifact_uri=artifact_uri,
explanation_spec=explanation_spec,
diff --git a/.sample_configs/param_handlers/upload_model_explain_tabular_managed_container_sample.py b/.sample_configs/param_handlers/upload_model_explain_tabular_managed_container_sample.py
index 5c23bccd75..77156e516b 100644
--- a/.sample_configs/param_handlers/upload_model_explain_tabular_managed_container_sample.py
+++ b/.sample_configs/param_handlers/upload_model_explain_tabular_managed_container_sample.py
@@ -31,11 +31,11 @@ def make_model(
container_spec = {"image_uri": container_spec_image_uri, "command": [], "args": []}
# The explainabilty method and corresponding parameters
- parameters = aiplatform.gapic.ExplanationParameters({"xrai_attribution": { "step_count": 1}})
+ parameters = aiplatform_v1beta1.ExplanationParameters({"xrai_attribution": { "step_count": 1}})
# The input tensor for feature attribution to the output
# For single input model, y = f(x), this will be the serving input layer.
- input_metadata = aiplatform.gapic.ExplanationMetadata.InputMetadata({
+ input_metadata = aiplatform_v1beta1.ExplanationMetadata.InputMetadata({
"input_tensor_name": input_tensor_name,
# Input is tabular data
"modality": "numeric",
@@ -46,23 +46,23 @@ def make_model(
# The output tensor to explain
# For single output model, y = f(x), this will be the serving output layer.
- output_metadata = aiplatform.gapic.ExplanationMetadata.OutputMetadata({
+ output_metadata = aiplatform_v1beta1.ExplanationMetadata.OutputMetadata({
"output_tensor_name": output_tensor_name
})
# Assemble the explanation metadata
- metadata = aiplatform.gapic.ExplanationMetadata(
+ metadata = aiplatform_v1beta1.ExplanationMetadata(
inputs={'features': input_metadata},
outputs={'prediction' : output_metadata}
)
# Assemble the explanation specification
- explanation_spec = aiplatform.gapic.ExplanationSpec(
+ explanation_spec = aiplatform_v1beta1.ExplanationSpec(
parameters=parameters,
metadata=metadata
)
- model = aiplatform.gapic.Model(display_name=display_name,
+ model = aiplatform_v1beta1.Model(display_name=display_name,
# The Cloud Storage location of the custom model
artifact_uri=artifact_uri,
explanation_spec=explanation_spec,
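Both param handlers above now build the explanation objects from aiplatform_v1beta1 directly rather than through the aiplatform.gapic alias (which re-exports v1 after this change). A minimal end-to-end sketch of how those pieces fit into an upload_model call; project, bucket, tensor names and container image are placeholders:

from google.cloud import aiplatform_v1beta1

parameters = aiplatform_v1beta1.ExplanationParameters(
    {"xrai_attribution": {"step_count": 1}}
)
metadata = aiplatform_v1beta1.ExplanationMetadata(
    inputs={
        "features": aiplatform_v1beta1.ExplanationMetadata.InputMetadata(
            {"input_tensor_name": "dense_input", "modality": "numeric"}
        )
    },
    outputs={
        "prediction": aiplatform_v1beta1.ExplanationMetadata.OutputMetadata(
            {"output_tensor_name": "dense_output"}
        )
    },
)
model = aiplatform_v1beta1.Model(
    display_name="my-explainable-model",                  # placeholder
    artifact_uri="gs://my-bucket/model/",                 # placeholder
    container_spec={"image_uri": "gcr.io/my-project/my-serving-image:latest"},
    explanation_spec=aiplatform_v1beta1.ExplanationSpec(
        parameters=parameters, metadata=metadata
    ),
)
client = aiplatform_v1beta1.ModelServiceClient(
    client_options={"api_endpoint": "us-central1-aiplatform.googleapis.com"}
)
operation = client.upload_model(
    parent="projects/my-project/locations/us-central1", model=model
)
print(operation.result(timeout=1800).model)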
diff --git a/.sample_configs/process_configs.yaml b/.sample_configs/process_configs.yaml
index e89a42e4c8..4e6608b4fd 100644
--- a/.sample_configs/process_configs.yaml
+++ b/.sample_configs/process_configs.yaml
@@ -14,12 +14,16 @@ cancel_custom_job_sample: {}
cancel_data_labeling_job_sample: {}
cancel_hyperparameter_tuning_job_sample: {}
cancel_training_pipeline_sample: {}
-create_batch_prediction_job_bigquery_sample: {}
+create_batch_prediction_job_bigquery_sample:
+ gapic_module_child: aiplatform_v1beta1
+ namespace: null
create_batch_prediction_job_custom_image_explain_sample: {}
create_batch_prediction_job_custom_tabular_explain_sample: {}
create_batch_prediction_job_sample: {}
create_batch_prediction_job_tabular_explain_sample: {}
-create_batch_prediction_job_tabular_forecasting_sample: {}
+create_batch_prediction_job_tabular_forecasting_sample:
+ gapic_module_child: aiplatform_v1beta1
+ namespace: null
create_batch_prediction_job_text_classification_sample: {}
create_batch_prediction_job_text_entity_extraction_sample: {}
create_batch_prediction_job_text_sentiment_analysis_sample: {}
@@ -120,25 +124,23 @@ deploy_model_sample:
skip:
- explanation_spec
explain_custom_image_sample:
- api_endpoint: us-central1-prediction-aiplatform.googleapis.com
max_depth: 1
resource_name: endpoint
explain_custom_tabular_sample:
- api_endpoint: us-central1-prediction-aiplatform.googleapis.com
max_depth: 1
resource_name: endpoint
comments:
approximation_error: This is the approximation error.
attributions: Feature attributions.
explain_sample:
- api_endpoint: us-central1-prediction-aiplatform.googleapis.com
max_depth: 1
resource_name: endpoint
comments:
approximation_error: This is the approximation error.
attributions: Feature attributions.
explain_tabular_sample:
- api_endpoint: us-central1-prediction-aiplatform.googleapis.com
+ gapic_module_child: aiplatform_v1beta1
+ namespace: null
max_depth: 2
resource_name: endpoint
comments:
@@ -148,11 +150,23 @@ export_data_sample:
resource_name: dataset
export_evaluated_data_items_sample:
resource_name: model_evaluation
-export_model_sample: {}
-export_model_tabular_classification_sample: {}
-export_model_video_action_recognition_sample: {}
-export_model_video_classification_sample: {}
-export_model_video_object_tracking_sample: {}
+export_model_sample:
+ lro_metadata:
+ - output_info
+export_model_tabular_classification_sample:
+ gapic_module_child: aiplatform_v1beta1
+ namespace: null
+ lro_metadata:
+ - output_info
+export_model_video_action_recognition_sample:
+ lro_metadata:
+ - output_info
+export_model_video_classification_sample:
+ lro_metadata:
+ - output_info
+export_model_video_object_tracking_sample:
+ lro_metadata:
+ - output_info
get_annotation_spec_sample: {}
get_batch_prediction_job_sample:
skip:
@@ -240,14 +254,12 @@ list_models_sample: {}
list_specialist_pools_sample: {}
list_training_pipelines_sample: {}
predict_custom_trained_model_sample:
- api_endpoint: us-central1-prediction-aiplatform.googleapis.com
max_depth: 1
resource_name: endpoint
comments:
predictions: The predictions are a google.protobuf.Value representation of the
model's predictions.
predict_image_classification_sample:
- api_endpoint: us-central1-prediction-aiplatform.googleapis.com
max_depth: 1
resource_name: endpoint
schema_types:
@@ -257,11 +269,9 @@ predict_image_classification_sample:
predictions: See gs://google-cloud-aiplatform/schema/predict/prediction/classification.yaml
for the format of the predictions.
predict_image_file_sample:
- api_endpoint: us-central1-prediction-aiplatform.googleapis.com
max_depth: 1
resource_name: endpoint
predict_image_object_detection_sample:
- api_endpoint: us-central1-prediction-aiplatform.googleapis.com
max_depth: 1
resource_name: endpoint
schema_types:
@@ -271,14 +281,12 @@ predict_image_object_detection_sample:
predictions: See gs://google-cloud-aiplatform/schema/predict/prediction/image_object_detection.yaml
for the format of the predictions.
predict_sample:
- api_endpoint: us-central1-prediction-aiplatform.googleapis.com
max_depth: 1
resource_name: endpoint
region_tags:
- aiplatform_predict_sample
- aiplatform_predict_tutorial
predict_tabular_classification_sample:
- api_endpoint: us-central1-prediction-aiplatform.googleapis.com
max_depth: 1
resource_name: endpoint
comments:
@@ -286,14 +294,12 @@ predict_tabular_classification_sample:
for the format of the predictions.
predict_tabular_forecasting_sample: {}
predict_tabular_regression_sample:
- api_endpoint: us-central1-prediction-aiplatform.googleapis.com
max_depth: 1
resource_name: endpoint
comments:
predictions: See gs://google-cloud-aiplatform/schema/predict/prediction/tables_regression.yaml
for the format of the predictions.
predict_text_classification_single_label_sample:
- api_endpoint: us-central1-prediction-aiplatform.googleapis.com
max_depth: 1
resource_name: endpoint
schema_types:
@@ -302,7 +308,6 @@ predict_text_classification_single_label_sample:
predictions: See gs://google-cloud-aiplatform/schema/predict/prediction/text_classification.yaml
for the format of the predictions.
predict_text_entity_extraction_sample:
- api_endpoint: us-central1-prediction-aiplatform.googleapis.com
max_depth: 1
resource_name: endpoint
schema_types:
@@ -311,7 +316,6 @@ predict_text_entity_extraction_sample:
predictions: See gs://google-cloud-aiplatform/schema/predict/prediction/text_extraction.yaml
for the format of the predictions.
predict_text_sentiment_analysis_sample:
- api_endpoint: us-central1-prediction-aiplatform.googleapis.com
max_depth: 1
resource_name: endpoint
schema_types:
@@ -328,8 +332,12 @@ update_model_sample: {}
update_specialist_pool_sample: {}
upload_model_custom_container_sample:
timeout: 1800
-upload_model_explain_image_managed_container_sample: {}
-upload_model_explain_tabular_managed_container_sample: {}
+upload_model_explain_image_managed_container_sample:
+ gapic_module_child: aiplatform_v1beta1
+ namespace: null
+upload_model_explain_tabular_managed_container_sample:
+ gapic_module_child: aiplatform_v1beta1
+ namespace: null
upload_model_managed_container_sample:
timeout: 1800
upload_model_sample:
diff --git a/.trampolinerc b/.trampolinerc
index 995ee29111..383b6ec89f 100644
--- a/.trampolinerc
+++ b/.trampolinerc
@@ -24,6 +24,7 @@ required_envvars+=(
pass_down_envvars+=(
"STAGING_BUCKET"
"V2_STAGING_BUCKET"
+ "NOX_SESSION"
)
# Prevent unintentional override on the default image.
diff --git a/CHANGELOG.md b/CHANGELOG.md
index 7e2857250b..315d5a8da1 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -1,5 +1,23 @@
# Changelog
+## [0.5.0](https://www.github.com/googleapis/python-aiplatform/compare/v0.4.0...v0.5.0) (2021-02-17)
+
+
+### Features
+
+* exposes v1 enhanced types and adds tests ([#226](https://www.github.com/googleapis/python-aiplatform/issues/226)) ([42b587d](https://www.github.com/googleapis/python-aiplatform/commit/42b587de2805b9efacb6e1eb5bf05e50ffb37797))
+* LRO metadata ([#204](https://www.github.com/googleapis/python-aiplatform/issues/204)) ([2863dc0](https://www.github.com/googleapis/python-aiplatform/commit/2863dc0ba2337a0e997b95e2cb8669abd62635e3))
+* moves manual enhanced lib edits outside of generated files ([#198](https://www.github.com/googleapis/python-aiplatform/issues/198)) ([a04a561](https://www.github.com/googleapis/python-aiplatform/commit/a04a5613cec36811db8768da5ea7c3229da3074b))
+* updates python-aiplatform to v1 ([#212](https://www.github.com/googleapis/python-aiplatform/issues/212)) ([efc00ed](https://www.github.com/googleapis/python-aiplatform/commit/efc00ed6bb838dceaee7ad9469cc51d1500a365d))
+
+
+### Bug Fixes
+
+* correct text sentiment analysis sample ([#222](https://www.github.com/googleapis/python-aiplatform/issues/222)) ([0befde3](https://www.github.com/googleapis/python-aiplatform/commit/0befde36bfd4ff1b5161b7ceb3bb55f6e7d8ea37))
+* **deps:** remove optional dependencies ([#187](https://www.github.com/googleapis/python-aiplatform/issues/187)) ([6589383](https://www.github.com/googleapis/python-aiplatform/commit/6589383f149fcf463d153fe76973bd874ff3967a))
+* Fix sample test ([#215](https://www.github.com/googleapis/python-aiplatform/issues/215)) ([cdeb0ec](https://www.github.com/googleapis/python-aiplatform/commit/cdeb0ec30c334ff2b5d5e06bc976e824d6e18c04))
+* reduces image size for test image ([#213](https://www.github.com/googleapis/python-aiplatform/issues/213)) ([3ed0e09](https://www.github.com/googleapis/python-aiplatform/commit/3ed0e0961f104762194d9ac598a81017ac9d2392))
+
## [0.4.0](https://www.github.com/googleapis/python-aiplatform/compare/v0.3.1...v0.4.0) (2021-01-08)
diff --git a/CONTRIBUTING.rst b/CONTRIBUTING.rst
index d206a2d88d..3cab430ce1 100644
--- a/CONTRIBUTING.rst
+++ b/CONTRIBUTING.rst
@@ -21,8 +21,8 @@ In order to add a feature:
- The feature must be documented in both the API and narrative
documentation.
-- The feature must work fully on the following CPython versions: 2.7,
- 3.5, 3.6, 3.7 and 3.8 on both UNIX and Windows.
+- The feature must work fully on the following CPython versions:
+ 3.6, 3.7, 3.8 and 3.9 on both UNIX and Windows.
- The feature must not add unnecessary dependencies (where
"unnecessary" is of course subjective, but new dependencies should
@@ -111,6 +111,16 @@ Coding Style
should point to the official ``googleapis`` checkout and the
the branch should be the main branch on that remote (``master``).
+- This repository contains configuration for the
+  `pre-commit <https://pre-commit.com/>`__ tool, which automates checking
+ our linters during a commit. If you have it installed on your ``$PATH``,
+ you can enable enforcing those checks via:
+
+.. code-block:: bash
+
+ $ pre-commit install
+ pre-commit installed at .git/hooks/pre-commit
+
Exceptions to PEP8:
- Many unit tests use a helper method, ``_call_fut`` ("FUT" is short for
@@ -192,25 +202,24 @@ Supported Python Versions
We support:
-- `Python 3.5`_
- `Python 3.6`_
- `Python 3.7`_
- `Python 3.8`_
+- `Python 3.9`_
-.. _Python 3.5: https://docs.python.org/3.5/
.. _Python 3.6: https://docs.python.org/3.6/
.. _Python 3.7: https://docs.python.org/3.7/
.. _Python 3.8: https://docs.python.org/3.8/
+.. _Python 3.9: https://docs.python.org/3.9/
Supported versions can be found in our ``noxfile.py`` `config`_.
.. _config: https://github.com/googleapis/python-aiplatform/blob/master/noxfile.py
-Python 2.7 support is deprecated. All code changes should maintain Python 2.7 compatibility until January 1, 2020.
We also explicitly decided to support Python 3 beginning with version
-3.5. Reasons for this include:
+3.6. Reasons for this include:
- Encouraging use of newest versions of Python 3
- Taking the lead of `prominent`_ open-source `projects`_
diff --git a/LICENSE b/LICENSE
index a8ee855de2..d645695673 100644
--- a/LICENSE
+++ b/LICENSE
@@ -1,6 +1,7 @@
- Apache License
+
+ Apache License
Version 2.0, January 2004
- https://www.apache.org/licenses/
+ http://www.apache.org/licenses/
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
@@ -192,7 +193,7 @@
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
- https://www.apache.org/licenses/LICENSE-2.0
+ http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
diff --git a/docs/_static/custom.css b/docs/_static/custom.css
index 0abaf229fc..bcd37bbd3c 100644
--- a/docs/_static/custom.css
+++ b/docs/_static/custom.css
@@ -1,4 +1,9 @@
div#python2-eol {
border-color: red;
border-width: medium;
-}
\ No newline at end of file
+}
+
+/* Ensure minimum width for 'Parameters' / 'Returns' column */
+dl.field-list > dt {
+ min-width: 100px
+}
diff --git a/docs/aiplatform_v1/dataset_service.rst b/docs/aiplatform_v1/dataset_service.rst
new file mode 100644
index 0000000000..46694cf2c0
--- /dev/null
+++ b/docs/aiplatform_v1/dataset_service.rst
@@ -0,0 +1,11 @@
+DatasetService
+--------------------------------
+
+.. automodule:: google.cloud.aiplatform_v1.services.dataset_service
+ :members:
+ :inherited-members:
+
+
+.. automodule:: google.cloud.aiplatform_v1.services.dataset_service.pagers
+ :members:
+ :inherited-members:
diff --git a/docs/aiplatform_v1/endpoint_service.rst b/docs/aiplatform_v1/endpoint_service.rst
new file mode 100644
index 0000000000..29d05c30b4
--- /dev/null
+++ b/docs/aiplatform_v1/endpoint_service.rst
@@ -0,0 +1,11 @@
+EndpointService
+---------------------------------
+
+.. automodule:: google.cloud.aiplatform_v1.services.endpoint_service
+ :members:
+ :inherited-members:
+
+
+.. automodule:: google.cloud.aiplatform_v1.services.endpoint_service.pagers
+ :members:
+ :inherited-members:
diff --git a/docs/aiplatform_v1/job_service.rst b/docs/aiplatform_v1/job_service.rst
new file mode 100644
index 0000000000..6bfd457244
--- /dev/null
+++ b/docs/aiplatform_v1/job_service.rst
@@ -0,0 +1,11 @@
+JobService
+----------------------------
+
+.. automodule:: google.cloud.aiplatform_v1.services.job_service
+ :members:
+ :inherited-members:
+
+
+.. automodule:: google.cloud.aiplatform_v1.services.job_service.pagers
+ :members:
+ :inherited-members:
diff --git a/docs/aiplatform_v1/migration_service.rst b/docs/aiplatform_v1/migration_service.rst
new file mode 100644
index 0000000000..f322a1b3bf
--- /dev/null
+++ b/docs/aiplatform_v1/migration_service.rst
@@ -0,0 +1,11 @@
+MigrationService
+----------------------------------
+
+.. automodule:: google.cloud.aiplatform_v1.services.migration_service
+ :members:
+ :inherited-members:
+
+
+.. automodule:: google.cloud.aiplatform_v1.services.migration_service.pagers
+ :members:
+ :inherited-members:
diff --git a/docs/aiplatform_v1/model_service.rst b/docs/aiplatform_v1/model_service.rst
new file mode 100644
index 0000000000..ca269a9ad2
--- /dev/null
+++ b/docs/aiplatform_v1/model_service.rst
@@ -0,0 +1,11 @@
+ModelService
+------------------------------
+
+.. automodule:: google.cloud.aiplatform_v1.services.model_service
+ :members:
+ :inherited-members:
+
+
+.. automodule:: google.cloud.aiplatform_v1.services.model_service.pagers
+ :members:
+ :inherited-members:
diff --git a/docs/aiplatform_v1/pipeline_service.rst b/docs/aiplatform_v1/pipeline_service.rst
new file mode 100644
index 0000000000..b718db39b4
--- /dev/null
+++ b/docs/aiplatform_v1/pipeline_service.rst
@@ -0,0 +1,11 @@
+PipelineService
+---------------------------------
+
+.. automodule:: google.cloud.aiplatform_v1.services.pipeline_service
+ :members:
+ :inherited-members:
+
+
+.. automodule:: google.cloud.aiplatform_v1.services.pipeline_service.pagers
+ :members:
+ :inherited-members:
diff --git a/docs/aiplatform_v1/prediction_service.rst b/docs/aiplatform_v1/prediction_service.rst
new file mode 100644
index 0000000000..fdda504879
--- /dev/null
+++ b/docs/aiplatform_v1/prediction_service.rst
@@ -0,0 +1,6 @@
+PredictionService
+-----------------------------------
+
+.. automodule:: google.cloud.aiplatform_v1.services.prediction_service
+ :members:
+ :inherited-members:
diff --git a/docs/aiplatform_v1/services.rst b/docs/aiplatform_v1/services.rst
new file mode 100644
index 0000000000..fd5a8c9aa7
--- /dev/null
+++ b/docs/aiplatform_v1/services.rst
@@ -0,0 +1,13 @@
+Services for Google Cloud Aiplatform v1 API
+===========================================
+.. toctree::
+ :maxdepth: 2
+
+ dataset_service
+ endpoint_service
+ job_service
+ migration_service
+ model_service
+ pipeline_service
+ prediction_service
+ specialist_pool_service
diff --git a/docs/aiplatform_v1/specialist_pool_service.rst b/docs/aiplatform_v1/specialist_pool_service.rst
new file mode 100644
index 0000000000..37ac386b31
--- /dev/null
+++ b/docs/aiplatform_v1/specialist_pool_service.rst
@@ -0,0 +1,11 @@
+SpecialistPoolService
+---------------------------------------
+
+.. automodule:: google.cloud.aiplatform_v1.services.specialist_pool_service
+ :members:
+ :inherited-members:
+
+
+.. automodule:: google.cloud.aiplatform_v1.services.specialist_pool_service.pagers
+ :members:
+ :inherited-members:
diff --git a/docs/aiplatform_v1/types.rst b/docs/aiplatform_v1/types.rst
new file mode 100644
index 0000000000..ad4454843f
--- /dev/null
+++ b/docs/aiplatform_v1/types.rst
@@ -0,0 +1,7 @@
+Types for Google Cloud Aiplatform v1 API
+========================================
+
+.. automodule:: google.cloud.aiplatform_v1.types
+ :members:
+ :undoc-members:
+ :show-inheritance:
diff --git a/docs/aiplatform_v1beta1/dataset_service.rst b/docs/aiplatform_v1beta1/dataset_service.rst
new file mode 100644
index 0000000000..ad3866e1e4
--- /dev/null
+++ b/docs/aiplatform_v1beta1/dataset_service.rst
@@ -0,0 +1,11 @@
+DatasetService
+--------------------------------
+
+.. automodule:: google.cloud.aiplatform_v1beta1.services.dataset_service
+ :members:
+ :inherited-members:
+
+
+.. automodule:: google.cloud.aiplatform_v1beta1.services.dataset_service.pagers
+ :members:
+ :inherited-members:
diff --git a/docs/aiplatform_v1beta1/endpoint_service.rst b/docs/aiplatform_v1beta1/endpoint_service.rst
new file mode 100644
index 0000000000..c5ce91ed19
--- /dev/null
+++ b/docs/aiplatform_v1beta1/endpoint_service.rst
@@ -0,0 +1,11 @@
+EndpointService
+---------------------------------
+
+.. automodule:: google.cloud.aiplatform_v1beta1.services.endpoint_service
+ :members:
+ :inherited-members:
+
+
+.. automodule:: google.cloud.aiplatform_v1beta1.services.endpoint_service.pagers
+ :members:
+ :inherited-members:
diff --git a/docs/aiplatform_v1beta1/job_service.rst b/docs/aiplatform_v1beta1/job_service.rst
new file mode 100644
index 0000000000..eee169a096
--- /dev/null
+++ b/docs/aiplatform_v1beta1/job_service.rst
@@ -0,0 +1,11 @@
+JobService
+----------------------------
+
+.. automodule:: google.cloud.aiplatform_v1beta1.services.job_service
+ :members:
+ :inherited-members:
+
+
+.. automodule:: google.cloud.aiplatform_v1beta1.services.job_service.pagers
+ :members:
+ :inherited-members:
diff --git a/docs/aiplatform_v1beta1/migration_service.rst b/docs/aiplatform_v1beta1/migration_service.rst
new file mode 100644
index 0000000000..42ff54c101
--- /dev/null
+++ b/docs/aiplatform_v1beta1/migration_service.rst
@@ -0,0 +1,11 @@
+MigrationService
+----------------------------------
+
+.. automodule:: google.cloud.aiplatform_v1beta1.services.migration_service
+ :members:
+ :inherited-members:
+
+
+.. automodule:: google.cloud.aiplatform_v1beta1.services.migration_service.pagers
+ :members:
+ :inherited-members:
diff --git a/docs/aiplatform_v1beta1/model_service.rst b/docs/aiplatform_v1beta1/model_service.rst
new file mode 100644
index 0000000000..0fc01a1bd6
--- /dev/null
+++ b/docs/aiplatform_v1beta1/model_service.rst
@@ -0,0 +1,11 @@
+ModelService
+------------------------------
+
+.. automodule:: google.cloud.aiplatform_v1beta1.services.model_service
+ :members:
+ :inherited-members:
+
+
+.. automodule:: google.cloud.aiplatform_v1beta1.services.model_service.pagers
+ :members:
+ :inherited-members:
diff --git a/docs/aiplatform_v1beta1/pipeline_service.rst b/docs/aiplatform_v1beta1/pipeline_service.rst
new file mode 100644
index 0000000000..465949eeb0
--- /dev/null
+++ b/docs/aiplatform_v1beta1/pipeline_service.rst
@@ -0,0 +1,11 @@
+PipelineService
+---------------------------------
+
+.. automodule:: google.cloud.aiplatform_v1beta1.services.pipeline_service
+ :members:
+ :inherited-members:
+
+
+.. automodule:: google.cloud.aiplatform_v1beta1.services.pipeline_service.pagers
+ :members:
+ :inherited-members:
diff --git a/docs/aiplatform_v1beta1/prediction_service.rst b/docs/aiplatform_v1beta1/prediction_service.rst
new file mode 100644
index 0000000000..03c1150df0
--- /dev/null
+++ b/docs/aiplatform_v1beta1/prediction_service.rst
@@ -0,0 +1,6 @@
+PredictionService
+-----------------------------------
+
+.. automodule:: google.cloud.aiplatform_v1beta1.services.prediction_service
+ :members:
+ :inherited-members:
diff --git a/docs/aiplatform_v1beta1/services.rst b/docs/aiplatform_v1beta1/services.rst
index 664c9df0a8..dd8c8a41bc 100644
--- a/docs/aiplatform_v1beta1/services.rst
+++ b/docs/aiplatform_v1beta1/services.rst
@@ -1,27 +1,13 @@
Services for Google Cloud Aiplatform v1beta1 API
================================================
+.. toctree::
+ :maxdepth: 2
-.. automodule:: google.cloud.aiplatform_v1beta1.services.dataset_service
- :members:
- :inherited-members:
-.. automodule:: google.cloud.aiplatform_v1beta1.services.endpoint_service
- :members:
- :inherited-members:
-.. automodule:: google.cloud.aiplatform_v1beta1.services.job_service
- :members:
- :inherited-members:
-.. automodule:: google.cloud.aiplatform_v1beta1.services.migration_service
- :members:
- :inherited-members:
-.. automodule:: google.cloud.aiplatform_v1beta1.services.model_service
- :members:
- :inherited-members:
-.. automodule:: google.cloud.aiplatform_v1beta1.services.pipeline_service
- :members:
- :inherited-members:
-.. automodule:: google.cloud.aiplatform_v1beta1.services.prediction_service
- :members:
- :inherited-members:
-.. automodule:: google.cloud.aiplatform_v1beta1.services.specialist_pool_service
- :members:
- :inherited-members:
+ dataset_service
+ endpoint_service
+ job_service
+ migration_service
+ model_service
+ pipeline_service
+ prediction_service
+ specialist_pool_service
diff --git a/docs/aiplatform_v1beta1/specialist_pool_service.rst b/docs/aiplatform_v1beta1/specialist_pool_service.rst
new file mode 100644
index 0000000000..4d264dc256
--- /dev/null
+++ b/docs/aiplatform_v1beta1/specialist_pool_service.rst
@@ -0,0 +1,11 @@
+SpecialistPoolService
+---------------------------------------
+
+.. automodule:: google.cloud.aiplatform_v1beta1.services.specialist_pool_service
+ :members:
+ :inherited-members:
+
+
+.. automodule:: google.cloud.aiplatform_v1beta1.services.specialist_pool_service.pagers
+ :members:
+ :inherited-members:
diff --git a/docs/aiplatform_v1beta1/types.rst b/docs/aiplatform_v1beta1/types.rst
index 19bab68ada..770675f8ea 100644
--- a/docs/aiplatform_v1beta1/types.rst
+++ b/docs/aiplatform_v1beta1/types.rst
@@ -3,4 +3,5 @@ Types for Google Cloud Aiplatform v1beta1 API
.. automodule:: google.cloud.aiplatform_v1beta1.types
:members:
+ :undoc-members:
:show-inheritance:
diff --git a/docs/definition_v1/types.rst b/docs/definition_v1/types.rst
new file mode 100644
index 0000000000..a1df2bce25
--- /dev/null
+++ b/docs/definition_v1/types.rst
@@ -0,0 +1,7 @@
+Types for Google Cloud Aiplatform V1 Schema Trainingjob Definition v1 API
+=========================================================================
+
+.. automodule:: google.cloud.aiplatform.v1.schema.trainingjob.definition_v1.types
+ :members:
+ :undoc-members:
+ :show-inheritance:
diff --git a/docs/definition_v1beta1/types.rst b/docs/definition_v1beta1/types.rst
index 3f351d03fc..f4fe7a5301 100644
--- a/docs/definition_v1beta1/types.rst
+++ b/docs/definition_v1beta1/types.rst
@@ -3,4 +3,5 @@ Types for Google Cloud Aiplatform V1beta1 Schema Trainingjob Definition v1beta1
.. automodule:: google.cloud.aiplatform.v1beta1.schema.trainingjob.definition_v1beta1.types
:members:
+ :undoc-members:
:show-inheritance:
diff --git a/docs/instance_v1/types.rst b/docs/instance_v1/types.rst
new file mode 100644
index 0000000000..564ab013ee
--- /dev/null
+++ b/docs/instance_v1/types.rst
@@ -0,0 +1,7 @@
+Types for Google Cloud Aiplatform V1 Schema Predict Instance v1 API
+===================================================================
+
+.. automodule:: google.cloud.aiplatform.v1.schema.predict.instance_v1.types
+ :members:
+ :undoc-members:
+ :show-inheritance:
diff --git a/docs/instance_v1beta1/types.rst b/docs/instance_v1beta1/types.rst
index c52ae4800c..7caa088065 100644
--- a/docs/instance_v1beta1/types.rst
+++ b/docs/instance_v1beta1/types.rst
@@ -3,4 +3,5 @@ Types for Google Cloud Aiplatform V1beta1 Schema Predict Instance v1beta1 API
.. automodule:: google.cloud.aiplatform.v1beta1.schema.predict.instance_v1beta1.types
:members:
+ :undoc-members:
:show-inheritance:
diff --git a/docs/params_v1/types.rst b/docs/params_v1/types.rst
new file mode 100644
index 0000000000..956ef5224d
--- /dev/null
+++ b/docs/params_v1/types.rst
@@ -0,0 +1,7 @@
+Types for Google Cloud Aiplatform V1 Schema Predict Params v1 API
+=================================================================
+
+.. automodule:: google.cloud.aiplatform.v1.schema.predict.params_v1.types
+ :members:
+ :undoc-members:
+ :show-inheritance:
diff --git a/docs/params_v1beta1/types.rst b/docs/params_v1beta1/types.rst
index ce7a29cb01..722a1d8ba0 100644
--- a/docs/params_v1beta1/types.rst
+++ b/docs/params_v1beta1/types.rst
@@ -3,4 +3,5 @@ Types for Google Cloud Aiplatform V1beta1 Schema Predict Params v1beta1 API
.. automodule:: google.cloud.aiplatform.v1beta1.schema.predict.params_v1beta1.types
:members:
+ :undoc-members:
:show-inheritance:
diff --git a/docs/prediction_v1/types.rst b/docs/prediction_v1/types.rst
new file mode 100644
index 0000000000..a97faf34de
--- /dev/null
+++ b/docs/prediction_v1/types.rst
@@ -0,0 +1,7 @@
+Types for Google Cloud Aiplatform V1 Schema Predict Prediction v1 API
+=====================================================================
+
+.. automodule:: google.cloud.aiplatform.v1.schema.predict.prediction_v1.types
+ :members:
+ :undoc-members:
+ :show-inheritance:
diff --git a/docs/prediction_v1beta1/types.rst b/docs/prediction_v1beta1/types.rst
index cdbe7f2842..b14182d6d7 100644
--- a/docs/prediction_v1beta1/types.rst
+++ b/docs/prediction_v1beta1/types.rst
@@ -3,4 +3,5 @@ Types for Google Cloud Aiplatform V1beta1 Schema Predict Prediction v1beta1 API
.. automodule:: google.cloud.aiplatform.v1beta1.schema.predict.prediction_v1beta1.types
:members:
+ :undoc-members:
:show-inheritance:
diff --git a/google/cloud/aiplatform/gapic/__init__.py b/google/cloud/aiplatform/gapic/__init__.py
index 790ebeffdf..75a9bcd38f 100644
--- a/google/cloud/aiplatform/gapic/__init__.py
+++ b/google/cloud/aiplatform/gapic/__init__.py
@@ -16,9 +16,9 @@
#
# The latest GAPIC version is exported to the google.cloud.aiplatform.gapic namespace.
-from google.cloud.aiplatform_v1beta1 import *
+from google.cloud.aiplatform_v1 import *
from google.cloud.aiplatform.gapic import schema
-from google.cloud import aiplatform_v1beta1 as v1beta1
+from google.cloud import aiplatform_v1 as v1
__all__ = ()
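With this re-export change, code that reaches the GAPIC surface through the alias now gets the v1 clients and types, while aiplatform_v1beta1 stays importable explicitly. A short sketch; project and location are placeholders:

from google.cloud.aiplatform import gapic

client = gapic.DatasetServiceClient(
    client_options={"api_endpoint": "us-central1-aiplatform.googleapis.com"}
)
for dataset in client.list_datasets(
    parent="projects/my-project/locations/us-central1"   # placeholder
):
    print(dataset.display_name)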
diff --git a/google/cloud/aiplatform/gapic/schema/__init__.py b/google/cloud/aiplatform/gapic/schema/__init__.py
index 01fd26b30a..e726749c77 100644
--- a/google/cloud/aiplatform/gapic/schema/__init__.py
+++ b/google/cloud/aiplatform/gapic/schema/__init__.py
@@ -14,12 +14,51 @@
# See the License for the specific language governing permissions and
# limitations under the License.
+from google.cloud.aiplatform.helpers import _decorators
+from google.cloud.aiplatform.v1.schema import predict
+from google.cloud.aiplatform.v1.schema import trainingjob
+from google.cloud.aiplatform.v1beta1.schema import predict as predict_v1beta1
+from google.cloud.aiplatform.v1beta1.schema import trainingjob as trainingjob_v1beta1
-from google.cloud.aiplatform.v1beta1.schema import predict
-from google.cloud.aiplatform.v1beta1.schema import trainingjob
+# import the v1 submodules for enhancement
+from google.cloud.aiplatform.v1.schema.predict.instance_v1 import types as instance
+from google.cloud.aiplatform.v1.schema.predict.params_v1 import types as params
+from google.cloud.aiplatform.v1.schema.predict.prediction_v1 import types as prediction
+from google.cloud.aiplatform.v1.schema.trainingjob.definition_v1 import (
+ types as definition,
+)
+# import the v1beta1 submodules for enhancement
+from google.cloud.aiplatform.v1beta1.schema.predict.instance_v1beta1 import (
+ types as instance_v1beta1,
+)
+from google.cloud.aiplatform.v1beta1.schema.predict.params_v1beta1 import (
+ types as params_v1beta1,
+)
+from google.cloud.aiplatform.v1beta1.schema.predict.prediction_v1beta1 import (
+ types as prediction_v1beta1,
+)
+from google.cloud.aiplatform.v1beta1.schema.trainingjob.definition_v1beta1 import (
+ types as definition_v1beta1,
+)
__all__ = (
"predict",
"trainingjob",
+ "predict_v1beta1",
+ "trainingjob_v1beta1",
)
+
+enhanced_types_packages = [
+ instance,
+ params,
+ prediction,
+ definition,
+ instance_v1beta1,
+ params_v1beta1,
+ prediction_v1beta1,
+ definition_v1beta1,
+]
+
+for pkg in enhanced_types_packages:
+ _decorators._add_methods_to_classes_in_package(pkg)
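A sketch of the surface this wiring enables: the schema helper exposes the v1 enhanced types under predict/trainingjob, and the loop above runs each types module through _add_methods_to_classes_in_package so the generated classes pick up the conversion helpers defined in google.cloud.aiplatform.helpers. Values below are placeholders:

from google.cloud.aiplatform.gapic import schema

# v1 enhanced types, reachable through the versionless aliases wired above.
instance = schema.predict.instance.ImageClassificationPredictionInstance(
    content="...base64-encoded image bytes...",  # placeholder
    mime_type="image/jpeg",
)
params = schema.predict.params.ImageClassificationPredictionParams(
    confidence_threshold=0.5, max_predictions=5,
)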
diff --git a/google/cloud/aiplatform/v1/schema/__init__.py b/google/cloud/aiplatform/v1/schema/__init__.py
new file mode 100644
index 0000000000..579513a95f
--- /dev/null
+++ b/google/cloud/aiplatform/v1/schema/__init__.py
@@ -0,0 +1,25 @@
+# -*- coding: utf-8 -*-
+
+# Copyright 2021 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+
+from google.cloud.aiplatform.v1.schema import predict
+from google.cloud.aiplatform.v1.schema import trainingjob
+
+
+__all__ = (
+ "predict",
+ "trainingjob",
+)
diff --git a/google/cloud/aiplatform/v1/schema/predict/__init__.py b/google/cloud/aiplatform/v1/schema/predict/__init__.py
new file mode 100644
index 0000000000..36277d5ae8
--- /dev/null
+++ b/google/cloud/aiplatform/v1/schema/predict/__init__.py
@@ -0,0 +1,26 @@
+# -*- coding: utf-8 -*-
+
+# Copyright 2020 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+
+from google.cloud.aiplatform.v1.schema.predict import instance
+from google.cloud.aiplatform.v1.schema.predict import params
+from google.cloud.aiplatform.v1.schema.predict import prediction
+
+__all__ = (
+ "instance",
+ "params",
+ "prediction",
+)
diff --git a/google/cloud/aiplatform/v1/schema/predict/instance/__init__.py b/google/cloud/aiplatform/v1/schema/predict/instance/__init__.py
new file mode 100644
index 0000000000..fb2668afb5
--- /dev/null
+++ b/google/cloud/aiplatform/v1/schema/predict/instance/__init__.py
@@ -0,0 +1,56 @@
+# -*- coding: utf-8 -*-
+
+# Copyright 2020 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+from google.cloud.aiplatform.v1.schema.predict.instance_v1.types.image_classification import (
+ ImageClassificationPredictionInstance,
+)
+from google.cloud.aiplatform.v1.schema.predict.instance_v1.types.image_object_detection import (
+ ImageObjectDetectionPredictionInstance,
+)
+from google.cloud.aiplatform.v1.schema.predict.instance_v1.types.image_segmentation import (
+ ImageSegmentationPredictionInstance,
+)
+from google.cloud.aiplatform.v1.schema.predict.instance_v1.types.text_classification import (
+ TextClassificationPredictionInstance,
+)
+from google.cloud.aiplatform.v1.schema.predict.instance_v1.types.text_extraction import (
+ TextExtractionPredictionInstance,
+)
+from google.cloud.aiplatform.v1.schema.predict.instance_v1.types.text_sentiment import (
+ TextSentimentPredictionInstance,
+)
+from google.cloud.aiplatform.v1.schema.predict.instance_v1.types.video_action_recognition import (
+ VideoActionRecognitionPredictionInstance,
+)
+from google.cloud.aiplatform.v1.schema.predict.instance_v1.types.video_classification import (
+ VideoClassificationPredictionInstance,
+)
+from google.cloud.aiplatform.v1.schema.predict.instance_v1.types.video_object_tracking import (
+ VideoObjectTrackingPredictionInstance,
+)
+
+__all__ = (
+ "ImageClassificationPredictionInstance",
+ "ImageObjectDetectionPredictionInstance",
+ "ImageSegmentationPredictionInstance",
+ "TextClassificationPredictionInstance",
+ "TextExtractionPredictionInstance",
+ "TextSentimentPredictionInstance",
+ "VideoActionRecognitionPredictionInstance",
+ "VideoClassificationPredictionInstance",
+ "VideoObjectTrackingPredictionInstance",
+)
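The versionless package above simply re-exports the classes generated in instance_v1, so both import paths resolve to the same objects; a quick sanity check:

from google.cloud.aiplatform.v1.schema.predict import instance
from google.cloud.aiplatform.v1.schema.predict import instance_v1

assert (
    instance.TextSentimentPredictionInstance
    is instance_v1.TextSentimentPredictionInstance
)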
diff --git a/google/cloud/aiplatform/v1/schema/predict/instance/py.typed b/google/cloud/aiplatform/v1/schema/predict/instance/py.typed
new file mode 100644
index 0000000000..f70e7f605a
--- /dev/null
+++ b/google/cloud/aiplatform/v1/schema/predict/instance/py.typed
@@ -0,0 +1,2 @@
+# Marker file for PEP 561.
+# The google-cloud-aiplatform-v1-schema-predict-instance package uses inline types.
diff --git a/google/cloud/aiplatform/v1/schema/predict/instance_v1/__init__.py b/google/cloud/aiplatform/v1/schema/predict/instance_v1/__init__.py
new file mode 100644
index 0000000000..f6d9a128ad
--- /dev/null
+++ b/google/cloud/aiplatform/v1/schema/predict/instance_v1/__init__.py
@@ -0,0 +1,39 @@
+# -*- coding: utf-8 -*-
+
+# Copyright 2020 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+from .types.image_classification import ImageClassificationPredictionInstance
+from .types.image_object_detection import ImageObjectDetectionPredictionInstance
+from .types.image_segmentation import ImageSegmentationPredictionInstance
+from .types.text_classification import TextClassificationPredictionInstance
+from .types.text_extraction import TextExtractionPredictionInstance
+from .types.text_sentiment import TextSentimentPredictionInstance
+from .types.video_action_recognition import VideoActionRecognitionPredictionInstance
+from .types.video_classification import VideoClassificationPredictionInstance
+from .types.video_object_tracking import VideoObjectTrackingPredictionInstance
+
+
+__all__ = (
+ "ImageObjectDetectionPredictionInstance",
+ "ImageSegmentationPredictionInstance",
+ "TextClassificationPredictionInstance",
+ "TextExtractionPredictionInstance",
+ "TextSentimentPredictionInstance",
+ "VideoActionRecognitionPredictionInstance",
+ "VideoClassificationPredictionInstance",
+ "VideoObjectTrackingPredictionInstance",
+ "ImageClassificationPredictionInstance",
+)
diff --git a/google/cloud/aiplatform/v1/schema/predict/instance_v1/py.typed b/google/cloud/aiplatform/v1/schema/predict/instance_v1/py.typed
new file mode 100644
index 0000000000..f70e7f605a
--- /dev/null
+++ b/google/cloud/aiplatform/v1/schema/predict/instance_v1/py.typed
@@ -0,0 +1,2 @@
+# Marker file for PEP 561.
+# The google-cloud-aiplatform-v1-schema-predict-instance package uses inline types.
diff --git a/google/cloud/aiplatform/v1/schema/predict/instance_v1/types/__init__.py b/google/cloud/aiplatform/v1/schema/predict/instance_v1/types/__init__.py
new file mode 100644
index 0000000000..041fe6cdb1
--- /dev/null
+++ b/google/cloud/aiplatform/v1/schema/predict/instance_v1/types/__init__.py
@@ -0,0 +1,38 @@
+# -*- coding: utf-8 -*-
+
+# Copyright 2020 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+from .image_classification import ImageClassificationPredictionInstance
+from .image_object_detection import ImageObjectDetectionPredictionInstance
+from .image_segmentation import ImageSegmentationPredictionInstance
+from .text_classification import TextClassificationPredictionInstance
+from .text_extraction import TextExtractionPredictionInstance
+from .text_sentiment import TextSentimentPredictionInstance
+from .video_action_recognition import VideoActionRecognitionPredictionInstance
+from .video_classification import VideoClassificationPredictionInstance
+from .video_object_tracking import VideoObjectTrackingPredictionInstance
+
+__all__ = (
+ "ImageClassificationPredictionInstance",
+ "ImageObjectDetectionPredictionInstance",
+ "ImageSegmentationPredictionInstance",
+ "TextClassificationPredictionInstance",
+ "TextExtractionPredictionInstance",
+ "TextSentimentPredictionInstance",
+ "VideoActionRecognitionPredictionInstance",
+ "VideoClassificationPredictionInstance",
+ "VideoObjectTrackingPredictionInstance",
+)
diff --git a/google/cloud/aiplatform/v1/schema/predict/instance_v1/types/image_classification.py b/google/cloud/aiplatform/v1/schema/predict/instance_v1/types/image_classification.py
new file mode 100644
index 0000000000..b5fa9b4dbf
--- /dev/null
+++ b/google/cloud/aiplatform/v1/schema/predict/instance_v1/types/image_classification.py
@@ -0,0 +1,51 @@
+# -*- coding: utf-8 -*-
+
+# Copyright 2020 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+import proto # type: ignore
+
+
+__protobuf__ = proto.module(
+ package="google.cloud.aiplatform.v1.schema.predict.instance",
+ manifest={"ImageClassificationPredictionInstance",},
+)
+
+
+class ImageClassificationPredictionInstance(proto.Message):
+ r"""Prediction input format for Image Classification.
+
+ Attributes:
+ content (str):
+ The image bytes or GCS URI to make the
+ prediction on.
+ mime_type (str):
+ The MIME type of the content of the image.
+ Only the images in below listed MIME types are
+ supported. - image/jpeg
+ - image/gif
+ - image/png
+ - image/webp
+ - image/bmp
+ - image/tiff
+ - image/vnd.microsoft.icon
+ """
+
+ content = proto.Field(proto.STRING, number=1)
+
+ mime_type = proto.Field(proto.STRING, number=2)
+
+
+__all__ = tuple(sorted(__protobuf__.manifest))
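A hedged usage sketch for this message: build the instance, convert it to the google.protobuf.Value the API expects, and send it to the v1 PredictionService. The file path, project and endpoint ID are placeholders:

import base64

from google.cloud import aiplatform_v1
from google.cloud.aiplatform.v1.schema.predict.instance_v1 import (
    ImageClassificationPredictionInstance,
)
from google.protobuf import json_format
from google.protobuf.struct_pb2 import Value

with open("image.jpg", "rb") as f:                        # placeholder file
    content = base64.b64encode(f.read()).decode("utf-8")

instance = ImageClassificationPredictionInstance(
    content=content, mime_type="image/jpeg"
)
# Convert the proto-plus message into a google.protobuf.Value instance.
instance_value = json_format.ParseDict(
    ImageClassificationPredictionInstance.to_dict(instance), Value()
)

client = aiplatform_v1.PredictionServiceClient(
    client_options={"api_endpoint": "us-central1-aiplatform.googleapis.com"}
)
response = client.predict(
    endpoint="projects/my-project/locations/us-central1/endpoints/1234567890",
    instances=[instance_value],
)
print(response.predictions)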
diff --git a/google/cloud/aiplatform/v1/schema/predict/instance_v1/types/image_object_detection.py b/google/cloud/aiplatform/v1/schema/predict/instance_v1/types/image_object_detection.py
new file mode 100644
index 0000000000..45752ce7e2
--- /dev/null
+++ b/google/cloud/aiplatform/v1/schema/predict/instance_v1/types/image_object_detection.py
@@ -0,0 +1,51 @@
+# -*- coding: utf-8 -*-
+
+# Copyright 2020 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+import proto # type: ignore
+
+
+__protobuf__ = proto.module(
+ package="google.cloud.aiplatform.v1.schema.predict.instance",
+ manifest={"ImageObjectDetectionPredictionInstance",},
+)
+
+
+class ImageObjectDetectionPredictionInstance(proto.Message):
+ r"""Prediction input format for Image Object Detection.
+
+ Attributes:
+ content (str):
+ The image bytes or GCS URI to make the
+ prediction on.
+ mime_type (str):
+ The MIME type of the content of the image.
+ Only the images in below listed MIME types are
+ supported. - image/jpeg
+ - image/gif
+ - image/png
+ - image/webp
+ - image/bmp
+ - image/tiff
+ - image/vnd.microsoft.icon
+ """
+
+ content = proto.Field(proto.STRING, number=1)
+
+ mime_type = proto.Field(proto.STRING, number=2)
+
+
+__all__ = tuple(sorted(__protobuf__.manifest))
diff --git a/google/cloud/aiplatform/v1/schema/predict/instance_v1/types/image_segmentation.py b/google/cloud/aiplatform/v1/schema/predict/instance_v1/types/image_segmentation.py
new file mode 100644
index 0000000000..cb436d7029
--- /dev/null
+++ b/google/cloud/aiplatform/v1/schema/predict/instance_v1/types/image_segmentation.py
@@ -0,0 +1,45 @@
+# -*- coding: utf-8 -*-
+
+# Copyright 2020 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+import proto # type: ignore
+
+
+__protobuf__ = proto.module(
+ package="google.cloud.aiplatform.v1.schema.predict.instance",
+ manifest={"ImageSegmentationPredictionInstance",},
+)
+
+
+class ImageSegmentationPredictionInstance(proto.Message):
+ r"""Prediction input format for Image Segmentation.
+
+ Attributes:
+ content (str):
+ The image bytes to make the predictions on.
+ mime_type (str):
+ The MIME type of the content of the image.
+ Only the images in below listed MIME types are
+ supported. - image/jpeg
+ - image/png
+ """
+
+ content = proto.Field(proto.STRING, number=1)
+
+ mime_type = proto.Field(proto.STRING, number=2)
+
+
+__all__ = tuple(sorted(__protobuf__.manifest))
diff --git a/google/cloud/aiplatform/v1/schema/predict/instance_v1/types/text_classification.py b/google/cloud/aiplatform/v1/schema/predict/instance_v1/types/text_classification.py
new file mode 100644
index 0000000000..ceff5308b7
--- /dev/null
+++ b/google/cloud/aiplatform/v1/schema/predict/instance_v1/types/text_classification.py
@@ -0,0 +1,44 @@
+# -*- coding: utf-8 -*-
+
+# Copyright 2020 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+import proto # type: ignore
+
+
+__protobuf__ = proto.module(
+ package="google.cloud.aiplatform.v1.schema.predict.instance",
+ manifest={"TextClassificationPredictionInstance",},
+)
+
+
+class TextClassificationPredictionInstance(proto.Message):
+ r"""Prediction input format for Text Classification.
+
+ Attributes:
+ content (str):
+ The text snippet to make the predictions on.
+ mime_type (str):
+ The MIME type of the text snippet. The
+ supported MIME types are listed below.
+ - text/plain
+ """
+
+ content = proto.Field(proto.STRING, number=1)
+
+ mime_type = proto.Field(proto.STRING, number=2)
+
+
+__all__ = tuple(sorted(__protobuf__.manifest))
diff --git a/google/cloud/aiplatform/v1/schema/predict/instance_v1/types/text_extraction.py b/google/cloud/aiplatform/v1/schema/predict/instance_v1/types/text_extraction.py
new file mode 100644
index 0000000000..2e96216466
--- /dev/null
+++ b/google/cloud/aiplatform/v1/schema/predict/instance_v1/types/text_extraction.py
@@ -0,0 +1,55 @@
+# -*- coding: utf-8 -*-
+
+# Copyright 2020 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+import proto # type: ignore
+
+
+__protobuf__ = proto.module(
+ package="google.cloud.aiplatform.v1.schema.predict.instance",
+ manifest={"TextExtractionPredictionInstance",},
+)
+
+
+class TextExtractionPredictionInstance(proto.Message):
+ r"""Prediction input format for Text Extraction.
+
+ Attributes:
+ content (str):
+ The text snippet to make the predictions on.
+ mime_type (str):
+ The MIME type of the text snippet. The
+ supported MIME types are listed below.
+ - text/plain
+ key (str):
+ This field is only used for batch prediction.
+ If a key is provided, the batch prediction
+            result will be mapped to this key. If omitted,
+ then the batch prediction result will contain
+ the entire input instance. AI Platform will not
+ check if keys in the request are duplicates, so
+ it is up to the caller to ensure the keys are
+ unique.
+ """
+
+ content = proto.Field(proto.STRING, number=1)
+
+ mime_type = proto.Field(proto.STRING, number=2)
+
+ key = proto.Field(proto.STRING, number=3)
+
+
+__all__ = tuple(sorted(__protobuf__.manifest))
diff --git a/google/cloud/aiplatform/v1/schema/predict/instance_v1/types/text_sentiment.py b/google/cloud/aiplatform/v1/schema/predict/instance_v1/types/text_sentiment.py
new file mode 100644
index 0000000000..37353ad806
--- /dev/null
+++ b/google/cloud/aiplatform/v1/schema/predict/instance_v1/types/text_sentiment.py
@@ -0,0 +1,44 @@
+# -*- coding: utf-8 -*-
+
+# Copyright 2020 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+import proto # type: ignore
+
+
+__protobuf__ = proto.module(
+ package="google.cloud.aiplatform.v1.schema.predict.instance",
+ manifest={"TextSentimentPredictionInstance",},
+)
+
+
+class TextSentimentPredictionInstance(proto.Message):
+ r"""Prediction input format for Text Sentiment.
+
+ Attributes:
+ content (str):
+ The text snippet to make the predictions on.
+ mime_type (str):
+ The MIME type of the text snippet. The
+ supported MIME types are listed below.
+ - text/plain
+ """
+
+ content = proto.Field(proto.STRING, number=1)
+
+ mime_type = proto.Field(proto.STRING, number=2)
+
+
+__all__ = tuple(sorted(__protobuf__.manifest))
diff --git a/google/cloud/aiplatform/v1/schema/predict/instance_v1/types/video_action_recognition.py b/google/cloud/aiplatform/v1/schema/predict/instance_v1/types/video_action_recognition.py
new file mode 100644
index 0000000000..6de5665312
--- /dev/null
+++ b/google/cloud/aiplatform/v1/schema/predict/instance_v1/types/video_action_recognition.py
@@ -0,0 +1,64 @@
+# -*- coding: utf-8 -*-
+
+# Copyright 2020 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+import proto # type: ignore
+
+
+__protobuf__ = proto.module(
+ package="google.cloud.aiplatform.v1.schema.predict.instance",
+ manifest={"VideoActionRecognitionPredictionInstance",},
+)
+
+
+class VideoActionRecognitionPredictionInstance(proto.Message):
+ r"""Prediction input format for Video Action Recognition.
+
+ Attributes:
+ content (str):
+ The Google Cloud Storage location of the
+ video on which to perform the prediction.
+ mime_type (str):
+ The MIME type of the content of the video.
+ Only the following are supported: video/mp4
+ video/avi video/quicktime
+ time_segment_start (str):
+ The beginning, inclusive, of the video's time
+ segment on which to perform the prediction.
+ Expressed as a number of seconds as measured
+ from the start of the video, with "s" appended
+ at the end. Fractions are allowed, up to a
+ microsecond precision.
+ time_segment_end (str):
+ The end, exclusive, of the video's time
+ segment on which to perform the prediction.
+ Expressed as a number of seconds as measured
+ from the start of the video, with "s" appended
+ at the end. Fractions are allowed, up to a
+ microsecond precision, and "inf" or "Infinity"
+ is allowed, which means the end of the video.
+ """
+
+ content = proto.Field(proto.STRING, number=1)
+
+ mime_type = proto.Field(proto.STRING, number=2)
+
+ time_segment_start = proto.Field(proto.STRING, number=3)
+
+ time_segment_end = proto.Field(proto.STRING, number=4)
+
+
+__all__ = tuple(sorted(__protobuf__.manifest))
diff --git a/google/cloud/aiplatform/v1/schema/predict/instance_v1/types/video_classification.py b/google/cloud/aiplatform/v1/schema/predict/instance_v1/types/video_classification.py
new file mode 100644
index 0000000000..ab7c0edfe1
--- /dev/null
+++ b/google/cloud/aiplatform/v1/schema/predict/instance_v1/types/video_classification.py
@@ -0,0 +1,64 @@
+# -*- coding: utf-8 -*-
+
+# Copyright 2020 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+import proto # type: ignore
+
+
+__protobuf__ = proto.module(
+ package="google.cloud.aiplatform.v1.schema.predict.instance",
+ manifest={"VideoClassificationPredictionInstance",},
+)
+
+
+class VideoClassificationPredictionInstance(proto.Message):
+ r"""Prediction input format for Video Classification.
+
+ Attributes:
+ content (str):
+ The Google Cloud Storage location of the
+ video on which to perform the prediction.
+ mime_type (str):
+ The MIME type of the content of the video.
+ Only the following are supported: video/mp4
+ video/avi video/quicktime
+ time_segment_start (str):
+ The beginning, inclusive, of the video's time
+ segment on which to perform the prediction.
+ Expressed as a number of seconds as measured
+ from the start of the video, with "s" appended
+ at the end. Fractions are allowed, up to a
+ microsecond precision.
+ time_segment_end (str):
+ The end, exclusive, of the video's time
+ segment on which to perform the prediction.
+ Expressed as a number of seconds as measured
+ from the start of the video, with "s" appended
+ at the end. Fractions are allowed, up to a
+ microsecond precision, and "inf" or "Infinity"
+ is allowed, which means the end of the video.
+ """
+
+ content = proto.Field(proto.STRING, number=1)
+
+ mime_type = proto.Field(proto.STRING, number=2)
+
+ time_segment_start = proto.Field(proto.STRING, number=3)
+
+ time_segment_end = proto.Field(proto.STRING, number=4)
+
+
+__all__ = tuple(sorted(__protobuf__.manifest))
diff --git a/google/cloud/aiplatform/v1/schema/predict/instance_v1/types/video_object_tracking.py b/google/cloud/aiplatform/v1/schema/predict/instance_v1/types/video_object_tracking.py
new file mode 100644
index 0000000000..f797f58f4e
--- /dev/null
+++ b/google/cloud/aiplatform/v1/schema/predict/instance_v1/types/video_object_tracking.py
@@ -0,0 +1,64 @@
+# -*- coding: utf-8 -*-
+
+# Copyright 2020 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+import proto # type: ignore
+
+
+__protobuf__ = proto.module(
+ package="google.cloud.aiplatform.v1.schema.predict.instance",
+ manifest={"VideoObjectTrackingPredictionInstance",},
+)
+
+
+class VideoObjectTrackingPredictionInstance(proto.Message):
+ r"""Prediction input format for Video Object Tracking.
+
+ Attributes:
+ content (str):
+ The Google Cloud Storage location of the
+ video on which to perform the prediction.
+ mime_type (str):
+ The MIME type of the content of the video.
+ Only the following are supported: video/mp4
+ video/avi video/quicktime
+ time_segment_start (str):
+ The beginning, inclusive, of the video's time
+ segment on which to perform the prediction.
+ Expressed as a number of seconds as measured
+ from the start of the video, with "s" appended
+ at the end. Fractions are allowed, up to a
+ microsecond precision.
+ time_segment_end (str):
+ The end, exclusive, of the video's time
+ segment on which to perform the prediction.
+ Expressed as a number of seconds as measured
+ from the start of the video, with "s" appended
+ at the end. Fractions are allowed, up to a
+ microsecond precision, and "inf" or "Infinity"
+ is allowed, which means the end of the video.
+ """
+
+ content = proto.Field(proto.STRING, number=1)
+
+ mime_type = proto.Field(proto.STRING, number=2)
+
+ time_segment_start = proto.Field(proto.STRING, number=3)
+
+ time_segment_end = proto.Field(proto.STRING, number=4)
+
+
+__all__ = tuple(sorted(__protobuf__.manifest))
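
The instance types added above are thin proto-plus wrappers around the JSON payloads that a prediction endpoint accepts, so they can be constructed and serialized directly in Python. A minimal sketch, assuming only the generic proto-plus to_json classmethod and a placeholder Cloud Storage URI:

    from google.cloud.aiplatform.v1.schema.predict.instance_v1.types.video_classification import (
        VideoClassificationPredictionInstance,
    )

    # Build a typed instance instead of hand-writing the request JSON.
    instance = VideoClassificationPredictionInstance(
        content="gs://example-bucket/video.mp4",  # placeholder URI
        mime_type="video/mp4",
        time_segment_start="0.0s",
        time_segment_end="inf",
    )

    # proto-plus messages expose a to_json classmethod; the result is the JSON
    # structure expected in the "instances" list of a predict request.
    print(VideoClassificationPredictionInstance.to_json(instance))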
diff --git a/google/cloud/aiplatform/v1/schema/predict/params/__init__.py b/google/cloud/aiplatform/v1/schema/predict/params/__init__.py
new file mode 100644
index 0000000000..c046f4d7e5
--- /dev/null
+++ b/google/cloud/aiplatform/v1/schema/predict/params/__init__.py
@@ -0,0 +1,44 @@
+# -*- coding: utf-8 -*-
+
+# Copyright 2020 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+from google.cloud.aiplatform.v1.schema.predict.params_v1.types.image_classification import (
+ ImageClassificationPredictionParams,
+)
+from google.cloud.aiplatform.v1.schema.predict.params_v1.types.image_object_detection import (
+ ImageObjectDetectionPredictionParams,
+)
+from google.cloud.aiplatform.v1.schema.predict.params_v1.types.image_segmentation import (
+ ImageSegmentationPredictionParams,
+)
+from google.cloud.aiplatform.v1.schema.predict.params_v1.types.video_action_recognition import (
+ VideoActionRecognitionPredictionParams,
+)
+from google.cloud.aiplatform.v1.schema.predict.params_v1.types.video_classification import (
+ VideoClassificationPredictionParams,
+)
+from google.cloud.aiplatform.v1.schema.predict.params_v1.types.video_object_tracking import (
+ VideoObjectTrackingPredictionParams,
+)
+
+__all__ = (
+ "ImageClassificationPredictionParams",
+ "ImageObjectDetectionPredictionParams",
+ "ImageSegmentationPredictionParams",
+ "VideoActionRecognitionPredictionParams",
+ "VideoClassificationPredictionParams",
+ "VideoObjectTrackingPredictionParams",
+)
diff --git a/google/cloud/aiplatform/v1/schema/predict/params/py.typed b/google/cloud/aiplatform/v1/schema/predict/params/py.typed
new file mode 100644
index 0000000000..df96e61590
--- /dev/null
+++ b/google/cloud/aiplatform/v1/schema/predict/params/py.typed
@@ -0,0 +1,2 @@
+# Marker file for PEP 561.
+# The google-cloud-aiplatform-v1-schema-predict-params package uses inline types.
diff --git a/google/cloud/aiplatform/v1/schema/predict/params_v1/__init__.py b/google/cloud/aiplatform/v1/schema/predict/params_v1/__init__.py
new file mode 100644
index 0000000000..79fb1c2097
--- /dev/null
+++ b/google/cloud/aiplatform/v1/schema/predict/params_v1/__init__.py
@@ -0,0 +1,33 @@
+# -*- coding: utf-8 -*-
+
+# Copyright 2020 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+from .types.image_classification import ImageClassificationPredictionParams
+from .types.image_object_detection import ImageObjectDetectionPredictionParams
+from .types.image_segmentation import ImageSegmentationPredictionParams
+from .types.video_action_recognition import VideoActionRecognitionPredictionParams
+from .types.video_classification import VideoClassificationPredictionParams
+from .types.video_object_tracking import VideoObjectTrackingPredictionParams
+
+
+__all__ = (
+ "ImageObjectDetectionPredictionParams",
+ "ImageSegmentationPredictionParams",
+ "VideoActionRecognitionPredictionParams",
+ "VideoClassificationPredictionParams",
+ "VideoObjectTrackingPredictionParams",
+ "ImageClassificationPredictionParams",
+)
diff --git a/google/cloud/aiplatform/v1/schema/predict/params_v1/py.typed b/google/cloud/aiplatform/v1/schema/predict/params_v1/py.typed
new file mode 100644
index 0000000000..df96e61590
--- /dev/null
+++ b/google/cloud/aiplatform/v1/schema/predict/params_v1/py.typed
@@ -0,0 +1,2 @@
+# Marker file for PEP 561.
+# The google-cloud-aiplatform-v1-schema-predict-params package uses inline types.
diff --git a/google/cloud/aiplatform/v1/schema/predict/params_v1/types/__init__.py b/google/cloud/aiplatform/v1/schema/predict/params_v1/types/__init__.py
new file mode 100644
index 0000000000..2f2c29bba5
--- /dev/null
+++ b/google/cloud/aiplatform/v1/schema/predict/params_v1/types/__init__.py
@@ -0,0 +1,32 @@
+# -*- coding: utf-8 -*-
+
+# Copyright 2020 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+from .image_classification import ImageClassificationPredictionParams
+from .image_object_detection import ImageObjectDetectionPredictionParams
+from .image_segmentation import ImageSegmentationPredictionParams
+from .video_action_recognition import VideoActionRecognitionPredictionParams
+from .video_classification import VideoClassificationPredictionParams
+from .video_object_tracking import VideoObjectTrackingPredictionParams
+
+__all__ = (
+ "ImageClassificationPredictionParams",
+ "ImageObjectDetectionPredictionParams",
+ "ImageSegmentationPredictionParams",
+ "VideoActionRecognitionPredictionParams",
+ "VideoClassificationPredictionParams",
+ "VideoObjectTrackingPredictionParams",
+)
diff --git a/google/cloud/aiplatform/v1/schema/predict/params_v1/types/image_classification.py b/google/cloud/aiplatform/v1/schema/predict/params_v1/types/image_classification.py
new file mode 100644
index 0000000000..3a9efd0ea2
--- /dev/null
+++ b/google/cloud/aiplatform/v1/schema/predict/params_v1/types/image_classification.py
@@ -0,0 +1,47 @@
+# -*- coding: utf-8 -*-
+
+# Copyright 2020 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+import proto # type: ignore
+
+
+__protobuf__ = proto.module(
+ package="google.cloud.aiplatform.v1.schema.predict.params",
+ manifest={"ImageClassificationPredictionParams",},
+)
+
+
+class ImageClassificationPredictionParams(proto.Message):
+ r"""Prediction model parameters for Image Classification.
+
+ Attributes:
+ confidence_threshold (float):
+ The Model only returns predictions with at
+ least this confidence score. Default value is
+ 0.0
+ max_predictions (int):
+ The Model only returns up to that many top,
+ by confidence score, predictions per instance.
+ If this number is very high, the Model may
+ return fewer predictions. Default value is 10.
+ """
+
+ confidence_threshold = proto.Field(proto.FLOAT, number=1)
+
+ max_predictions = proto.Field(proto.INT32, number=2)
+
+
+__all__ = tuple(sorted(__protobuf__.manifest))
diff --git a/google/cloud/aiplatform/v1/schema/predict/params_v1/types/image_object_detection.py b/google/cloud/aiplatform/v1/schema/predict/params_v1/types/image_object_detection.py
new file mode 100644
index 0000000000..c37507a4e0
--- /dev/null
+++ b/google/cloud/aiplatform/v1/schema/predict/params_v1/types/image_object_detection.py
@@ -0,0 +1,48 @@
+# -*- coding: utf-8 -*-
+
+# Copyright 2020 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+import proto # type: ignore
+
+
+__protobuf__ = proto.module(
+ package="google.cloud.aiplatform.v1.schema.predict.params",
+ manifest={"ImageObjectDetectionPredictionParams",},
+)
+
+
+class ImageObjectDetectionPredictionParams(proto.Message):
+ r"""Prediction model parameters for Image Object Detection.
+
+ Attributes:
+ confidence_threshold (float):
+ The Model only returns predictions with at
+ least this confidence score. Default value is
+ 0.0
+ max_predictions (int):
+ The Model only returns up to that many top,
+ by confidence score, predictions per instance.
+            Note that the number of returned predictions is also
+ limited by metadata's predictionsLimit. Default
+ value is 10.
+ """
+
+ confidence_threshold = proto.Field(proto.FLOAT, number=1)
+
+ max_predictions = proto.Field(proto.INT32, number=2)
+
+
+__all__ = tuple(sorted(__protobuf__.manifest))
diff --git a/google/cloud/aiplatform/v1/schema/predict/params_v1/types/image_segmentation.py b/google/cloud/aiplatform/v1/schema/predict/params_v1/types/image_segmentation.py
new file mode 100644
index 0000000000..108cff107b
--- /dev/null
+++ b/google/cloud/aiplatform/v1/schema/predict/params_v1/types/image_segmentation.py
@@ -0,0 +1,42 @@
+# -*- coding: utf-8 -*-
+
+# Copyright 2020 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+import proto # type: ignore
+
+
+__protobuf__ = proto.module(
+ package="google.cloud.aiplatform.v1.schema.predict.params",
+ manifest={"ImageSegmentationPredictionParams",},
+)
+
+
+class ImageSegmentationPredictionParams(proto.Message):
+ r"""Prediction model parameters for Image Segmentation.
+
+ Attributes:
+ confidence_threshold (float):
+ When the model predicts category of pixels of
+ the image, it will only provide predictions for
+ pixels that it is at least this much confident
+ about. All other pixels will be classified as
+ background. Default value is 0.5.
+ """
+
+ confidence_threshold = proto.Field(proto.FLOAT, number=1)
+
+
+__all__ = tuple(sorted(__protobuf__.manifest))
diff --git a/google/cloud/aiplatform/v1/schema/predict/params_v1/types/video_action_recognition.py b/google/cloud/aiplatform/v1/schema/predict/params_v1/types/video_action_recognition.py
new file mode 100644
index 0000000000..66f1f19e76
--- /dev/null
+++ b/google/cloud/aiplatform/v1/schema/predict/params_v1/types/video_action_recognition.py
@@ -0,0 +1,48 @@
+# -*- coding: utf-8 -*-
+
+# Copyright 2020 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+import proto # type: ignore
+
+
+__protobuf__ = proto.module(
+ package="google.cloud.aiplatform.v1.schema.predict.params",
+ manifest={"VideoActionRecognitionPredictionParams",},
+)
+
+
+class VideoActionRecognitionPredictionParams(proto.Message):
+ r"""Prediction model parameters for Video Action Recognition.
+
+ Attributes:
+ confidence_threshold (float):
+ The Model only returns predictions with at
+ least this confidence score. Default value is
+ 0.0
+ max_predictions (int):
+ The model only returns up to that many top,
+ by confidence score, predictions per frame of
+ the video. If this number is very high, the
+ Model may return fewer predictions per frame.
+ Default value is 50.
+ """
+
+ confidence_threshold = proto.Field(proto.FLOAT, number=1)
+
+ max_predictions = proto.Field(proto.INT32, number=2)
+
+
+__all__ = tuple(sorted(__protobuf__.manifest))
diff --git a/google/cloud/aiplatform/v1/schema/predict/params_v1/types/video_classification.py b/google/cloud/aiplatform/v1/schema/predict/params_v1/types/video_classification.py
new file mode 100644
index 0000000000..bfe8df9f5c
--- /dev/null
+++ b/google/cloud/aiplatform/v1/schema/predict/params_v1/types/video_classification.py
@@ -0,0 +1,85 @@
+# -*- coding: utf-8 -*-
+
+# Copyright 2020 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+import proto # type: ignore
+
+
+__protobuf__ = proto.module(
+ package="google.cloud.aiplatform.v1.schema.predict.params",
+ manifest={"VideoClassificationPredictionParams",},
+)
+
+
+class VideoClassificationPredictionParams(proto.Message):
+ r"""Prediction model parameters for Video Classification.
+
+ Attributes:
+ confidence_threshold (float):
+ The Model only returns predictions with at
+ least this confidence score. Default value is
+ 0.0
+ max_predictions (int):
+ The Model only returns up to that many top,
+ by confidence score, predictions per instance.
+ If this number is very high, the Model may
+ return fewer predictions. Default value is
+ 10,000.
+ segment_classification (bool):
+ Set to true to request segment-level
+ classification. AI Platform returns labels and
+ their confidence scores for the entire time
+            segment of the video that the user specified in
+            the input instance. Default value is true.
+ shot_classification (bool):
+ Set to true to request shot-level
+ classification. AI Platform determines the
+ boundaries for each camera shot in the entire
+            time segment of the video that the user specified
+            in the input instance. AI Platform then returns
+            labels and their confidence scores for each
+            detected shot, along with the start and end time
+            of the shot.
+            WARNING: Model evaluation is not done for this
+            classification type; its quality depends
+            on the training data, and there are no metrics
+            provided to describe that quality.
+            Default value is false.
+ one_sec_interval_classification (bool):
+ Set to true to request classification for a
+ video at one-second intervals. AI Platform
+ returns labels and their confidence scores for
+ each second of the entire time segment of the
+            video that the user specified in the input
+            instance. WARNING: Model evaluation is not done
+            for this classification type; its quality depends
+            on the training data, and there are no metrics
+            provided to describe that quality. Default value
+            is false.
+ """
+
+ confidence_threshold = proto.Field(proto.FLOAT, number=1)
+
+ max_predictions = proto.Field(proto.INT32, number=2)
+
+ segment_classification = proto.Field(proto.BOOL, number=3)
+
+ shot_classification = proto.Field(proto.BOOL, number=4)
+
+ one_sec_interval_classification = proto.Field(proto.BOOL, number=5)
+
+
+__all__ = tuple(sorted(__protobuf__.manifest))
diff --git a/google/cloud/aiplatform/v1/schema/predict/params_v1/types/video_object_tracking.py b/google/cloud/aiplatform/v1/schema/predict/params_v1/types/video_object_tracking.py
new file mode 100644
index 0000000000..899de1050a
--- /dev/null
+++ b/google/cloud/aiplatform/v1/schema/predict/params_v1/types/video_object_tracking.py
@@ -0,0 +1,54 @@
+# -*- coding: utf-8 -*-
+
+# Copyright 2020 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+import proto # type: ignore
+
+
+__protobuf__ = proto.module(
+ package="google.cloud.aiplatform.v1.schema.predict.params",
+ manifest={"VideoObjectTrackingPredictionParams",},
+)
+
+
+class VideoObjectTrackingPredictionParams(proto.Message):
+ r"""Prediction model parameters for Video Object Tracking.
+
+ Attributes:
+ confidence_threshold (float):
+ The Model only returns predictions with at
+ least this confidence score. Default value is
+ 0.0
+ max_predictions (int):
+ The model only returns up to that many top,
+ by confidence score, predictions per frame of
+ the video. If this number is very high, the
+ Model may return fewer predictions per frame.
+ Default value is 50.
+ min_bounding_box_size (float):
+            Only bounding boxes whose shortest edge is at
+            least this long, as a relative value of the video
+            frame size, are returned. Default value is 0.0.
+ """
+
+ confidence_threshold = proto.Field(proto.FLOAT, number=1)
+
+ max_predictions = proto.Field(proto.INT32, number=2)
+
+ min_bounding_box_size = proto.Field(proto.FLOAT, number=3)
+
+
+__all__ = tuple(sorted(__protobuf__.manifest))
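
The params types above fill the same role for the "parameters" field of a predict request. A short sketch under the same assumptions (generic proto-plus serialization, illustrative values only):

    from google.cloud.aiplatform.v1.schema.predict.params_v1.types.video_classification import (
        VideoClassificationPredictionParams,
    )

    # Request segment- and shot-level labels, keeping only confident predictions.
    params = VideoClassificationPredictionParams(
        confidence_threshold=0.5,
        max_predictions=100,
        segment_classification=True,
        shot_classification=True,
    )

    # Serialized form suitable for the "parameters" field of a predict request.
    print(VideoClassificationPredictionParams.to_json(params))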
diff --git a/google/cloud/aiplatform/v1/schema/predict/prediction/__init__.py b/google/cloud/aiplatform/v1/schema/predict/prediction/__init__.py
new file mode 100644
index 0000000000..d8e2b782c2
--- /dev/null
+++ b/google/cloud/aiplatform/v1/schema/predict/prediction/__init__.py
@@ -0,0 +1,60 @@
+# -*- coding: utf-8 -*-
+
+# Copyright 2020 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+from google.cloud.aiplatform.v1.schema.predict.prediction_v1.types.classification import (
+ ClassificationPredictionResult,
+)
+from google.cloud.aiplatform.v1.schema.predict.prediction_v1.types.image_object_detection import (
+ ImageObjectDetectionPredictionResult,
+)
+from google.cloud.aiplatform.v1.schema.predict.prediction_v1.types.image_segmentation import (
+ ImageSegmentationPredictionResult,
+)
+from google.cloud.aiplatform.v1.schema.predict.prediction_v1.types.tabular_classification import (
+ TabularClassificationPredictionResult,
+)
+from google.cloud.aiplatform.v1.schema.predict.prediction_v1.types.tabular_regression import (
+ TabularRegressionPredictionResult,
+)
+from google.cloud.aiplatform.v1.schema.predict.prediction_v1.types.text_extraction import (
+ TextExtractionPredictionResult,
+)
+from google.cloud.aiplatform.v1.schema.predict.prediction_v1.types.text_sentiment import (
+ TextSentimentPredictionResult,
+)
+from google.cloud.aiplatform.v1.schema.predict.prediction_v1.types.video_action_recognition import (
+ VideoActionRecognitionPredictionResult,
+)
+from google.cloud.aiplatform.v1.schema.predict.prediction_v1.types.video_classification import (
+ VideoClassificationPredictionResult,
+)
+from google.cloud.aiplatform.v1.schema.predict.prediction_v1.types.video_object_tracking import (
+ VideoObjectTrackingPredictionResult,
+)
+
+__all__ = (
+ "ClassificationPredictionResult",
+ "ImageObjectDetectionPredictionResult",
+ "ImageSegmentationPredictionResult",
+ "TabularClassificationPredictionResult",
+ "TabularRegressionPredictionResult",
+ "TextExtractionPredictionResult",
+ "TextSentimentPredictionResult",
+ "VideoActionRecognitionPredictionResult",
+ "VideoClassificationPredictionResult",
+ "VideoObjectTrackingPredictionResult",
+)
diff --git a/google/cloud/aiplatform/v1/schema/predict/prediction/py.typed b/google/cloud/aiplatform/v1/schema/predict/prediction/py.typed
new file mode 100644
index 0000000000..472fa4d8cc
--- /dev/null
+++ b/google/cloud/aiplatform/v1/schema/predict/prediction/py.typed
@@ -0,0 +1,2 @@
+# Marker file for PEP 561.
+# The google-cloud-aiplatform-v1-schema-predict-prediction package uses inline types.
diff --git a/google/cloud/aiplatform/v1/schema/predict/prediction_v1/__init__.py b/google/cloud/aiplatform/v1/schema/predict/prediction_v1/__init__.py
new file mode 100644
index 0000000000..91fae5a3b1
--- /dev/null
+++ b/google/cloud/aiplatform/v1/schema/predict/prediction_v1/__init__.py
@@ -0,0 +1,41 @@
+# -*- coding: utf-8 -*-
+
+# Copyright 2020 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+from .types.classification import ClassificationPredictionResult
+from .types.image_object_detection import ImageObjectDetectionPredictionResult
+from .types.image_segmentation import ImageSegmentationPredictionResult
+from .types.tabular_classification import TabularClassificationPredictionResult
+from .types.tabular_regression import TabularRegressionPredictionResult
+from .types.text_extraction import TextExtractionPredictionResult
+from .types.text_sentiment import TextSentimentPredictionResult
+from .types.video_action_recognition import VideoActionRecognitionPredictionResult
+from .types.video_classification import VideoClassificationPredictionResult
+from .types.video_object_tracking import VideoObjectTrackingPredictionResult
+
+
+__all__ = (
+ "ImageObjectDetectionPredictionResult",
+ "ImageSegmentationPredictionResult",
+ "TabularClassificationPredictionResult",
+ "TabularRegressionPredictionResult",
+ "TextExtractionPredictionResult",
+ "TextSentimentPredictionResult",
+ "VideoActionRecognitionPredictionResult",
+ "VideoClassificationPredictionResult",
+ "VideoObjectTrackingPredictionResult",
+ "ClassificationPredictionResult",
+)
diff --git a/google/cloud/aiplatform/v1/schema/predict/prediction_v1/py.typed b/google/cloud/aiplatform/v1/schema/predict/prediction_v1/py.typed
new file mode 100644
index 0000000000..472fa4d8cc
--- /dev/null
+++ b/google/cloud/aiplatform/v1/schema/predict/prediction_v1/py.typed
@@ -0,0 +1,2 @@
+# Marker file for PEP 561.
+# The google-cloud-aiplatform-v1-schema-predict-prediction package uses inline types.
diff --git a/google/cloud/aiplatform/v1/schema/predict/prediction_v1/types/__init__.py b/google/cloud/aiplatform/v1/schema/predict/prediction_v1/types/__init__.py
new file mode 100644
index 0000000000..a0fd2058e0
--- /dev/null
+++ b/google/cloud/aiplatform/v1/schema/predict/prediction_v1/types/__init__.py
@@ -0,0 +1,40 @@
+# -*- coding: utf-8 -*-
+
+# Copyright 2020 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+from .classification import ClassificationPredictionResult
+from .image_object_detection import ImageObjectDetectionPredictionResult
+from .image_segmentation import ImageSegmentationPredictionResult
+from .tabular_classification import TabularClassificationPredictionResult
+from .tabular_regression import TabularRegressionPredictionResult
+from .text_extraction import TextExtractionPredictionResult
+from .text_sentiment import TextSentimentPredictionResult
+from .video_action_recognition import VideoActionRecognitionPredictionResult
+from .video_classification import VideoClassificationPredictionResult
+from .video_object_tracking import VideoObjectTrackingPredictionResult
+
+__all__ = (
+ "ClassificationPredictionResult",
+ "ImageObjectDetectionPredictionResult",
+ "ImageSegmentationPredictionResult",
+ "TabularClassificationPredictionResult",
+ "TabularRegressionPredictionResult",
+ "TextExtractionPredictionResult",
+ "TextSentimentPredictionResult",
+ "VideoActionRecognitionPredictionResult",
+ "VideoClassificationPredictionResult",
+ "VideoObjectTrackingPredictionResult",
+)
diff --git a/google/cloud/aiplatform/v1/schema/predict/prediction_v1/types/classification.py b/google/cloud/aiplatform/v1/schema/predict/prediction_v1/types/classification.py
new file mode 100644
index 0000000000..cfc8e2e602
--- /dev/null
+++ b/google/cloud/aiplatform/v1/schema/predict/prediction_v1/types/classification.py
@@ -0,0 +1,51 @@
+# -*- coding: utf-8 -*-
+
+# Copyright 2020 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+import proto # type: ignore
+
+
+__protobuf__ = proto.module(
+ package="google.cloud.aiplatform.v1.schema.predict.prediction",
+ manifest={"ClassificationPredictionResult",},
+)
+
+
+class ClassificationPredictionResult(proto.Message):
+ r"""Prediction output format for Image and Text Classification.
+
+ Attributes:
+ ids (Sequence[int]):
+ The resource IDs of the AnnotationSpecs that
+            had been identified, ordered by confidence
+            score in descending order.
+ display_names (Sequence[str]):
+ The display names of the AnnotationSpecs that
+ had been identified, order matches the IDs.
+ confidences (Sequence[float]):
+ The Model's confidences in correctness of the
+ predicted IDs, higher value means higher
+            confidence. Order matches the IDs.
+ """
+
+ ids = proto.RepeatedField(proto.INT64, number=1)
+
+ display_names = proto.RepeatedField(proto.STRING, number=2)
+
+ confidences = proto.RepeatedField(proto.FLOAT, number=3)
+
+
+__all__ = tuple(sorted(__protobuf__.manifest))
diff --git a/google/cloud/aiplatform/v1/schema/predict/prediction_v1/types/image_object_detection.py b/google/cloud/aiplatform/v1/schema/predict/prediction_v1/types/image_object_detection.py
new file mode 100644
index 0000000000..31d37010db
--- /dev/null
+++ b/google/cloud/aiplatform/v1/schema/predict/prediction_v1/types/image_object_detection.py
@@ -0,0 +1,64 @@
+# -*- coding: utf-8 -*-
+
+# Copyright 2020 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+import proto # type: ignore
+
+
+from google.protobuf import struct_pb2 as struct # type: ignore
+
+
+__protobuf__ = proto.module(
+ package="google.cloud.aiplatform.v1.schema.predict.prediction",
+ manifest={"ImageObjectDetectionPredictionResult",},
+)
+
+
+class ImageObjectDetectionPredictionResult(proto.Message):
+ r"""Prediction output format for Image Object Detection.
+
+ Attributes:
+ ids (Sequence[int]):
+ The resource IDs of the AnnotationSpecs that
+            had been identified, ordered by confidence
+            score in descending order.
+ display_names (Sequence[str]):
+ The display names of the AnnotationSpecs that
+ had been identified, order matches the IDs.
+ confidences (Sequence[float]):
+ The Model's confidences in correctness of the
+ predicted IDs, higher value means higher
+            confidence. Order matches the IDs.
+ bboxes (Sequence[google.protobuf.struct_pb2.ListValue]):
+ Bounding boxes, i.e. the rectangles over the image, that
+ pinpoint the found AnnotationSpecs. Given in order that
+ matches the IDs. Each bounding box is an array of 4 numbers
+ ``xMin``, ``xMax``, ``yMin``, and ``yMax``, which represent
+ the extremal coordinates of the box. They are relative to
+ the image size, and the point 0,0 is in the top left of the
+ image.
+ """
+
+ ids = proto.RepeatedField(proto.INT64, number=1)
+
+ display_names = proto.RepeatedField(proto.STRING, number=2)
+
+ confidences = proto.RepeatedField(proto.FLOAT, number=3)
+
+ bboxes = proto.RepeatedField(proto.MESSAGE, number=4, message=struct.ListValue,)
+
+
+__all__ = tuple(sorted(__protobuf__.manifest))
diff --git a/google/cloud/aiplatform/v1/schema/predict/prediction_v1/types/image_segmentation.py b/google/cloud/aiplatform/v1/schema/predict/prediction_v1/types/image_segmentation.py
new file mode 100644
index 0000000000..1261f19723
--- /dev/null
+++ b/google/cloud/aiplatform/v1/schema/predict/prediction_v1/types/image_segmentation.py
@@ -0,0 +1,57 @@
+# -*- coding: utf-8 -*-
+
+# Copyright 2020 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+import proto # type: ignore
+
+
+__protobuf__ = proto.module(
+ package="google.cloud.aiplatform.v1.schema.predict.prediction",
+ manifest={"ImageSegmentationPredictionResult",},
+)
+
+
+class ImageSegmentationPredictionResult(proto.Message):
+ r"""Prediction output format for Image Segmentation.
+
+ Attributes:
+ category_mask (str):
+ A PNG image where each pixel in the mask
+            represents the category to which the pixel in
+            the original image was predicted to belong.
+            The size of this image will be the same as the
+            original image. The mapping between the
+            AnnotationSpec and the color can be found in
+            the model's metadata. The model will choose the most
+ likely category and if none of the categories
+ reach the confidence threshold, the pixel will
+ be marked as background.
+ confidence_mask (str):
+            A one-channel image encoded as an 8-bit
+            lossless PNG. The size of the image will be
+            the same as the original image. For a specific
+            pixel, a darker color means less confidence in the
+            correctness of the category in the categoryMask
+ for the corresponding pixel. Black means no
+ confidence and white means complete confidence.
+ """
+
+ category_mask = proto.Field(proto.STRING, number=1)
+
+ confidence_mask = proto.Field(proto.STRING, number=2)
+
+
+__all__ = tuple(sorted(__protobuf__.manifest))
diff --git a/google/cloud/aiplatform/v1/schema/predict/prediction_v1/types/tabular_classification.py b/google/cloud/aiplatform/v1/schema/predict/prediction_v1/types/tabular_classification.py
new file mode 100644
index 0000000000..7e78051467
--- /dev/null
+++ b/google/cloud/aiplatform/v1/schema/predict/prediction_v1/types/tabular_classification.py
@@ -0,0 +1,47 @@
+# -*- coding: utf-8 -*-
+
+# Copyright 2020 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+import proto # type: ignore
+
+
+__protobuf__ = proto.module(
+ package="google.cloud.aiplatform.v1.schema.predict.prediction",
+ manifest={"TabularClassificationPredictionResult",},
+)
+
+
+class TabularClassificationPredictionResult(proto.Message):
+ r"""Prediction output format for Tabular Classification.
+
+ Attributes:
+ classes (Sequence[str]):
+            The names of the classes being classified,
+            containing all possible values of the target
+ column.
+ scores (Sequence[float]):
+ The model's confidence in each class being
+ correct, higher value means higher confidence.
+ The N-th score corresponds to the N-th class in
+ classes.
+ """
+
+ classes = proto.RepeatedField(proto.STRING, number=1)
+
+ scores = proto.RepeatedField(proto.FLOAT, number=2)
+
+
+__all__ = tuple(sorted(__protobuf__.manifest))
diff --git a/google/cloud/aiplatform/v1/schema/predict/prediction_v1/types/tabular_regression.py b/google/cloud/aiplatform/v1/schema/predict/prediction_v1/types/tabular_regression.py
new file mode 100644
index 0000000000..c813f3e45c
--- /dev/null
+++ b/google/cloud/aiplatform/v1/schema/predict/prediction_v1/types/tabular_regression.py
@@ -0,0 +1,46 @@
+# -*- coding: utf-8 -*-
+
+# Copyright 2020 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+import proto # type: ignore
+
+
+__protobuf__ = proto.module(
+ package="google.cloud.aiplatform.v1.schema.predict.prediction",
+ manifest={"TabularRegressionPredictionResult",},
+)
+
+
+class TabularRegressionPredictionResult(proto.Message):
+ r"""Prediction output format for Tabular Regression.
+
+ Attributes:
+ value (float):
+ The regression value.
+ lower_bound (float):
+ The lower bound of the prediction interval.
+ upper_bound (float):
+ The upper bound of the prediction interval.
+ """
+
+ value = proto.Field(proto.FLOAT, number=1)
+
+ lower_bound = proto.Field(proto.FLOAT, number=2)
+
+ upper_bound = proto.Field(proto.FLOAT, number=3)
+
+
+__all__ = tuple(sorted(__protobuf__.manifest))
diff --git a/google/cloud/aiplatform/v1/schema/predict/prediction_v1/types/text_extraction.py b/google/cloud/aiplatform/v1/schema/predict/prediction_v1/types/text_extraction.py
new file mode 100644
index 0000000000..201f10d08a
--- /dev/null
+++ b/google/cloud/aiplatform/v1/schema/predict/prediction_v1/types/text_extraction.py
@@ -0,0 +1,67 @@
+# -*- coding: utf-8 -*-
+
+# Copyright 2020 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+import proto # type: ignore
+
+
+__protobuf__ = proto.module(
+ package="google.cloud.aiplatform.v1.schema.predict.prediction",
+ manifest={"TextExtractionPredictionResult",},
+)
+
+
+class TextExtractionPredictionResult(proto.Message):
+ r"""Prediction output format for Text Extraction.
+
+ Attributes:
+ ids (Sequence[int]):
+ The resource IDs of the AnnotationSpecs that
+            had been identified, ordered by confidence
+            score in descending order.
+ display_names (Sequence[str]):
+ The display names of the AnnotationSpecs that
+ had been identified, order matches the IDs.
+ text_segment_start_offsets (Sequence[int]):
+ The start offsets, inclusive, of the text
+ segment in which the AnnotationSpec has been
+ identified. Expressed as a zero-based number of
+ characters as measured from the start of the
+ text snippet.
+ text_segment_end_offsets (Sequence[int]):
+ The end offsets, inclusive, of the text
+ segment in which the AnnotationSpec has been
+ identified. Expressed as a zero-based number of
+ characters as measured from the start of the
+ text snippet.
+ confidences (Sequence[float]):
+ The Model's confidences in correctness of the
+ predicted IDs, higher value means higher
+            confidence. Order matches the IDs.
+ """
+
+ ids = proto.RepeatedField(proto.INT64, number=1)
+
+ display_names = proto.RepeatedField(proto.STRING, number=2)
+
+ text_segment_start_offsets = proto.RepeatedField(proto.INT64, number=3)
+
+ text_segment_end_offsets = proto.RepeatedField(proto.INT64, number=4)
+
+ confidences = proto.RepeatedField(proto.FLOAT, number=5)
+
+
+__all__ = tuple(sorted(__protobuf__.manifest))
diff --git a/google/cloud/aiplatform/v1/schema/predict/prediction_v1/types/text_sentiment.py b/google/cloud/aiplatform/v1/schema/predict/prediction_v1/types/text_sentiment.py
new file mode 100644
index 0000000000..73c670f4ec
--- /dev/null
+++ b/google/cloud/aiplatform/v1/schema/predict/prediction_v1/types/text_sentiment.py
@@ -0,0 +1,45 @@
+# -*- coding: utf-8 -*-
+
+# Copyright 2020 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+import proto # type: ignore
+
+
+__protobuf__ = proto.module(
+ package="google.cloud.aiplatform.v1.schema.predict.prediction",
+ manifest={"TextSentimentPredictionResult",},
+)
+
+
+class TextSentimentPredictionResult(proto.Message):
+ r"""Prediction output format for Text Sentiment
+
+ Attributes:
+ sentiment (int):
+            The integer sentiment label, between 0
+            (inclusive) and sentimentMax (inclusive),
+            where 0 maps to the least positive sentiment and
+            sentimentMax maps to the most positive one. The
+ higher the score is, the more positive the
+ sentiment in the text snippet is. Note:
+ sentimentMax is an integer value between 1
+ (inclusive) and 10 (inclusive).
+ """
+
+ sentiment = proto.Field(proto.INT32, number=1)
+
+
+__all__ = tuple(sorted(__protobuf__.manifest))
diff --git a/google/cloud/aiplatform/v1/schema/predict/prediction_v1/types/video_action_recognition.py b/google/cloud/aiplatform/v1/schema/predict/prediction_v1/types/video_action_recognition.py
new file mode 100644
index 0000000000..486853c63d
--- /dev/null
+++ b/google/cloud/aiplatform/v1/schema/predict/prediction_v1/types/video_action_recognition.py
@@ -0,0 +1,74 @@
+# -*- coding: utf-8 -*-
+
+# Copyright 2020 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+import proto # type: ignore
+
+
+from google.protobuf import duration_pb2 as duration # type: ignore
+from google.protobuf import wrappers_pb2 as wrappers # type: ignore
+
+
+__protobuf__ = proto.module(
+ package="google.cloud.aiplatform.v1.schema.predict.prediction",
+ manifest={"VideoActionRecognitionPredictionResult",},
+)
+
+
+class VideoActionRecognitionPredictionResult(proto.Message):
+ r"""Prediction output format for Video Action Recognition.
+
+ Attributes:
+ id (str):
+ The resource ID of the AnnotationSpec that
+ had been identified.
+ display_name (str):
+ The display name of the AnnotationSpec that
+ had been identified.
+ time_segment_start (google.protobuf.duration_pb2.Duration):
+ The beginning, inclusive, of the video's time
+ segment in which the AnnotationSpec has been
+ identified. Expressed as a number of seconds as
+ measured from the start of the video, with
+ fractions up to a microsecond precision, and
+ with "s" appended at the end.
+ time_segment_end (google.protobuf.duration_pb2.Duration):
+ The end, exclusive, of the video's time
+ segment in which the AnnotationSpec has been
+ identified. Expressed as a number of seconds as
+ measured from the start of the video, with
+ fractions up to a microsecond precision, and
+ with "s" appended at the end.
+ confidence (google.protobuf.wrappers_pb2.FloatValue):
+            The Model's confidence in correctness of this
+ prediction, higher value means higher
+ confidence.
+ """
+
+ id = proto.Field(proto.STRING, number=1)
+
+ display_name = proto.Field(proto.STRING, number=2)
+
+ time_segment_start = proto.Field(
+ proto.MESSAGE, number=4, message=duration.Duration,
+ )
+
+ time_segment_end = proto.Field(proto.MESSAGE, number=5, message=duration.Duration,)
+
+ confidence = proto.Field(proto.MESSAGE, number=6, message=wrappers.FloatValue,)
+
+
+__all__ = tuple(sorted(__protobuf__.manifest))
diff --git a/google/cloud/aiplatform/v1/schema/predict/prediction_v1/types/video_classification.py b/google/cloud/aiplatform/v1/schema/predict/prediction_v1/types/video_classification.py
new file mode 100644
index 0000000000..c043547d04
--- /dev/null
+++ b/google/cloud/aiplatform/v1/schema/predict/prediction_v1/types/video_classification.py
@@ -0,0 +1,90 @@
+# -*- coding: utf-8 -*-
+
+# Copyright 2020 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+import proto # type: ignore
+
+
+from google.protobuf import duration_pb2 as duration # type: ignore
+from google.protobuf import wrappers_pb2 as wrappers # type: ignore
+
+
+__protobuf__ = proto.module(
+ package="google.cloud.aiplatform.v1.schema.predict.prediction",
+ manifest={"VideoClassificationPredictionResult",},
+)
+
+
+class VideoClassificationPredictionResult(proto.Message):
+ r"""Prediction output format for Video Classification.
+
+ Attributes:
+ id (str):
+ The resource ID of the AnnotationSpec that
+ had been identified.
+ display_name (str):
+ The display name of the AnnotationSpec that
+ had been identified.
+ type_ (str):
+ The type of the prediction. The requested
+ types can be configured via parameters. This
+ will be one of - segment-classification
+ - shot-classification
+ - one-sec-interval-classification
+ time_segment_start (google.protobuf.duration_pb2.Duration):
+ The beginning, inclusive, of the video's time
+ segment in which the AnnotationSpec has been
+ identified. Expressed as a number of seconds as
+ measured from the start of the video, with
+ fractions up to a microsecond precision, and
+ with "s" appended at the end. Note that for
+ 'segment-classification' prediction type, this
+ equals the original 'timeSegmentStart' from the
+ input instance, for other types it is the start
+ of a shot or a 1 second interval respectively.
+ time_segment_end (google.protobuf.duration_pb2.Duration):
+ The end, exclusive, of the video's time
+ segment in which the AnnotationSpec has been
+ identified. Expressed as a number of seconds as
+ measured from the start of the video, with
+ fractions up to a microsecond precision, and
+ with "s" appended at the end. Note that for
+ 'segment-classification' prediction type, this
+ equals the original 'timeSegmentEnd' from the
+ input instance, for other types it is the end of
+ a shot or a 1 second interval respectively.
+ confidence (google.protobuf.wrappers_pb2.FloatValue):
+            The Model's confidence in the correctness of
+            this prediction; a higher value means higher
+            confidence.
+ """
+
+ id = proto.Field(proto.STRING, number=1)
+
+ display_name = proto.Field(proto.STRING, number=2)
+
+ type_ = proto.Field(proto.STRING, number=3)
+
+ time_segment_start = proto.Field(
+ proto.MESSAGE, number=4, message=duration.Duration,
+ )
+
+ time_segment_end = proto.Field(proto.MESSAGE, number=5, message=duration.Duration,)
+
+ confidence = proto.Field(proto.MESSAGE, number=6, message=wrappers.FloatValue,)
+
+
+__all__ = tuple(sorted(__protobuf__.manifest))
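
As a quick orientation for reviewers, here is a minimal sketch of how the generated wrapper above could be constructed and read back. All values are hypothetical, and it assumes the package is installed exactly as laid out in this diff.

```python
from google.cloud.aiplatform.v1.schema.predict.prediction_v1.types import (
    video_classification,
)
from google.protobuf import duration_pb2, wrappers_pb2

# Hypothetical values; proto-plus message fields accept the protobuf
# well-known types (Duration, FloatValue) directly.
result = video_classification.VideoClassificationPredictionResult(
    id="123",
    display_name="cat",
    type_="segment-classification",
    time_segment_start=duration_pb2.Duration(seconds=0),
    time_segment_end=duration_pb2.Duration(seconds=10, nanos=500_000_000),
    confidence=wrappers_pb2.FloatValue(value=0.97),
)

print(result.display_name, result.confidence.value)
```
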
diff --git a/google/cloud/aiplatform/v1/schema/predict/prediction_v1/types/video_object_tracking.py b/google/cloud/aiplatform/v1/schema/predict/prediction_v1/types/video_object_tracking.py
new file mode 100644
index 0000000000..d1b515a895
--- /dev/null
+++ b/google/cloud/aiplatform/v1/schema/predict/prediction_v1/types/video_object_tracking.py
@@ -0,0 +1,115 @@
+# -*- coding: utf-8 -*-
+
+# Copyright 2020 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+import proto # type: ignore
+
+
+from google.protobuf import duration_pb2 as duration # type: ignore
+from google.protobuf import wrappers_pb2 as wrappers # type: ignore
+
+
+__protobuf__ = proto.module(
+ package="google.cloud.aiplatform.v1.schema.predict.prediction",
+ manifest={"VideoObjectTrackingPredictionResult",},
+)
+
+
+class VideoObjectTrackingPredictionResult(proto.Message):
+ r"""Prediction output format for Video Object Tracking.
+
+ Attributes:
+ id (str):
+ The resource ID of the AnnotationSpec that
+ had been identified.
+ display_name (str):
+ The display name of the AnnotationSpec that
+ had been identified.
+ time_segment_start (google.protobuf.duration_pb2.Duration):
+ The beginning, inclusive, of the video's time
+ segment in which the object instance has been
+ detected. Expressed as a number of seconds as
+ measured from the start of the video, with
+ fractions up to a microsecond precision, and
+ with "s" appended at the end.
+ time_segment_end (google.protobuf.duration_pb2.Duration):
+ The end, inclusive, of the video's time
+ segment in which the object instance has been
+ detected. Expressed as a number of seconds as
+ measured from the start of the video, with
+ fractions up to a microsecond precision, and
+ with "s" appended at the end.
+ confidence (google.protobuf.wrappers_pb2.FloatValue):
+            The Model's confidence in the correctness of
+            this prediction; a higher value means higher
+            confidence.
+ frames (Sequence[google.cloud.aiplatform.v1.schema.predict.prediction_v1.types.VideoObjectTrackingPredictionResult.Frame]):
+ All of the frames of the video in which a
+ single object instance has been detected. The
+ bounding boxes in the frames identify the same
+ object.
+ """
+
+ class Frame(proto.Message):
+ r"""The fields ``xMin``, ``xMax``, ``yMin``, and ``yMax`` refer to a
+ bounding box, i.e. the rectangle over the video frame pinpointing
+ the found AnnotationSpec. The coordinates are relative to the frame
+ size, and the point 0,0 is in the top left of the frame.
+
+ Attributes:
+ time_offset (google.protobuf.duration_pb2.Duration):
+ A time (frame) of a video in which the object
+ has been detected. Expressed as a number of
+ seconds as measured from the start of the video,
+ with fractions up to a microsecond precision,
+ and with "s" appended at the end.
+ x_min (google.protobuf.wrappers_pb2.FloatValue):
+ The leftmost coordinate of the bounding box.
+ x_max (google.protobuf.wrappers_pb2.FloatValue):
+ The rightmost coordinate of the bounding box.
+ y_min (google.protobuf.wrappers_pb2.FloatValue):
+ The topmost coordinate of the bounding box.
+ y_max (google.protobuf.wrappers_pb2.FloatValue):
+ The bottommost coordinate of the bounding
+ box.
+ """
+
+ time_offset = proto.Field(proto.MESSAGE, number=1, message=duration.Duration,)
+
+ x_min = proto.Field(proto.MESSAGE, number=2, message=wrappers.FloatValue,)
+
+ x_max = proto.Field(proto.MESSAGE, number=3, message=wrappers.FloatValue,)
+
+ y_min = proto.Field(proto.MESSAGE, number=4, message=wrappers.FloatValue,)
+
+ y_max = proto.Field(proto.MESSAGE, number=5, message=wrappers.FloatValue,)
+
+ id = proto.Field(proto.STRING, number=1)
+
+ display_name = proto.Field(proto.STRING, number=2)
+
+ time_segment_start = proto.Field(
+ proto.MESSAGE, number=3, message=duration.Duration,
+ )
+
+ time_segment_end = proto.Field(proto.MESSAGE, number=4, message=duration.Duration,)
+
+ confidence = proto.Field(proto.MESSAGE, number=5, message=wrappers.FloatValue,)
+
+ frames = proto.RepeatedField(proto.MESSAGE, number=6, message=Frame,)
+
+
+__all__ = tuple(sorted(__protobuf__.manifest))
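
For the tracking result above, the nested Frame message carries the per-frame bounding boxes with frame-relative coordinates. A hypothetical sketch (single track, single frame; values invented):

```python
from google.cloud.aiplatform.v1.schema.predict.prediction_v1.types import (
    video_object_tracking,
)
from google.protobuf import duration_pb2, wrappers_pb2

Result = video_object_tracking.VideoObjectTrackingPredictionResult

track = Result(
    id="42",
    display_name="car",
    time_segment_start=duration_pb2.Duration(seconds=1),
    time_segment_end=duration_pb2.Duration(seconds=3),
    confidence=wrappers_pb2.FloatValue(value=0.88),
    frames=[
        Result.Frame(
            time_offset=duration_pb2.Duration(seconds=1),
            x_min=wrappers_pb2.FloatValue(value=0.10),
            x_max=wrappers_pb2.FloatValue(value=0.45),
            y_min=wrappers_pb2.FloatValue(value=0.20),
            y_max=wrappers_pb2.FloatValue(value=0.60),
        ),
    ],
)

# Relative width of the first bounding box (coordinates are frame-relative).
width = track.frames[0].x_max.value - track.frames[0].x_min.value
```
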
diff --git a/google/cloud/aiplatform/v1/schema/trainingjob/__init__.py b/google/cloud/aiplatform/v1/schema/trainingjob/__init__.py
new file mode 100644
index 0000000000..60ade065d2
--- /dev/null
+++ b/google/cloud/aiplatform/v1/schema/trainingjob/__init__.py
@@ -0,0 +1,20 @@
+# -*- coding: utf-8 -*-
+
+# Copyright 2021 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+
+from google.cloud.aiplatform.v1.schema.trainingjob import definition
+
+__all__ = ("definition",)
diff --git a/google/cloud/aiplatform/v1/schema/trainingjob/definition/__init__.py b/google/cloud/aiplatform/v1/schema/trainingjob/definition/__init__.py
new file mode 100644
index 0000000000..f8620bb25d
--- /dev/null
+++ b/google/cloud/aiplatform/v1/schema/trainingjob/definition/__init__.py
@@ -0,0 +1,120 @@
+# -*- coding: utf-8 -*-
+
+# Copyright 2020 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+from google.cloud.aiplatform.v1.schema.trainingjob.definition_v1.types.automl_image_classification import (
+ AutoMlImageClassification,
+)
+from google.cloud.aiplatform.v1.schema.trainingjob.definition_v1.types.automl_image_classification import (
+ AutoMlImageClassificationInputs,
+)
+from google.cloud.aiplatform.v1.schema.trainingjob.definition_v1.types.automl_image_classification import (
+ AutoMlImageClassificationMetadata,
+)
+from google.cloud.aiplatform.v1.schema.trainingjob.definition_v1.types.automl_image_object_detection import (
+ AutoMlImageObjectDetection,
+)
+from google.cloud.aiplatform.v1.schema.trainingjob.definition_v1.types.automl_image_object_detection import (
+ AutoMlImageObjectDetectionInputs,
+)
+from google.cloud.aiplatform.v1.schema.trainingjob.definition_v1.types.automl_image_object_detection import (
+ AutoMlImageObjectDetectionMetadata,
+)
+from google.cloud.aiplatform.v1.schema.trainingjob.definition_v1.types.automl_image_segmentation import (
+ AutoMlImageSegmentation,
+)
+from google.cloud.aiplatform.v1.schema.trainingjob.definition_v1.types.automl_image_segmentation import (
+ AutoMlImageSegmentationInputs,
+)
+from google.cloud.aiplatform.v1.schema.trainingjob.definition_v1.types.automl_image_segmentation import (
+ AutoMlImageSegmentationMetadata,
+)
+from google.cloud.aiplatform.v1.schema.trainingjob.definition_v1.types.automl_tables import (
+ AutoMlTables,
+)
+from google.cloud.aiplatform.v1.schema.trainingjob.definition_v1.types.automl_tables import (
+ AutoMlTablesInputs,
+)
+from google.cloud.aiplatform.v1.schema.trainingjob.definition_v1.types.automl_tables import (
+ AutoMlTablesMetadata,
+)
+from google.cloud.aiplatform.v1.schema.trainingjob.definition_v1.types.automl_text_classification import (
+ AutoMlTextClassification,
+)
+from google.cloud.aiplatform.v1.schema.trainingjob.definition_v1.types.automl_text_classification import (
+ AutoMlTextClassificationInputs,
+)
+from google.cloud.aiplatform.v1.schema.trainingjob.definition_v1.types.automl_text_extraction import (
+ AutoMlTextExtraction,
+)
+from google.cloud.aiplatform.v1.schema.trainingjob.definition_v1.types.automl_text_extraction import (
+ AutoMlTextExtractionInputs,
+)
+from google.cloud.aiplatform.v1.schema.trainingjob.definition_v1.types.automl_text_sentiment import (
+ AutoMlTextSentiment,
+)
+from google.cloud.aiplatform.v1.schema.trainingjob.definition_v1.types.automl_text_sentiment import (
+ AutoMlTextSentimentInputs,
+)
+from google.cloud.aiplatform.v1.schema.trainingjob.definition_v1.types.automl_video_action_recognition import (
+ AutoMlVideoActionRecognition,
+)
+from google.cloud.aiplatform.v1.schema.trainingjob.definition_v1.types.automl_video_action_recognition import (
+ AutoMlVideoActionRecognitionInputs,
+)
+from google.cloud.aiplatform.v1.schema.trainingjob.definition_v1.types.automl_video_classification import (
+ AutoMlVideoClassification,
+)
+from google.cloud.aiplatform.v1.schema.trainingjob.definition_v1.types.automl_video_classification import (
+ AutoMlVideoClassificationInputs,
+)
+from google.cloud.aiplatform.v1.schema.trainingjob.definition_v1.types.automl_video_object_tracking import (
+ AutoMlVideoObjectTracking,
+)
+from google.cloud.aiplatform.v1.schema.trainingjob.definition_v1.types.automl_video_object_tracking import (
+ AutoMlVideoObjectTrackingInputs,
+)
+from google.cloud.aiplatform.v1.schema.trainingjob.definition_v1.types.export_evaluated_data_items_config import (
+ ExportEvaluatedDataItemsConfig,
+)
+
+__all__ = (
+ "AutoMlImageClassification",
+ "AutoMlImageClassificationInputs",
+ "AutoMlImageClassificationMetadata",
+ "AutoMlImageObjectDetection",
+ "AutoMlImageObjectDetectionInputs",
+ "AutoMlImageObjectDetectionMetadata",
+ "AutoMlImageSegmentation",
+ "AutoMlImageSegmentationInputs",
+ "AutoMlImageSegmentationMetadata",
+ "AutoMlTables",
+ "AutoMlTablesInputs",
+ "AutoMlTablesMetadata",
+ "AutoMlTextClassification",
+ "AutoMlTextClassificationInputs",
+ "AutoMlTextExtraction",
+ "AutoMlTextExtractionInputs",
+ "AutoMlTextSentiment",
+ "AutoMlTextSentimentInputs",
+ "AutoMlVideoActionRecognition",
+ "AutoMlVideoActionRecognitionInputs",
+ "AutoMlVideoClassification",
+ "AutoMlVideoClassificationInputs",
+ "AutoMlVideoObjectTracking",
+ "AutoMlVideoObjectTrackingInputs",
+ "ExportEvaluatedDataItemsConfig",
+)
diff --git a/google/cloud/aiplatform/v1/schema/trainingjob/definition/py.typed b/google/cloud/aiplatform/v1/schema/trainingjob/definition/py.typed
new file mode 100644
index 0000000000..1a9d2972a0
--- /dev/null
+++ b/google/cloud/aiplatform/v1/schema/trainingjob/definition/py.typed
@@ -0,0 +1,2 @@
+# Marker file for PEP 561.
+# The google-cloud-aiplatform-v1-schema-trainingjob-definition package uses inline types.
diff --git a/google/cloud/aiplatform/v1/schema/trainingjob/definition_v1/__init__.py b/google/cloud/aiplatform/v1/schema/trainingjob/definition_v1/__init__.py
new file mode 100644
index 0000000000..34958e5add
--- /dev/null
+++ b/google/cloud/aiplatform/v1/schema/trainingjob/definition_v1/__init__.py
@@ -0,0 +1,71 @@
+# -*- coding: utf-8 -*-
+
+# Copyright 2020 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+from .types.automl_image_classification import AutoMlImageClassification
+from .types.automl_image_classification import AutoMlImageClassificationInputs
+from .types.automl_image_classification import AutoMlImageClassificationMetadata
+from .types.automl_image_object_detection import AutoMlImageObjectDetection
+from .types.automl_image_object_detection import AutoMlImageObjectDetectionInputs
+from .types.automl_image_object_detection import AutoMlImageObjectDetectionMetadata
+from .types.automl_image_segmentation import AutoMlImageSegmentation
+from .types.automl_image_segmentation import AutoMlImageSegmentationInputs
+from .types.automl_image_segmentation import AutoMlImageSegmentationMetadata
+from .types.automl_tables import AutoMlTables
+from .types.automl_tables import AutoMlTablesInputs
+from .types.automl_tables import AutoMlTablesMetadata
+from .types.automl_text_classification import AutoMlTextClassification
+from .types.automl_text_classification import AutoMlTextClassificationInputs
+from .types.automl_text_extraction import AutoMlTextExtraction
+from .types.automl_text_extraction import AutoMlTextExtractionInputs
+from .types.automl_text_sentiment import AutoMlTextSentiment
+from .types.automl_text_sentiment import AutoMlTextSentimentInputs
+from .types.automl_video_action_recognition import AutoMlVideoActionRecognition
+from .types.automl_video_action_recognition import AutoMlVideoActionRecognitionInputs
+from .types.automl_video_classification import AutoMlVideoClassification
+from .types.automl_video_classification import AutoMlVideoClassificationInputs
+from .types.automl_video_object_tracking import AutoMlVideoObjectTracking
+from .types.automl_video_object_tracking import AutoMlVideoObjectTrackingInputs
+from .types.export_evaluated_data_items_config import ExportEvaluatedDataItemsConfig
+
+
+__all__ = (
+    "AutoMlImageClassification",
+    "AutoMlImageClassificationInputs",
+ "AutoMlImageClassificationMetadata",
+ "AutoMlImageObjectDetection",
+ "AutoMlImageObjectDetectionInputs",
+ "AutoMlImageObjectDetectionMetadata",
+ "AutoMlImageSegmentation",
+ "AutoMlImageSegmentationInputs",
+ "AutoMlImageSegmentationMetadata",
+ "AutoMlTables",
+ "AutoMlTablesInputs",
+ "AutoMlTablesMetadata",
+ "AutoMlTextClassification",
+ "AutoMlTextClassificationInputs",
+ "AutoMlTextExtraction",
+ "AutoMlTextExtractionInputs",
+ "AutoMlTextSentiment",
+ "AutoMlTextSentimentInputs",
+ "AutoMlVideoActionRecognition",
+ "AutoMlVideoActionRecognitionInputs",
+ "AutoMlVideoClassification",
+ "AutoMlVideoClassificationInputs",
+ "AutoMlVideoObjectTracking",
+ "AutoMlVideoObjectTrackingInputs",
+ "ExportEvaluatedDataItemsConfig",
+)
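
One detail worth noting for reviewers: the unversioned definition package added earlier in this diff simply re-exports the classes defined here, so both import paths resolve to the same objects. A tiny sketch, assuming both packages are installed from this change:

```python
from google.cloud.aiplatform.v1.schema.trainingjob import definition
from google.cloud.aiplatform.v1.schema.trainingjob import definition_v1

# Both namespaces re-export the same generated classes.
assert definition.AutoMlTables is definition_v1.AutoMlTables
assert (
    definition.ExportEvaluatedDataItemsConfig
    is definition_v1.ExportEvaluatedDataItemsConfig
)
```
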
diff --git a/google/cloud/aiplatform/v1/schema/trainingjob/definition_v1/py.typed b/google/cloud/aiplatform/v1/schema/trainingjob/definition_v1/py.typed
new file mode 100644
index 0000000000..1a9d2972a0
--- /dev/null
+++ b/google/cloud/aiplatform/v1/schema/trainingjob/definition_v1/py.typed
@@ -0,0 +1,2 @@
+# Marker file for PEP 561.
+# The google-cloud-aiplatform-v1-schema-trainingjob-definition package uses inline types.
diff --git a/google/cloud/aiplatform/v1/schema/trainingjob/definition_v1/types/__init__.py b/google/cloud/aiplatform/v1/schema/trainingjob/definition_v1/types/__init__.py
new file mode 100644
index 0000000000..8a60b2e36c
--- /dev/null
+++ b/google/cloud/aiplatform/v1/schema/trainingjob/definition_v1/types/__init__.py
@@ -0,0 +1,90 @@
+# -*- coding: utf-8 -*-
+
+# Copyright 2020 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+from .automl_image_classification import (
+ AutoMlImageClassification,
+ AutoMlImageClassificationInputs,
+ AutoMlImageClassificationMetadata,
+)
+from .automl_image_object_detection import (
+ AutoMlImageObjectDetection,
+ AutoMlImageObjectDetectionInputs,
+ AutoMlImageObjectDetectionMetadata,
+)
+from .automl_image_segmentation import (
+ AutoMlImageSegmentation,
+ AutoMlImageSegmentationInputs,
+ AutoMlImageSegmentationMetadata,
+)
+from .export_evaluated_data_items_config import ExportEvaluatedDataItemsConfig
+from .automl_tables import (
+ AutoMlTables,
+ AutoMlTablesInputs,
+ AutoMlTablesMetadata,
+)
+from .automl_text_classification import (
+ AutoMlTextClassification,
+ AutoMlTextClassificationInputs,
+)
+from .automl_text_extraction import (
+ AutoMlTextExtraction,
+ AutoMlTextExtractionInputs,
+)
+from .automl_text_sentiment import (
+ AutoMlTextSentiment,
+ AutoMlTextSentimentInputs,
+)
+from .automl_video_action_recognition import (
+ AutoMlVideoActionRecognition,
+ AutoMlVideoActionRecognitionInputs,
+)
+from .automl_video_classification import (
+ AutoMlVideoClassification,
+ AutoMlVideoClassificationInputs,
+)
+from .automl_video_object_tracking import (
+ AutoMlVideoObjectTracking,
+ AutoMlVideoObjectTrackingInputs,
+)
+
+__all__ = (
+ "AutoMlImageClassification",
+ "AutoMlImageClassificationInputs",
+ "AutoMlImageClassificationMetadata",
+ "AutoMlImageObjectDetection",
+ "AutoMlImageObjectDetectionInputs",
+ "AutoMlImageObjectDetectionMetadata",
+ "AutoMlImageSegmentation",
+ "AutoMlImageSegmentationInputs",
+ "AutoMlImageSegmentationMetadata",
+ "ExportEvaluatedDataItemsConfig",
+ "AutoMlTables",
+ "AutoMlTablesInputs",
+ "AutoMlTablesMetadata",
+ "AutoMlTextClassification",
+ "AutoMlTextClassificationInputs",
+ "AutoMlTextExtraction",
+ "AutoMlTextExtractionInputs",
+ "AutoMlTextSentiment",
+ "AutoMlTextSentimentInputs",
+ "AutoMlVideoActionRecognition",
+ "AutoMlVideoActionRecognitionInputs",
+ "AutoMlVideoClassification",
+ "AutoMlVideoClassificationInputs",
+ "AutoMlVideoObjectTracking",
+ "AutoMlVideoObjectTrackingInputs",
+)
diff --git a/google/cloud/aiplatform/v1/schema/trainingjob/definition_v1/types/automl_image_classification.py b/google/cloud/aiplatform/v1/schema/trainingjob/definition_v1/types/automl_image_classification.py
new file mode 100644
index 0000000000..f7e13c60b7
--- /dev/null
+++ b/google/cloud/aiplatform/v1/schema/trainingjob/definition_v1/types/automl_image_classification.py
@@ -0,0 +1,143 @@
+# -*- coding: utf-8 -*-
+
+# Copyright 2020 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+import proto # type: ignore
+
+
+__protobuf__ = proto.module(
+ package="google.cloud.aiplatform.v1.schema.trainingjob.definition",
+ manifest={
+ "AutoMlImageClassification",
+ "AutoMlImageClassificationInputs",
+ "AutoMlImageClassificationMetadata",
+ },
+)
+
+
+class AutoMlImageClassification(proto.Message):
+ r"""A TrainingJob that trains and uploads an AutoML Image
+ Classification Model.
+
+ Attributes:
+ inputs (google.cloud.aiplatform.v1.schema.trainingjob.definition_v1.types.AutoMlImageClassificationInputs):
+ The input parameters of this TrainingJob.
+ metadata (google.cloud.aiplatform.v1.schema.trainingjob.definition_v1.types.AutoMlImageClassificationMetadata):
+ The metadata information.
+ """
+
+ inputs = proto.Field(
+ proto.MESSAGE, number=1, message="AutoMlImageClassificationInputs",
+ )
+
+ metadata = proto.Field(
+ proto.MESSAGE, number=2, message="AutoMlImageClassificationMetadata",
+ )
+
+
+class AutoMlImageClassificationInputs(proto.Message):
+ r"""
+
+ Attributes:
+ model_type (google.cloud.aiplatform.v1.schema.trainingjob.definition_v1.types.AutoMlImageClassificationInputs.ModelType):
+
+ base_model_id (str):
+ The ID of the ``base`` model. If it is specified, the new
+ model will be trained based on the ``base`` model.
+ Otherwise, the new model will be trained from scratch. The
+ ``base`` model must be in the same Project and Location as
+ the new Model to train, and have the same modelType.
+ budget_milli_node_hours (int):
+ The training budget of creating this model, expressed in
+            milli node hours, i.e. a value of 1,000 in this field means
+            1 node hour. The actual metadata.costMilliNodeHours will be
+            equal to or less than this value. If further model training
+            ceases to provide any improvements, it will stop without using the
+ full budget and the metadata.successfulStopReason will be
+ ``model-converged``. Note, node_hour = actual_hour \*
+ number_of_nodes_involved. For modelType
+ ``cloud``\ (default), the budget must be between 8,000 and
+ 800,000 milli node hours, inclusive. The default value is
+ 192,000 which represents one day in wall time, considering 8
+ nodes are used. For model types ``mobile-tf-low-latency-1``,
+ ``mobile-tf-versatile-1``, ``mobile-tf-high-accuracy-1``,
+ the training budget must be between 1,000 and 100,000 milli
+ node hours, inclusive. The default value is 24,000 which
+ represents one day in wall time on a single node that is
+ used.
+ disable_early_stopping (bool):
+ Use the entire training budget. This disables
+            the early stopping feature. When false, the early
+ stopping feature is enabled, which means that
+ AutoML Image Classification might stop training
+ before the entire training budget has been used.
+ multi_label (bool):
+ If false, a single-label (multi-class) Model
+ will be trained (i.e. assuming that for each
+ image just up to one annotation may be
+ applicable). If true, a multi-label Model will
+ be trained (i.e. assuming that for each image
+ multiple annotations may be applicable).
+ """
+
+ class ModelType(proto.Enum):
+ r""""""
+ MODEL_TYPE_UNSPECIFIED = 0
+ CLOUD = 1
+ MOBILE_TF_LOW_LATENCY_1 = 2
+ MOBILE_TF_VERSATILE_1 = 3
+ MOBILE_TF_HIGH_ACCURACY_1 = 4
+
+ model_type = proto.Field(proto.ENUM, number=1, enum=ModelType,)
+
+ base_model_id = proto.Field(proto.STRING, number=2)
+
+ budget_milli_node_hours = proto.Field(proto.INT64, number=3)
+
+ disable_early_stopping = proto.Field(proto.BOOL, number=4)
+
+ multi_label = proto.Field(proto.BOOL, number=5)
+
+
+class AutoMlImageClassificationMetadata(proto.Message):
+ r"""
+
+ Attributes:
+ cost_milli_node_hours (int):
+ The actual training cost of creating this
+            model, expressed in milli node hours, i.e. a value
+            of 1,000 in this field means 1 node hour.
+ Guaranteed to not exceed
+ inputs.budgetMilliNodeHours.
+ successful_stop_reason (google.cloud.aiplatform.v1.schema.trainingjob.definition_v1.types.AutoMlImageClassificationMetadata.SuccessfulStopReason):
+ For successful job completions, this is the
+ reason why the job has finished.
+ """
+
+ class SuccessfulStopReason(proto.Enum):
+ r""""""
+ SUCCESSFUL_STOP_REASON_UNSPECIFIED = 0
+ BUDGET_REACHED = 1
+ MODEL_CONVERGED = 2
+
+ cost_milli_node_hours = proto.Field(proto.INT64, number=1)
+
+ successful_stop_reason = proto.Field(
+ proto.ENUM, number=2, enum=SuccessfulStopReason,
+ )
+
+
+__all__ = tuple(sorted(__protobuf__.manifest))
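
To make the budget semantics above concrete, here is a minimal, hypothetical sketch of building the inputs message for an image classification training task; the values mirror the documented defaults and nothing here is prescriptive.

```python
from google.cloud.aiplatform.v1.schema.trainingjob.definition_v1.types import (
    automl_image_classification as aic,
)

inputs = aic.AutoMlImageClassificationInputs(
    model_type=aic.AutoMlImageClassificationInputs.ModelType.CLOUD,
    budget_milli_node_hours=192_000,  # documented default: one day on 8 nodes
    multi_label=False,
    disable_early_stopping=False,
)

training_task = aic.AutoMlImageClassification(inputs=inputs)
# proto-plus can serialize the wrapper, e.g. to JSON, for inspection.
print(aic.AutoMlImageClassification.to_json(training_task))
```
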
diff --git a/google/cloud/aiplatform/v1/schema/trainingjob/definition_v1/types/automl_image_object_detection.py b/google/cloud/aiplatform/v1/schema/trainingjob/definition_v1/types/automl_image_object_detection.py
new file mode 100644
index 0000000000..1c2c9f83b7
--- /dev/null
+++ b/google/cloud/aiplatform/v1/schema/trainingjob/definition_v1/types/automl_image_object_detection.py
@@ -0,0 +1,128 @@
+# -*- coding: utf-8 -*-
+
+# Copyright 2020 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+import proto # type: ignore
+
+
+__protobuf__ = proto.module(
+ package="google.cloud.aiplatform.v1.schema.trainingjob.definition",
+ manifest={
+ "AutoMlImageObjectDetection",
+ "AutoMlImageObjectDetectionInputs",
+ "AutoMlImageObjectDetectionMetadata",
+ },
+)
+
+
+class AutoMlImageObjectDetection(proto.Message):
+ r"""A TrainingJob that trains and uploads an AutoML Image Object
+ Detection Model.
+
+ Attributes:
+ inputs (google.cloud.aiplatform.v1.schema.trainingjob.definition_v1.types.AutoMlImageObjectDetectionInputs):
+ The input parameters of this TrainingJob.
+ metadata (google.cloud.aiplatform.v1.schema.trainingjob.definition_v1.types.AutoMlImageObjectDetectionMetadata):
+            The metadata information.
+ """
+
+ inputs = proto.Field(
+ proto.MESSAGE, number=1, message="AutoMlImageObjectDetectionInputs",
+ )
+
+ metadata = proto.Field(
+ proto.MESSAGE, number=2, message="AutoMlImageObjectDetectionMetadata",
+ )
+
+
+class AutoMlImageObjectDetectionInputs(proto.Message):
+ r"""
+
+ Attributes:
+ model_type (google.cloud.aiplatform.v1.schema.trainingjob.definition_v1.types.AutoMlImageObjectDetectionInputs.ModelType):
+
+ budget_milli_node_hours (int):
+ The training budget of creating this model, expressed in
+            milli node hours, i.e. a value of 1,000 in this field means
+            1 node hour. The actual metadata.costMilliNodeHours will be
+            equal to or less than this value. If further model training
+            ceases to provide any improvements, it will stop without using the
+ full budget and the metadata.successfulStopReason will be
+ ``model-converged``. Note, node_hour = actual_hour \*
+ number_of_nodes_involved. For modelType
+ ``cloud``\ (default), the budget must be between 20,000 and
+ 900,000 milli node hours, inclusive. The default value is
+ 216,000 which represents one day in wall time, considering 9
+ nodes are used. For model types ``mobile-tf-low-latency-1``,
+ ``mobile-tf-versatile-1``, ``mobile-tf-high-accuracy-1`` the
+ training budget must be between 1,000 and 100,000 milli node
+ hours, inclusive. The default value is 24,000 which
+ represents one day in wall time on a single node that is
+ used.
+ disable_early_stopping (bool):
+ Use the entire training budget. This disables
+            the early stopping feature. When false, the early
+ stopping feature is enabled, which means that
+ AutoML Image Object Detection might stop
+ training before the entire training budget has
+ been used.
+ """
+
+ class ModelType(proto.Enum):
+ r""""""
+ MODEL_TYPE_UNSPECIFIED = 0
+ CLOUD_HIGH_ACCURACY_1 = 1
+ CLOUD_LOW_LATENCY_1 = 2
+ MOBILE_TF_LOW_LATENCY_1 = 3
+ MOBILE_TF_VERSATILE_1 = 4
+ MOBILE_TF_HIGH_ACCURACY_1 = 5
+
+ model_type = proto.Field(proto.ENUM, number=1, enum=ModelType,)
+
+ budget_milli_node_hours = proto.Field(proto.INT64, number=2)
+
+ disable_early_stopping = proto.Field(proto.BOOL, number=3)
+
+
+class AutoMlImageObjectDetectionMetadata(proto.Message):
+ r"""
+
+ Attributes:
+ cost_milli_node_hours (int):
+ The actual training cost of creating this
+            model, expressed in milli node hours, i.e. a value
+            of 1,000 in this field means 1 node hour.
+ Guaranteed to not exceed
+ inputs.budgetMilliNodeHours.
+ successful_stop_reason (google.cloud.aiplatform.v1.schema.trainingjob.definition_v1.types.AutoMlImageObjectDetectionMetadata.SuccessfulStopReason):
+ For successful job completions, this is the
+ reason why the job has finished.
+ """
+
+ class SuccessfulStopReason(proto.Enum):
+ r""""""
+ SUCCESSFUL_STOP_REASON_UNSPECIFIED = 0
+ BUDGET_REACHED = 1
+ MODEL_CONVERGED = 2
+
+ cost_milli_node_hours = proto.Field(proto.INT64, number=1)
+
+ successful_stop_reason = proto.Field(
+ proto.ENUM, number=2, enum=SuccessfulStopReason,
+ )
+
+
+__all__ = tuple(sorted(__protobuf__.manifest))
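
Analogously, a hypothetical sketch of the object detection inputs, using the documented minimum budget for the cloud model types:

```python
from google.cloud.aiplatform.v1.schema.trainingjob.definition_v1.types import (
    automl_image_object_detection as aiod,
)

inputs = aiod.AutoMlImageObjectDetectionInputs(
    model_type=aiod.AutoMlImageObjectDetectionInputs.ModelType.CLOUD_HIGH_ACCURACY_1,
    budget_milli_node_hours=20_000,  # documented minimum for cloud model types
    disable_early_stopping=False,
)
```
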
diff --git a/google/cloud/aiplatform/v1/schema/trainingjob/definition_v1/types/automl_image_segmentation.py b/google/cloud/aiplatform/v1/schema/trainingjob/definition_v1/types/automl_image_segmentation.py
new file mode 100644
index 0000000000..a81103657e
--- /dev/null
+++ b/google/cloud/aiplatform/v1/schema/trainingjob/definition_v1/types/automl_image_segmentation.py
@@ -0,0 +1,122 @@
+# -*- coding: utf-8 -*-
+
+# Copyright 2020 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+import proto # type: ignore
+
+
+__protobuf__ = proto.module(
+ package="google.cloud.aiplatform.v1.schema.trainingjob.definition",
+ manifest={
+ "AutoMlImageSegmentation",
+ "AutoMlImageSegmentationInputs",
+ "AutoMlImageSegmentationMetadata",
+ },
+)
+
+
+class AutoMlImageSegmentation(proto.Message):
+ r"""A TrainingJob that trains and uploads an AutoML Image
+ Segmentation Model.
+
+ Attributes:
+ inputs (google.cloud.aiplatform.v1.schema.trainingjob.definition_v1.types.AutoMlImageSegmentationInputs):
+ The input parameters of this TrainingJob.
+ metadata (google.cloud.aiplatform.v1.schema.trainingjob.definition_v1.types.AutoMlImageSegmentationMetadata):
+ The metadata information.
+ """
+
+ inputs = proto.Field(
+ proto.MESSAGE, number=1, message="AutoMlImageSegmentationInputs",
+ )
+
+ metadata = proto.Field(
+ proto.MESSAGE, number=2, message="AutoMlImageSegmentationMetadata",
+ )
+
+
+class AutoMlImageSegmentationInputs(proto.Message):
+ r"""
+
+ Attributes:
+ model_type (google.cloud.aiplatform.v1.schema.trainingjob.definition_v1.types.AutoMlImageSegmentationInputs.ModelType):
+
+ budget_milli_node_hours (int):
+ The training budget of creating this model, expressed in
+            milli node hours, i.e. a value of 1,000 in this field means
+            1 node hour. The actual metadata.costMilliNodeHours will be
+            equal to or less than this value. If further model training
+            ceases to provide any improvements, it will stop without using the
+ full budget and the metadata.successfulStopReason will be
+ ``model-converged``. Note, node_hour = actual_hour \*
+            number_of_nodes_involved. Or actual_wall_clock_hours =
+            train_budget_milli_node_hours / (number_of_nodes_involved \*
+            1000). For modelType ``cloud-high-accuracy-1``\ (default),
+ the budget must be between 20,000 and 2,000,000 milli node
+ hours, inclusive. The default value is 192,000 which
+ represents one day in wall time (1000 milli \* 24 hours \* 8
+ nodes).
+ base_model_id (str):
+ The ID of the ``base`` model. If it is specified, the new
+ model will be trained based on the ``base`` model.
+ Otherwise, the new model will be trained from scratch. The
+ ``base`` model must be in the same Project and Location as
+ the new Model to train, and have the same modelType.
+ """
+
+ class ModelType(proto.Enum):
+ r""""""
+ MODEL_TYPE_UNSPECIFIED = 0
+ CLOUD_HIGH_ACCURACY_1 = 1
+ CLOUD_LOW_ACCURACY_1 = 2
+ MOBILE_TF_LOW_LATENCY_1 = 3
+
+ model_type = proto.Field(proto.ENUM, number=1, enum=ModelType,)
+
+ budget_milli_node_hours = proto.Field(proto.INT64, number=2)
+
+ base_model_id = proto.Field(proto.STRING, number=3)
+
+
+class AutoMlImageSegmentationMetadata(proto.Message):
+ r"""
+
+ Attributes:
+ cost_milli_node_hours (int):
+ The actual training cost of creating this
+            model, expressed in milli node hours, i.e. a value
+            of 1,000 in this field means 1 node hour.
+ Guaranteed to not exceed
+ inputs.budgetMilliNodeHours.
+ successful_stop_reason (google.cloud.aiplatform.v1.schema.trainingjob.definition_v1.types.AutoMlImageSegmentationMetadata.SuccessfulStopReason):
+ For successful job completions, this is the
+ reason why the job has finished.
+ """
+
+ class SuccessfulStopReason(proto.Enum):
+ r""""""
+ SUCCESSFUL_STOP_REASON_UNSPECIFIED = 0
+ BUDGET_REACHED = 1
+ MODEL_CONVERGED = 2
+
+ cost_milli_node_hours = proto.Field(proto.INT64, number=1)
+
+ successful_stop_reason = proto.Field(
+ proto.ENUM, number=2, enum=SuccessfulStopReason,
+ )
+
+
+__all__ = tuple(sorted(__protobuf__.manifest))
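
And for segmentation, a hypothetical sketch showing the optional base_model_id field; the ID below is a placeholder, not a real model:

```python
from google.cloud.aiplatform.v1.schema.trainingjob.definition_v1.types import (
    automl_image_segmentation as ais,
)

inputs = ais.AutoMlImageSegmentationInputs(
    model_type=ais.AutoMlImageSegmentationInputs.ModelType.CLOUD_HIGH_ACCURACY_1,
    budget_milli_node_hours=192_000,
    # Placeholder ID: when set, training starts from this model instead of
    # from scratch (same project, location, and modelType required).
    base_model_id="1234567890123456789",
)
```
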
diff --git a/google/cloud/aiplatform/v1/schema/trainingjob/definition_v1/types/automl_tables.py b/google/cloud/aiplatform/v1/schema/trainingjob/definition_v1/types/automl_tables.py
new file mode 100644
index 0000000000..1c3d0c8da7
--- /dev/null
+++ b/google/cloud/aiplatform/v1/schema/trainingjob/definition_v1/types/automl_tables.py
@@ -0,0 +1,447 @@
+# -*- coding: utf-8 -*-
+
+# Copyright 2020 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+import proto # type: ignore
+
+
+from google.cloud.aiplatform.v1.schema.trainingjob.definition_v1.types import (
+ export_evaluated_data_items_config as gcastd_export_evaluated_data_items_config,
+)
+
+
+__protobuf__ = proto.module(
+ package="google.cloud.aiplatform.v1.schema.trainingjob.definition",
+ manifest={"AutoMlTables", "AutoMlTablesInputs", "AutoMlTablesMetadata",},
+)
+
+
+class AutoMlTables(proto.Message):
+ r"""A TrainingJob that trains and uploads an AutoML Tables Model.
+
+ Attributes:
+ inputs (google.cloud.aiplatform.v1.schema.trainingjob.definition_v1.types.AutoMlTablesInputs):
+ The input parameters of this TrainingJob.
+ metadata (google.cloud.aiplatform.v1.schema.trainingjob.definition_v1.types.AutoMlTablesMetadata):
+ The metadata information.
+ """
+
+ inputs = proto.Field(proto.MESSAGE, number=1, message="AutoMlTablesInputs",)
+
+ metadata = proto.Field(proto.MESSAGE, number=2, message="AutoMlTablesMetadata",)
+
+
+class AutoMlTablesInputs(proto.Message):
+ r"""
+
+ Attributes:
+ optimization_objective_recall_value (float):
+ Required when optimization_objective is
+ "maximize-precision-at-recall". Must be between 0 and 1,
+ inclusive.
+ optimization_objective_precision_value (float):
+ Required when optimization_objective is
+ "maximize-recall-at-precision". Must be between 0 and 1,
+ inclusive.
+ prediction_type (str):
+            The type of prediction the Model is to produce.
+            "classification" - Predict one out of multiple
+            target values; one value is picked for each row.
+            "regression" - Predict a value based on its
+            relation to other values. This type is available
+            only to columns that contain semantically numeric
+            values, i.e. integers or floating point numbers,
+            even if stored as e.g. strings.
+ target_column (str):
+ The column name of the target column that the
+ model is to predict.
+ transformations (Sequence[google.cloud.aiplatform.v1.schema.trainingjob.definition_v1.types.AutoMlTablesInputs.Transformation]):
+            Each transformation will apply a transform
+            function to a given input column, and the result
+            will be used for training. When creating a
+            transformation for a BigQuery Struct column, the
+            column should be flattened using "." as the
+            delimiter.
+ optimization_objective (str):
+ Objective function the model is optimizing
+ towards. The training process creates a model
+ that maximizes/minimizes the value of the
+ objective function over the validation set.
+
+ The supported optimization objectives depend on
+ the prediction type. If the field is not set, a
+ default objective function is used.
+            classification (binary):
+            "maximize-au-roc" (default) - Maximize the area
+            under the receiver operating characteristic
+            (ROC) curve.
+            "minimize-log-loss" - Minimize log loss.
+            "maximize-au-prc" - Maximize the area under the
+            precision-recall curve.
+            "maximize-precision-at-recall" - Maximize
+            precision for a specified recall value.
+            "maximize-recall-at-precision" - Maximize recall
+            for a specified precision value.
+            classification (multi-class):
+            "minimize-log-loss" (default) - Minimize log
+            loss.
+            regression:
+            "minimize-rmse" (default) - Minimize
+            root-mean-squared error (RMSE).
+            "minimize-mae" - Minimize mean-absolute error
+            (MAE).
+            "minimize-rmsle" - Minimize root-mean-squared
+            log error (RMSLE).
+ train_budget_milli_node_hours (int):
+ Required. The train budget of creating this
+            model, expressed in milli node hours, i.e. a value
+            of 1,000 in this field means 1 node hour.
+            The training cost of the model will not exceed
+            this budget. The final cost is intended to be
+            close to the budget, though it may end up being
+            (even) noticeably smaller, at the backend's
+            discretion. This may especially happen when
+            further model training ceases to provide any
+ improvements.
+ If the budget is set to a value known to be
+ insufficient to train a model for the given
+ dataset, the training won't be attempted and
+ will error.
+
+ The train budget must be between 1,000 and
+ 72,000 milli node hours, inclusive.
+ disable_early_stopping (bool):
+ Use the entire training budget. This disables
+ the early stopping feature. By default, the
+ early stopping feature is enabled, which means
+ that AutoML Tables might stop training before
+ the entire training budget has been used.
+ weight_column_name (str):
+ Column name that should be used as the weight
+ column. Higher values in this column give more
+ importance to the row during model training. The
+            column must have numeric values between 0 and
+            10000, inclusive; 0 means the row is ignored
+            for training. If the weight column field is not
+            set, then all rows are assumed to have an equal
+            weight of 1.
+ export_evaluated_data_items_config (google.cloud.aiplatform.v1.schema.trainingjob.definition_v1.types.ExportEvaluatedDataItemsConfig):
+ Configuration for exporting test set
+ predictions to a BigQuery table. If this
+ configuration is absent, then the export is not
+ performed.
+ """
+
+ class Transformation(proto.Message):
+ r"""
+
+ Attributes:
+ auto (google.cloud.aiplatform.v1.schema.trainingjob.definition_v1.types.AutoMlTablesInputs.Transformation.AutoTransformation):
+
+ numeric (google.cloud.aiplatform.v1.schema.trainingjob.definition_v1.types.AutoMlTablesInputs.Transformation.NumericTransformation):
+
+ categorical (google.cloud.aiplatform.v1.schema.trainingjob.definition_v1.types.AutoMlTablesInputs.Transformation.CategoricalTransformation):
+
+ timestamp (google.cloud.aiplatform.v1.schema.trainingjob.definition_v1.types.AutoMlTablesInputs.Transformation.TimestampTransformation):
+
+ text (google.cloud.aiplatform.v1.schema.trainingjob.definition_v1.types.AutoMlTablesInputs.Transformation.TextTransformation):
+
+ repeated_numeric (google.cloud.aiplatform.v1.schema.trainingjob.definition_v1.types.AutoMlTablesInputs.Transformation.NumericArrayTransformation):
+
+ repeated_categorical (google.cloud.aiplatform.v1.schema.trainingjob.definition_v1.types.AutoMlTablesInputs.Transformation.CategoricalArrayTransformation):
+
+ repeated_text (google.cloud.aiplatform.v1.schema.trainingjob.definition_v1.types.AutoMlTablesInputs.Transformation.TextArrayTransformation):
+
+ """
+
+ class AutoTransformation(proto.Message):
+ r"""Training pipeline will infer the proper transformation based
+ on the statistic of dataset.
+
+ Attributes:
+ column_name (str):
+
+ """
+
+ column_name = proto.Field(proto.STRING, number=1)
+
+ class NumericTransformation(proto.Message):
+ r"""Training pipeline will perform following transformation functions.
+
+ - The value converted to float32.
+ - The z_score of the value.
+ - log(value+1) when the value is greater than or equal to 0.
+ Otherwise, this transformation is not applied and the value is
+ considered a missing value.
+ - z_score of log(value+1) when the value is greater than or equal
+ to 0. Otherwise, this transformation is not applied and the value
+ is considered a missing value.
+ - A boolean value that indicates whether the value is valid.
+
+ Attributes:
+ column_name (str):
+
+ invalid_values_allowed (bool):
+                If invalid values are allowed, the training
+                pipeline will create a boolean feature that
+                indicates whether the value is valid. Otherwise,
+                the training pipeline will discard the input row
+                from the training data.
+ """
+
+ column_name = proto.Field(proto.STRING, number=1)
+
+ invalid_values_allowed = proto.Field(proto.BOOL, number=2)
+
+ class CategoricalTransformation(proto.Message):
+ r"""Training pipeline will perform following transformation functions.
+
+ - The categorical string as is--no change to case, punctuation,
+ spelling, tense, and so on.
+ - Convert the category name to a dictionary lookup index and
+ generate an embedding for each index.
+ - Categories that appear less than 5 times in the training dataset
+ are treated as the "unknown" category. The "unknown" category
+ gets its own special lookup index and resulting embedding.
+
+ Attributes:
+ column_name (str):
+
+ """
+
+ column_name = proto.Field(proto.STRING, number=1)
+
+ class TimestampTransformation(proto.Message):
+ r"""Training pipeline will perform following transformation functions.
+
+ - Apply the transformation functions for Numerical columns.
+        - Determine the year, month, day, and weekday. Treat each value
+          from the timestamp as a Categorical column.
+ - Invalid numerical values (for example, values that fall outside
+ of a typical timestamp range, or are extreme values) receive no
+ special treatment and are not removed.
+
+ Attributes:
+ column_name (str):
+
+ time_format (str):
+ The format in which that time field is expressed. The
+ time_format must either be one of:
+
+ - ``unix-seconds``
+ - ``unix-milliseconds``
+ - ``unix-microseconds``
+ - ``unix-nanoseconds`` (for respectively number of seconds,
+ milliseconds, microseconds and nanoseconds since start of
+ the Unix epoch); or be written in ``strftime`` syntax. If
+ time_format is not set, then the default format is RFC
+ 3339 ``date-time`` format, where ``time-offset`` =
+ ``"Z"`` (e.g. 1985-04-12T23:20:50.52Z)
+ invalid_values_allowed (bool):
+                If invalid values are allowed, the training
+                pipeline will create a boolean feature that
+                indicates whether the value is valid. Otherwise,
+                the training pipeline will discard the input row
+                from the training data.
+ """
+
+ column_name = proto.Field(proto.STRING, number=1)
+
+ time_format = proto.Field(proto.STRING, number=2)
+
+ invalid_values_allowed = proto.Field(proto.BOOL, number=3)
+
+ class TextTransformation(proto.Message):
+ r"""Training pipeline will perform following transformation functions.
+
+ - The text as is--no change to case, punctuation, spelling, tense,
+ and so on.
+        - Tokenize text to words. Convert each word to a dictionary lookup
+ index and generate an embedding for each index. Combine the
+ embedding of all elements into a single embedding using the mean.
+ - Tokenization is based on unicode script boundaries.
+ - Missing values get their own lookup index and resulting
+ embedding.
+ - Stop-words receive no special treatment and are not removed.
+
+ Attributes:
+ column_name (str):
+
+ """
+
+ column_name = proto.Field(proto.STRING, number=1)
+
+ class NumericArrayTransformation(proto.Message):
+ r"""Treats the column as numerical array and performs following
+ transformation functions.
+
+        - All transformations for Numerical types applied to the average
+          of all elements.
+ - The average of empty arrays is treated as zero.
+
+ Attributes:
+ column_name (str):
+
+ invalid_values_allowed (bool):
+                If invalid values are allowed, the training
+                pipeline will create a boolean feature that
+                indicates whether the value is valid. Otherwise,
+                the training pipeline will discard the input row
+                from the training data.
+ """
+
+ column_name = proto.Field(proto.STRING, number=1)
+
+ invalid_values_allowed = proto.Field(proto.BOOL, number=2)
+
+ class CategoricalArrayTransformation(proto.Message):
+ r"""Treats the column as categorical array and performs following
+ transformation functions.
+
+ - For each element in the array, convert the category name to a
+ dictionary lookup index and generate an embedding for each index.
+ Combine the embedding of all elements into a single embedding
+ using the mean.
+        - Empty arrays are treated as an embedding of zeroes.
+
+ Attributes:
+ column_name (str):
+
+ """
+
+ column_name = proto.Field(proto.STRING, number=1)
+
+ class TextArrayTransformation(proto.Message):
+ r"""Treats the column as text array and performs following
+ transformation functions.
+
+ - Concatenate all text values in the array into a single text value
+ using a space (" ") as a delimiter, and then treat the result as
+ a single text value. Apply the transformations for Text columns.
+        - Empty arrays are treated as empty text.
+
+ Attributes:
+ column_name (str):
+
+ """
+
+ column_name = proto.Field(proto.STRING, number=1)
+
+ auto = proto.Field(
+ proto.MESSAGE,
+ number=1,
+ oneof="transformation_detail",
+ message="AutoMlTablesInputs.Transformation.AutoTransformation",
+ )
+
+ numeric = proto.Field(
+ proto.MESSAGE,
+ number=2,
+ oneof="transformation_detail",
+ message="AutoMlTablesInputs.Transformation.NumericTransformation",
+ )
+
+ categorical = proto.Field(
+ proto.MESSAGE,
+ number=3,
+ oneof="transformation_detail",
+ message="AutoMlTablesInputs.Transformation.CategoricalTransformation",
+ )
+
+ timestamp = proto.Field(
+ proto.MESSAGE,
+ number=4,
+ oneof="transformation_detail",
+ message="AutoMlTablesInputs.Transformation.TimestampTransformation",
+ )
+
+ text = proto.Field(
+ proto.MESSAGE,
+ number=5,
+ oneof="transformation_detail",
+ message="AutoMlTablesInputs.Transformation.TextTransformation",
+ )
+
+ repeated_numeric = proto.Field(
+ proto.MESSAGE,
+ number=6,
+ oneof="transformation_detail",
+ message="AutoMlTablesInputs.Transformation.NumericArrayTransformation",
+ )
+
+ repeated_categorical = proto.Field(
+ proto.MESSAGE,
+ number=7,
+ oneof="transformation_detail",
+ message="AutoMlTablesInputs.Transformation.CategoricalArrayTransformation",
+ )
+
+ repeated_text = proto.Field(
+ proto.MESSAGE,
+ number=8,
+ oneof="transformation_detail",
+ message="AutoMlTablesInputs.Transformation.TextArrayTransformation",
+ )
+
+ optimization_objective_recall_value = proto.Field(
+ proto.FLOAT, number=5, oneof="additional_optimization_objective_config"
+ )
+
+ optimization_objective_precision_value = proto.Field(
+ proto.FLOAT, number=6, oneof="additional_optimization_objective_config"
+ )
+
+ prediction_type = proto.Field(proto.STRING, number=1)
+
+ target_column = proto.Field(proto.STRING, number=2)
+
+ transformations = proto.RepeatedField(
+ proto.MESSAGE, number=3, message=Transformation,
+ )
+
+ optimization_objective = proto.Field(proto.STRING, number=4)
+
+ train_budget_milli_node_hours = proto.Field(proto.INT64, number=7)
+
+ disable_early_stopping = proto.Field(proto.BOOL, number=8)
+
+ weight_column_name = proto.Field(proto.STRING, number=9)
+
+ export_evaluated_data_items_config = proto.Field(
+ proto.MESSAGE,
+ number=10,
+ message=gcastd_export_evaluated_data_items_config.ExportEvaluatedDataItemsConfig,
+ )
+
+
+class AutoMlTablesMetadata(proto.Message):
+ r"""Model metadata specific to AutoML Tables.
+
+ Attributes:
+ train_cost_milli_node_hours (int):
+ Output only. The actual training cost of the
+            model, expressed in milli node hours, i.e. a value
+            of 1,000 in this field means 1 node hour.
+ Guaranteed to not exceed the train budget.
+ """
+
+ train_cost_milli_node_hours = proto.Field(proto.INT64, number=1)
+
+
+__all__ = tuple(sorted(__protobuf__.manifest))
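
Because AutoMlTablesInputs is the most involved of these messages, a short hypothetical sketch of the transformation oneof may help reviewers; the column names are invented.

```python
from google.cloud.aiplatform.v1.schema.trainingjob.definition_v1.types import (
    automl_tables,
)

Inputs = automl_tables.AutoMlTablesInputs
Transformation = Inputs.Transformation

inputs = Inputs(
    prediction_type="classification",
    target_column="churned",                 # invented column name
    optimization_objective="maximize-au-prc",
    train_budget_milli_node_hours=1_000,     # documented minimum budget
    transformations=[
        # Each Transformation sets exactly one member of the
        # ``transformation_detail`` oneof.
        Transformation(auto=Transformation.AutoTransformation(column_name="age")),
        Transformation(
            categorical=Transformation.CategoricalTransformation(
                column_name="plan_type",
            ),
        ),
    ],
)

assert inputs.transformations[1].categorical.column_name == "plan_type"
```
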
diff --git a/google/cloud/aiplatform/v1/schema/trainingjob/definition_v1/types/automl_text_classification.py b/google/cloud/aiplatform/v1/schema/trainingjob/definition_v1/types/automl_text_classification.py
new file mode 100644
index 0000000000..205deaf375
--- /dev/null
+++ b/google/cloud/aiplatform/v1/schema/trainingjob/definition_v1/types/automl_text_classification.py
@@ -0,0 +1,52 @@
+# -*- coding: utf-8 -*-
+
+# Copyright 2020 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+import proto # type: ignore
+
+
+__protobuf__ = proto.module(
+ package="google.cloud.aiplatform.v1.schema.trainingjob.definition",
+ manifest={"AutoMlTextClassification", "AutoMlTextClassificationInputs",},
+)
+
+
+class AutoMlTextClassification(proto.Message):
+ r"""A TrainingJob that trains and uploads an AutoML Text
+ Classification Model.
+
+ Attributes:
+ inputs (google.cloud.aiplatform.v1.schema.trainingjob.definition_v1.types.AutoMlTextClassificationInputs):
+ The input parameters of this TrainingJob.
+ """
+
+ inputs = proto.Field(
+ proto.MESSAGE, number=1, message="AutoMlTextClassificationInputs",
+ )
+
+
+class AutoMlTextClassificationInputs(proto.Message):
+ r"""
+
+ Attributes:
+ multi_label (bool):
+
+ """
+
+ multi_label = proto.Field(proto.BOOL, number=1)
+
+
+__all__ = tuple(sorted(__protobuf__.manifest))
diff --git a/google/cloud/aiplatform/v1/schema/trainingjob/definition_v1/types/automl_text_extraction.py b/google/cloud/aiplatform/v1/schema/trainingjob/definition_v1/types/automl_text_extraction.py
new file mode 100644
index 0000000000..fad28847af
--- /dev/null
+++ b/google/cloud/aiplatform/v1/schema/trainingjob/definition_v1/types/automl_text_extraction.py
@@ -0,0 +1,43 @@
+# -*- coding: utf-8 -*-
+
+# Copyright 2020 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+import proto # type: ignore
+
+
+__protobuf__ = proto.module(
+ package="google.cloud.aiplatform.v1.schema.trainingjob.definition",
+ manifest={"AutoMlTextExtraction", "AutoMlTextExtractionInputs",},
+)
+
+
+class AutoMlTextExtraction(proto.Message):
+ r"""A TrainingJob that trains and uploads an AutoML Text
+ Extraction Model.
+
+ Attributes:
+ inputs (google.cloud.aiplatform.v1.schema.trainingjob.definition_v1.types.AutoMlTextExtractionInputs):
+ The input parameters of this TrainingJob.
+ """
+
+ inputs = proto.Field(proto.MESSAGE, number=1, message="AutoMlTextExtractionInputs",)
+
+
+class AutoMlTextExtractionInputs(proto.Message):
+ r""""""
+
+
+__all__ = tuple(sorted(__protobuf__.manifest))
diff --git a/google/cloud/aiplatform/v1/schema/trainingjob/definition_v1/types/automl_text_sentiment.py b/google/cloud/aiplatform/v1/schema/trainingjob/definition_v1/types/automl_text_sentiment.py
new file mode 100644
index 0000000000..ca80a44d1d
--- /dev/null
+++ b/google/cloud/aiplatform/v1/schema/trainingjob/definition_v1/types/automl_text_sentiment.py
@@ -0,0 +1,59 @@
+# -*- coding: utf-8 -*-
+
+# Copyright 2020 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+import proto # type: ignore
+
+
+__protobuf__ = proto.module(
+ package="google.cloud.aiplatform.v1.schema.trainingjob.definition",
+ manifest={"AutoMlTextSentiment", "AutoMlTextSentimentInputs",},
+)
+
+
+class AutoMlTextSentiment(proto.Message):
+ r"""A TrainingJob that trains and uploads an AutoML Text
+ Sentiment Model.
+
+ Attributes:
+ inputs (google.cloud.aiplatform.v1.schema.trainingjob.definition_v1.types.AutoMlTextSentimentInputs):
+ The input parameters of this TrainingJob.
+ """
+
+ inputs = proto.Field(proto.MESSAGE, number=1, message="AutoMlTextSentimentInputs",)
+
+
+class AutoMlTextSentimentInputs(proto.Message):
+ r"""
+
+ Attributes:
+ sentiment_max (int):
+ A sentiment is expressed as an integer
+ ordinal, where higher value means a more
+ positive sentiment. The range of sentiments that
+ will be used is between 0 and sentimentMax
+ (inclusive on both ends), and all the values in
+ the range must be represented in the dataset
+ before a model can be created.
+ Only the Annotations with this sentimentMax will
+ be used for training. sentimentMax value must be
+ between 1 and 10 (inclusive).
+ """
+
+ sentiment_max = proto.Field(proto.INT32, number=1)
+
+
+__all__ = tuple(sorted(__protobuf__.manifest))
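
A short sketch of the sentiment inputs, under the documented constraint that sentiment_max is between 1 and 10; the chosen value is illustrative:

```python
from google.cloud.aiplatform.v1.schema.trainingjob.definition_v1.types import (
    automl_text_sentiment,
)

# With sentiment_max=4, every ordinal 0..4 must appear in the dataset,
# 0 being the most negative and 4 the most positive sentiment.
inputs = automl_text_sentiment.AutoMlTextSentimentInputs(sentiment_max=4)
task = automl_text_sentiment.AutoMlTextSentiment(inputs=inputs)
```
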
diff --git a/google/cloud/aiplatform/v1/schema/trainingjob/definition_v1/types/automl_video_action_recognition.py b/google/cloud/aiplatform/v1/schema/trainingjob/definition_v1/types/automl_video_action_recognition.py
new file mode 100644
index 0000000000..1a20a6d725
--- /dev/null
+++ b/google/cloud/aiplatform/v1/schema/trainingjob/definition_v1/types/automl_video_action_recognition.py
@@ -0,0 +1,58 @@
+# -*- coding: utf-8 -*-
+
+# Copyright 2020 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+import proto # type: ignore
+
+
+__protobuf__ = proto.module(
+ package="google.cloud.aiplatform.v1.schema.trainingjob.definition",
+ manifest={"AutoMlVideoActionRecognition", "AutoMlVideoActionRecognitionInputs",},
+)
+
+
+class AutoMlVideoActionRecognition(proto.Message):
+ r"""A TrainingJob that trains and uploads an AutoML Video Action
+ Recognition Model.
+
+ Attributes:
+ inputs (google.cloud.aiplatform.v1.schema.trainingjob.definition_v1.types.AutoMlVideoActionRecognitionInputs):
+ The input parameters of this TrainingJob.
+ """
+
+ inputs = proto.Field(
+ proto.MESSAGE, number=1, message="AutoMlVideoActionRecognitionInputs",
+ )
+
+
+class AutoMlVideoActionRecognitionInputs(proto.Message):
+ r"""
+
+ Attributes:
+ model_type (google.cloud.aiplatform.v1.schema.trainingjob.definition_v1.types.AutoMlVideoActionRecognitionInputs.ModelType):
+
+ """
+
+ class ModelType(proto.Enum):
+ r""""""
+ MODEL_TYPE_UNSPECIFIED = 0
+ CLOUD = 1
+ MOBILE_VERSATILE_1 = 2
+
+ model_type = proto.Field(proto.ENUM, number=1, enum=ModelType,)
+
+
+__all__ = tuple(sorted(__protobuf__.manifest))
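(Illustrative aside, not part of the patch.) The nested ModelType enum above is a regular proto-plus enum; a minimal usage sketch, assuming the generated definition_v1 types package is importable:

```python
# Sketch only: proto-plus exposes ModelType as an IntEnum nested on the
# inputs message.
from google.cloud.aiplatform.v1.schema.trainingjob.definition_v1 import types

Inputs = types.AutoMlVideoActionRecognitionInputs
inputs = Inputs(model_type=Inputs.ModelType.CLOUD)

assert inputs.model_type == Inputs.ModelType.CLOUD
assert inputs.model_type.name == "CLOUD"
```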
diff --git a/google/cloud/aiplatform/v1/schema/trainingjob/definition_v1/types/automl_video_classification.py b/google/cloud/aiplatform/v1/schema/trainingjob/definition_v1/types/automl_video_classification.py
new file mode 100644
index 0000000000..ba7f2d5b21
--- /dev/null
+++ b/google/cloud/aiplatform/v1/schema/trainingjob/definition_v1/types/automl_video_classification.py
@@ -0,0 +1,59 @@
+# -*- coding: utf-8 -*-
+
+# Copyright 2020 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+import proto # type: ignore
+
+
+__protobuf__ = proto.module(
+ package="google.cloud.aiplatform.v1.schema.trainingjob.definition",
+ manifest={"AutoMlVideoClassification", "AutoMlVideoClassificationInputs",},
+)
+
+
+class AutoMlVideoClassification(proto.Message):
+ r"""A TrainingJob that trains and uploads an AutoML Video
+ Classification Model.
+
+ Attributes:
+ inputs (google.cloud.aiplatform.v1.schema.trainingjob.definition_v1.types.AutoMlVideoClassificationInputs):
+ The input parameters of this TrainingJob.
+ """
+
+ inputs = proto.Field(
+ proto.MESSAGE, number=1, message="AutoMlVideoClassificationInputs",
+ )
+
+
+class AutoMlVideoClassificationInputs(proto.Message):
+ r"""
+
+ Attributes:
+ model_type (google.cloud.aiplatform.v1.schema.trainingjob.definition_v1.types.AutoMlVideoClassificationInputs.ModelType):
+
+ """
+
+ class ModelType(proto.Enum):
+ r""""""
+ MODEL_TYPE_UNSPECIFIED = 0
+ CLOUD = 1
+ MOBILE_VERSATILE_1 = 2
+ MOBILE_JETSON_VERSATILE_1 = 3
+
+ model_type = proto.Field(proto.ENUM, number=1, enum=ModelType,)
+
+
+__all__ = tuple(sorted(__protobuf__.manifest))
diff --git a/google/cloud/aiplatform/v1/schema/trainingjob/definition_v1/types/automl_video_object_tracking.py b/google/cloud/aiplatform/v1/schema/trainingjob/definition_v1/types/automl_video_object_tracking.py
new file mode 100644
index 0000000000..0ecb1113d9
--- /dev/null
+++ b/google/cloud/aiplatform/v1/schema/trainingjob/definition_v1/types/automl_video_object_tracking.py
@@ -0,0 +1,62 @@
+# -*- coding: utf-8 -*-
+
+# Copyright 2020 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+import proto # type: ignore
+
+
+__protobuf__ = proto.module(
+ package="google.cloud.aiplatform.v1.schema.trainingjob.definition",
+ manifest={"AutoMlVideoObjectTracking", "AutoMlVideoObjectTrackingInputs",},
+)
+
+
+class AutoMlVideoObjectTracking(proto.Message):
+ r"""A TrainingJob that trains and uploads an AutoML Video
+ ObjectTracking Model.
+
+ Attributes:
+ inputs (google.cloud.aiplatform.v1.schema.trainingjob.definition_v1.types.AutoMlVideoObjectTrackingInputs):
+ The input parameters of this TrainingJob.
+ """
+
+ inputs = proto.Field(
+ proto.MESSAGE, number=1, message="AutoMlVideoObjectTrackingInputs",
+ )
+
+
+class AutoMlVideoObjectTrackingInputs(proto.Message):
+ r"""
+
+ Attributes:
+ model_type (google.cloud.aiplatform.v1.schema.trainingjob.definition_v1.types.AutoMlVideoObjectTrackingInputs.ModelType):
+
+ """
+
+ class ModelType(proto.Enum):
+ r""""""
+ MODEL_TYPE_UNSPECIFIED = 0
+ CLOUD = 1
+ MOBILE_VERSATILE_1 = 2
+ MOBILE_CORAL_VERSATILE_1 = 3
+ MOBILE_CORAL_LOW_LATENCY_1 = 4
+ MOBILE_JETSON_VERSATILE_1 = 5
+ MOBILE_JETSON_LOW_LATENCY_1 = 6
+
+ model_type = proto.Field(proto.ENUM, number=1, enum=ModelType,)
+
+
+__all__ = tuple(sorted(__protobuf__.manifest))
diff --git a/google/cloud/aiplatform/v1/schema/trainingjob/definition_v1/types/export_evaluated_data_items_config.py b/google/cloud/aiplatform/v1/schema/trainingjob/definition_v1/types/export_evaluated_data_items_config.py
new file mode 100644
index 0000000000..dc8a629412
--- /dev/null
+++ b/google/cloud/aiplatform/v1/schema/trainingjob/definition_v1/types/export_evaluated_data_items_config.py
@@ -0,0 +1,53 @@
+# -*- coding: utf-8 -*-
+
+# Copyright 2020 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+import proto # type: ignore
+
+
+__protobuf__ = proto.module(
+ package="google.cloud.aiplatform.v1.schema.trainingjob.definition",
+ manifest={"ExportEvaluatedDataItemsConfig",},
+)
+
+
+class ExportEvaluatedDataItemsConfig(proto.Message):
+ r"""Configuration for exporting test set predictions to a
+ BigQuery table.
+
+ Attributes:
+ destination_bigquery_uri (str):
+ URI of desired destination BigQuery table. Expected format:
+ bq://::
+
+ If not specified, then results are exported to the following
+ auto-created BigQuery table:
+
+ :export_evaluated_examples__.evaluated_examples
+ override_existing_table (bool):
+ If true and an export destination is
+ specified, then the contents of the destination
+ are overwritten. Otherwise, if the export
+ destination already exists, then the export
+ operation fails.
+ """
+
+ destination_bigquery_uri = proto.Field(proto.STRING, number=1)
+
+ override_existing_table = proto.Field(proto.BOOL, number=2)
+
+
+__all__ = tuple(sorted(__protobuf__.manifest))
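(Illustrative aside, not part of the patch.) A sketch of the two fields documented above; the BigQuery URI shown is a hypothetical placeholder, and the import assumes the generated definition_v1 types package is available:

```python
# Sketch only: the destination URI below is a hypothetical placeholder.
from google.cloud.aiplatform.v1.schema.trainingjob.definition_v1 import types

config = types.ExportEvaluatedDataItemsConfig(
    # Hypothetical destination table; see the expected bq:// format above.
    destination_bigquery_uri="bq://example-project.example_dataset.evaluated_examples",
    # Leave False so the export fails rather than overwriting an existing table.
    override_existing_table=False,
)
```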
diff --git a/google/cloud/aiplatform/v1beta1/schema/predict/instance/__init__.py b/google/cloud/aiplatform/v1beta1/schema/predict/instance/__init__.py
index 3c4e8af160..32ad0e9ff2 100644
--- a/google/cloud/aiplatform/v1beta1/schema/predict/instance/__init__.py
+++ b/google/cloud/aiplatform/v1beta1/schema/predict/instance/__init__.py
@@ -14,9 +14,6 @@
# See the License for the specific language governing permissions and
# limitations under the License.
#
-from google.cloud.aiplatform.helpers import _decorators
-from google.cloud.aiplatform.v1beta1.schema.predict.instance_v1beta1 import types as pkg
-
from google.cloud.aiplatform.v1beta1.schema.predict.instance_v1beta1.types.image_classification import (
ImageClassificationPredictionInstance,
)
@@ -56,4 +53,3 @@
"VideoClassificationPredictionInstance",
"VideoObjectTrackingPredictionInstance",
)
-_decorators._add_methods_to_classes_in_package(pkg)
diff --git a/google/cloud/aiplatform/v1beta1/schema/predict/instance_v1beta1/types/__init__.py b/google/cloud/aiplatform/v1beta1/schema/predict/instance_v1beta1/types/__init__.py
index 3160c08e1d..041fe6cdb1 100644
--- a/google/cloud/aiplatform/v1beta1/schema/predict/instance_v1beta1/types/__init__.py
+++ b/google/cloud/aiplatform/v1beta1/schema/predict/instance_v1beta1/types/__init__.py
@@ -25,7 +25,6 @@
from .video_classification import VideoClassificationPredictionInstance
from .video_object_tracking import VideoObjectTrackingPredictionInstance
-
__all__ = (
"ImageClassificationPredictionInstance",
"ImageObjectDetectionPredictionInstance",
diff --git a/google/cloud/aiplatform/v1beta1/schema/predict/params/__init__.py b/google/cloud/aiplatform/v1beta1/schema/predict/params/__init__.py
index 45471523c9..4a410f3904 100644
--- a/google/cloud/aiplatform/v1beta1/schema/predict/params/__init__.py
+++ b/google/cloud/aiplatform/v1beta1/schema/predict/params/__init__.py
@@ -14,9 +14,6 @@
# See the License for the specific language governing permissions and
# limitations under the License.
#
-from google.cloud.aiplatform.helpers import _decorators
-from google.cloud.aiplatform.v1beta1.schema.predict.params_v1beta1 import types as pkg
-
from google.cloud.aiplatform.v1beta1.schema.predict.params_v1beta1.types.image_classification import (
ImageClassificationPredictionParams,
)
@@ -44,4 +41,3 @@
"VideoClassificationPredictionParams",
"VideoObjectTrackingPredictionParams",
)
-_decorators._add_methods_to_classes_in_package(pkg)
diff --git a/google/cloud/aiplatform/v1beta1/schema/predict/params_v1beta1/types/__init__.py b/google/cloud/aiplatform/v1beta1/schema/predict/params_v1beta1/types/__init__.py
index 39202720fa..2f2c29bba5 100644
--- a/google/cloud/aiplatform/v1beta1/schema/predict/params_v1beta1/types/__init__.py
+++ b/google/cloud/aiplatform/v1beta1/schema/predict/params_v1beta1/types/__init__.py
@@ -22,7 +22,6 @@
from .video_classification import VideoClassificationPredictionParams
from .video_object_tracking import VideoObjectTrackingPredictionParams
-
__all__ = (
"ImageClassificationPredictionParams",
"ImageObjectDetectionPredictionParams",
diff --git a/google/cloud/aiplatform/v1beta1/schema/predict/prediction/__init__.py b/google/cloud/aiplatform/v1beta1/schema/predict/prediction/__init__.py
index f8a4d63d58..159824217b 100644
--- a/google/cloud/aiplatform/v1beta1/schema/predict/prediction/__init__.py
+++ b/google/cloud/aiplatform/v1beta1/schema/predict/prediction/__init__.py
@@ -14,11 +14,6 @@
# See the License for the specific language governing permissions and
# limitations under the License.
#
-from google.cloud.aiplatform.helpers import _decorators
-from google.cloud.aiplatform.v1beta1.schema.predict.prediction_v1beta1 import (
- types as pkg,
-)
-
from google.cloud.aiplatform.v1beta1.schema.predict.prediction_v1beta1.types.classification import (
ClassificationPredictionResult,
)
@@ -66,4 +61,3 @@
"VideoClassificationPredictionResult",
"VideoObjectTrackingPredictionResult",
)
-_decorators._add_methods_to_classes_in_package(pkg)
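(Illustrative aside, not part of the patch.) Removing the _decorators hook only drops the method augmentation; the convenience namespace still re-exports the prediction result types, as in this sketch:

```python
# Sketch only: the public re-exports listed in __all__ above remain available
# after the decorator-based augmentation is removed.
from google.cloud.aiplatform.v1beta1.schema.predict import prediction

result = prediction.ClassificationPredictionResult()
print(type(result).__name__)
```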
diff --git a/google/cloud/aiplatform/v1beta1/schema/predict/prediction_v1beta1/types/__init__.py b/google/cloud/aiplatform/v1beta1/schema/predict/prediction_v1beta1/types/__init__.py
index 2d6c8a98d3..5ec1ed095e 100644
--- a/google/cloud/aiplatform/v1beta1/schema/predict/prediction_v1beta1/types/__init__.py
+++ b/google/cloud/aiplatform/v1beta1/schema/predict/prediction_v1beta1/types/__init__.py
@@ -27,7 +27,6 @@
from .video_classification import VideoClassificationPredictionResult
from .video_object_tracking import VideoObjectTrackingPredictionResult
-
__all__ = (
"ClassificationPredictionResult",
"ImageObjectDetectionPredictionResult",
diff --git a/google/cloud/aiplatform/v1beta1/schema/predict/prediction_v1beta1/types/image_object_detection.py b/google/cloud/aiplatform/v1beta1/schema/predict/prediction_v1beta1/types/image_object_detection.py
index 1bf5002c2a..3d0f7f1f76 100644
--- a/google/cloud/aiplatform/v1beta1/schema/predict/prediction_v1beta1/types/image_object_detection.py
+++ b/google/cloud/aiplatform/v1beta1/schema/predict/prediction_v1beta1/types/image_object_detection.py
@@ -42,7 +42,7 @@ class ImageObjectDetectionPredictionResult(proto.Message):
The Model's confidences in correctness of the
predicted IDs, higher value means higher
confidence. Order matches the Ids.
- bboxes (Sequence[~.struct.ListValue]):
+ bboxes (Sequence[google.protobuf.struct_pb2.ListValue]):
Bounding boxes, i.e. the rectangles over the image, that
pinpoint the found AnnotationSpecs. Given in order that
matches the IDs. Each bounding box is an array of 4 numbers
diff --git a/google/cloud/aiplatform/v1beta1/schema/predict/prediction_v1beta1/types/text_sentiment.py b/google/cloud/aiplatform/v1beta1/schema/predict/prediction_v1beta1/types/text_sentiment.py
index 39ef21bf21..f31b95a18f 100644
--- a/google/cloud/aiplatform/v1beta1/schema/predict/prediction_v1beta1/types/text_sentiment.py
+++ b/google/cloud/aiplatform/v1beta1/schema/predict/prediction_v1beta1/types/text_sentiment.py
@@ -17,11 +17,6 @@
import proto # type: ignore
-# DO NOT OVERWRITE FOLLOWING LINE: it was manually edited.
-from google.cloud.aiplatform.v1beta1.schema.predict.instance import (
- TextSentimentPredictionInstance,
-)
-
__protobuf__ = proto.module(
package="google.cloud.aiplatform.v1beta1.schema.predict.prediction",
@@ -30,39 +25,21 @@
class TextSentimentPredictionResult(proto.Message):
- r"""Represents a line of JSONL in the text sentiment batch
- prediction output file. This is a hack to allow printing of
- integer values.
+ r"""Prediction output format for Text Sentiment
Attributes:
- instance (~.gcaspi_text_sentiment.TextSentimentPredictionInstance):
- User's input instance.
- prediction (~.gcaspp_text_sentiment.TextSentimentPredictionResult.Prediction):
- The prediction result.
+ sentiment (int):
+ The integer sentiment labels between 0
+ (inclusive) and sentimentMax label (inclusive),
+ while 0 maps to the least positive sentiment and
+ sentimentMax maps to the most positive one. The
+ higher the score is, the more positive the
+ sentiment in the text snippet is. Note:
+ sentimentMax is an integer value between 1
+ (inclusive) and 10 (inclusive).
"""
- class Prediction(proto.Message):
- r"""Prediction output format for Text Sentiment.
-
- Attributes:
- sentiment (int):
- The integer sentiment labels between 0
- (inclusive) and sentimentMax label (inclusive),
- while 0 maps to the least positive sentiment and
- sentimentMax maps to the most positive one. The
- higher the score is, the more positive the
- sentiment in the text snippet is. Note:
- sentimentMax is an integer value between 1
- (inclusive) and 10 (inclusive).
- """
-
- sentiment = proto.Field(proto.INT32, number=1)
-
- instance = proto.Field(
- proto.MESSAGE, number=1, message=TextSentimentPredictionInstance,
- )
-
- prediction = proto.Field(proto.MESSAGE, number=2, message=Prediction,)
+ sentiment = proto.Field(proto.INT32, number=1)
__all__ = tuple(sorted(__protobuf__.manifest))
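(Illustrative aside, not part of the patch.) The regenerated message above drops the hand-edited nested Prediction wrapper and the instance field, so the sentiment value now lives at the top level; a minimal sketch, assuming the prediction_v1beta1 types package is importable:

```python
# Sketch only: sentiment is now a top-level field on the result message
# (previously it was nested under result.prediction.sentiment).
from google.cloud.aiplatform.v1beta1.schema.predict.prediction_v1beta1 import types

result = types.TextSentimentPredictionResult(sentiment=7)
assert result.sentiment == 7
```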
diff --git a/google/cloud/aiplatform/v1beta1/schema/predict/prediction_v1beta1/types/video_action_recognition.py b/google/cloud/aiplatform/v1beta1/schema/predict/prediction_v1beta1/types/video_action_recognition.py
index f76b51899b..99fa365b47 100644
--- a/google/cloud/aiplatform/v1beta1/schema/predict/prediction_v1beta1/types/video_action_recognition.py
+++ b/google/cloud/aiplatform/v1beta1/schema/predict/prediction_v1beta1/types/video_action_recognition.py
@@ -38,21 +38,21 @@ class VideoActionRecognitionPredictionResult(proto.Message):
display_name (str):
The display name of the AnnotationSpec that
had been identified.
- time_segment_start (~.duration.Duration):
+ time_segment_start (google.protobuf.duration_pb2.Duration):
The beginning, inclusive, of the video's time
segment in which the AnnotationSpec has been
identified. Expressed as a number of seconds as
measured from the start of the video, with
fractions up to a microsecond precision, and
with "s" appended at the end.
- time_segment_end (~.duration.Duration):
+ time_segment_end (google.protobuf.duration_pb2.Duration):
The end, exclusive, of the video's time
segment in which the AnnotationSpec has been
identified. Expressed as a number of seconds as
measured from the start of the video, with
fractions up to a microsecond precision, and
with "s" appended at the end.
- confidence (~.wrappers.FloatValue):
+ confidence (google.protobuf.wrappers_pb2.FloatValue):
The Model's confidence in correction of this
prediction, higher value means higher
confidence.
diff --git a/google/cloud/aiplatform/v1beta1/schema/predict/prediction_v1beta1/types/video_classification.py b/google/cloud/aiplatform/v1beta1/schema/predict/prediction_v1beta1/types/video_classification.py
index 469023b122..3fca68fe64 100644
--- a/google/cloud/aiplatform/v1beta1/schema/predict/prediction_v1beta1/types/video_classification.py
+++ b/google/cloud/aiplatform/v1beta1/schema/predict/prediction_v1beta1/types/video_classification.py
@@ -44,7 +44,7 @@ class VideoClassificationPredictionResult(proto.Message):
will be one of - segment-classification
- shot-classification
- one-sec-interval-classification
- time_segment_start (~.duration.Duration):
+ time_segment_start (google.protobuf.duration_pb2.Duration):
The beginning, inclusive, of the video's time
segment in which the AnnotationSpec has been
identified. Expressed as a number of seconds as
@@ -55,7 +55,7 @@ class VideoClassificationPredictionResult(proto.Message):
equals the original 'timeSegmentStart' from the
input instance, for other types it is the start
of a shot or a 1 second interval respectively.
- time_segment_end (~.duration.Duration):
+ time_segment_end (google.protobuf.duration_pb2.Duration):
The end, exclusive, of the video's time
segment in which the AnnotationSpec has been
identified. Expressed as a number of seconds as
@@ -66,7 +66,7 @@ class VideoClassificationPredictionResult(proto.Message):
equals the original 'timeSegmentEnd' from the
input instance, for other types it is the end of
a shot or a 1 second interval respectively.
- confidence (~.wrappers.FloatValue):
+ confidence (google.protobuf.wrappers_pb2.FloatValue):
The Model's confidence in correction of this
prediction, higher value means higher
confidence.
diff --git a/google/cloud/aiplatform/v1beta1/schema/predict/prediction_v1beta1/types/video_object_tracking.py b/google/cloud/aiplatform/v1beta1/schema/predict/prediction_v1beta1/types/video_object_tracking.py
index 026f80a325..6fd431c0dd 100644
--- a/google/cloud/aiplatform/v1beta1/schema/predict/prediction_v1beta1/types/video_object_tracking.py
+++ b/google/cloud/aiplatform/v1beta1/schema/predict/prediction_v1beta1/types/video_object_tracking.py
@@ -38,25 +38,25 @@ class VideoObjectTrackingPredictionResult(proto.Message):
display_name (str):
The display name of the AnnotationSpec that
had been identified.
- time_segment_start (~.duration.Duration):
+ time_segment_start (google.protobuf.duration_pb2.Duration):
The beginning, inclusive, of the video's time
segment in which the object instance has been
detected. Expressed as a number of seconds as
measured from the start of the video, with
fractions up to a microsecond precision, and
with "s" appended at the end.
- time_segment_end (~.duration.Duration):
+ time_segment_end (google.protobuf.duration_pb2.Duration):
The end, inclusive, of the video's time
segment in which the object instance has been
detected. Expressed as a number of seconds as
measured from the start of the video, with
fractions up to a microsecond precision, and
with "s" appended at the end.
- confidence (~.wrappers.FloatValue):
+ confidence (google.protobuf.wrappers_pb2.FloatValue):
The Model's confidence in correction of this
prediction, higher value means higher
confidence.
- frames (Sequence[~.video_object_tracking.VideoObjectTrackingPredictionResult.Frame]):
+ frames (Sequence[google.cloud.aiplatform.v1beta1.schema.predict.prediction_v1beta1.types.VideoObjectTrackingPredictionResult.Frame]):
All of the frames of the video in which a
single object instance has been detected. The
bounding boxes in the frames identify the same
@@ -70,19 +70,19 @@ class Frame(proto.Message):
size, and the point 0,0 is in the top left of the frame.
Attributes:
- time_offset (~.duration.Duration):
+ time_offset (google.protobuf.duration_pb2.Duration):
A time (frame) of a video in which the object
has been detected. Expressed as a number of
seconds as measured from the start of the video,
with fractions up to a microsecond precision,
and with "s" appended at the end.
- x_min (~.wrappers.FloatValue):
+ x_min (google.protobuf.wrappers_pb2.FloatValue):
The leftmost coordinate of the bounding box.
- x_max (~.wrappers.FloatValue):
+ x_max (google.protobuf.wrappers_pb2.FloatValue):
The rightmost coordinate of the bounding box.
- y_min (~.wrappers.FloatValue):
+ y_min (google.protobuf.wrappers_pb2.FloatValue):
The topmost coordinate of the bounding box.
- y_max (~.wrappers.FloatValue):
+ y_max (google.protobuf.wrappers_pb2.FloatValue):
The bottommost coordinate of the bounding
box.
"""
diff --git a/google/cloud/aiplatform/v1beta1/schema/trainingjob/definition/__init__.py b/google/cloud/aiplatform/v1beta1/schema/trainingjob/definition/__init__.py
index 8c71f1f7cf..392fae649e 100644
--- a/google/cloud/aiplatform/v1beta1/schema/trainingjob/definition/__init__.py
+++ b/google/cloud/aiplatform/v1beta1/schema/trainingjob/definition/__init__.py
@@ -14,11 +14,6 @@
# See the License for the specific language governing permissions and
# limitations under the License.
#
-from google.cloud.aiplatform.helpers import _decorators
-from google.cloud.aiplatform.v1beta1.schema.trainingjob.definition_v1beta1 import (
- types as pkg,
-)
-
from google.cloud.aiplatform.v1beta1.schema.trainingjob.definition_v1beta1.types.automl_forecasting import (
AutoMlForecasting,
)
@@ -134,4 +129,3 @@
"AutoMlVideoObjectTrackingInputs",
"ExportEvaluatedDataItemsConfig",
)
-_decorators._add_methods_to_classes_in_package(pkg)
diff --git a/google/cloud/aiplatform/v1beta1/schema/trainingjob/definition_v1beta1/types/__init__.py b/google/cloud/aiplatform/v1beta1/schema/trainingjob/definition_v1beta1/types/__init__.py
index 6a0e7903b2..3853ca87a9 100644
--- a/google/cloud/aiplatform/v1beta1/schema/trainingjob/definition_v1beta1/types/__init__.py
+++ b/google/cloud/aiplatform/v1beta1/schema/trainingjob/definition_v1beta1/types/__init__.py
@@ -66,7 +66,6 @@
AutoMlVideoObjectTrackingInputs,
)
-
__all__ = (
"ExportEvaluatedDataItemsConfig",
"AutoMlForecasting",
diff --git a/google/cloud/aiplatform/v1beta1/schema/trainingjob/definition_v1beta1/types/automl_forecasting.py b/google/cloud/aiplatform/v1beta1/schema/trainingjob/definition_v1beta1/types/automl_forecasting.py
index 710793c9a7..34f700f8af 100644
--- a/google/cloud/aiplatform/v1beta1/schema/trainingjob/definition_v1beta1/types/automl_forecasting.py
+++ b/google/cloud/aiplatform/v1beta1/schema/trainingjob/definition_v1beta1/types/automl_forecasting.py
@@ -38,9 +38,9 @@ class AutoMlForecasting(proto.Message):
Model.
Attributes:
- inputs (~.automl_forecasting.AutoMlForecastingInputs):
+ inputs (google.cloud.aiplatform.v1beta1.schema.trainingjob.definition_v1beta1.types.AutoMlForecastingInputs):
The input parameters of this TrainingJob.
- metadata (~.automl_forecasting.AutoMlForecastingMetadata):
+ metadata (google.cloud.aiplatform.v1beta1.schema.trainingjob.definition_v1beta1.types.AutoMlForecastingMetadata):
The metadata information.
"""
@@ -64,7 +64,7 @@ class AutoMlForecastingInputs(proto.Message):
time_column (str):
The name of the column that identifies time
order in the time series.
- transformations (Sequence[~.automl_forecasting.AutoMlForecastingInputs.Transformation]):
+ transformations (Sequence[google.cloud.aiplatform.v1beta1.schema.trainingjob.definition_v1beta1.types.AutoMlForecastingInputs.Transformation]):
Each transformation will apply transform
function to given input column. And the result
will be used for training. When creating
@@ -78,14 +78,14 @@ class AutoMlForecastingInputs(proto.Message):
function over the validation set.
The supported optimization objectives:
- "minimize-rmse" (default) - Minimize root-
+ "minimize-rmse" (default) - Minimize root-
mean-squared error (RMSE). "minimize-mae" -
Minimize mean-absolute error (MAE). "minimize-
rmsle" - Minimize root-mean-squared log error
(RMSLE). "minimize-rmspe" - Minimize root-
mean-squared percentage error (RMSPE).
"minimize-wape-mae" - Minimize the combination
- of weighted absolute percentage error (WAPE)
+ of weighted absolute percentage error (WAPE)
and mean-absolute-error (MAE).
train_budget_milli_node_hours (int):
Required. The train budget of creating this
@@ -131,7 +131,7 @@ class AutoMlForecastingInputs(proto.Message):
contains information for the given entity
(identified by the key column) that is known for
the past and the future
- period (~.automl_forecasting.AutoMlForecastingInputs.Period):
+ period (google.cloud.aiplatform.v1beta1.schema.trainingjob.definition_v1beta1.types.AutoMlForecastingInputs.Period):
Expected difference in time granularity
between rows in the data. If it is not set, the
period is inferred from data.
@@ -152,7 +152,7 @@ class AutoMlForecastingInputs(proto.Message):
sequence, where each period is one unit of granularity as
defined by the ``period``. Default value 0 means that it
lets algorithm to define the value. Inclusive.
- export_evaluated_data_items_config (~.gcastd_export_evaluated_data_items_config.ExportEvaluatedDataItemsConfig):
+ export_evaluated_data_items_config (google.cloud.aiplatform.v1beta1.schema.trainingjob.definition_v1beta1.types.ExportEvaluatedDataItemsConfig):
Configuration for exporting test set
predictions to a BigQuery table. If this
configuration is absent, then the export is not
@@ -163,21 +163,21 @@ class Transformation(proto.Message):
r"""
Attributes:
- auto (~.automl_forecasting.AutoMlForecastingInputs.Transformation.AutoTransformation):
+ auto (google.cloud.aiplatform.v1beta1.schema.trainingjob.definition_v1beta1.types.AutoMlForecastingInputs.Transformation.AutoTransformation):
- numeric (~.automl_forecasting.AutoMlForecastingInputs.Transformation.NumericTransformation):
+ numeric (google.cloud.aiplatform.v1beta1.schema.trainingjob.definition_v1beta1.types.AutoMlForecastingInputs.Transformation.NumericTransformation):
- categorical (~.automl_forecasting.AutoMlForecastingInputs.Transformation.CategoricalTransformation):
+ categorical (google.cloud.aiplatform.v1beta1.schema.trainingjob.definition_v1beta1.types.AutoMlForecastingInputs.Transformation.CategoricalTransformation):
- timestamp (~.automl_forecasting.AutoMlForecastingInputs.Transformation.TimestampTransformation):
+ timestamp (google.cloud.aiplatform.v1beta1.schema.trainingjob.definition_v1beta1.types.AutoMlForecastingInputs.Transformation.TimestampTransformation):
- text (~.automl_forecasting.AutoMlForecastingInputs.Transformation.TextTransformation):
+ text (google.cloud.aiplatform.v1beta1.schema.trainingjob.definition_v1beta1.types.AutoMlForecastingInputs.Transformation.TextTransformation):
- repeated_numeric (~.automl_forecasting.AutoMlForecastingInputs.Transformation.NumericArrayTransformation):
+ repeated_numeric (google.cloud.aiplatform.v1beta1.schema.trainingjob.definition_v1beta1.types.AutoMlForecastingInputs.Transformation.NumericArrayTransformation):
- repeated_categorical (~.automl_forecasting.AutoMlForecastingInputs.Transformation.CategoricalArrayTransformation):
+ repeated_categorical (google.cloud.aiplatform.v1beta1.schema.trainingjob.definition_v1beta1.types.AutoMlForecastingInputs.Transformation.CategoricalArrayTransformation):
- repeated_text (~.automl_forecasting.AutoMlForecastingInputs.Transformation.TextArrayTransformation):
+ repeated_text (google.cloud.aiplatform.v1beta1.schema.trainingjob.definition_v1beta1.types.AutoMlForecastingInputs.Transformation.TextArrayTransformation):
"""
@@ -418,11 +418,11 @@ class Period(proto.Message):
unit (str):
The time granularity unit of this time
period. The supported unit are:
- "hour"
- "day"
- "week"
- "month"
- "year".
+ "hour"
+ "day"
+ "week"
+ "month"
+ "year".
quantity (int):
The number of units per period, e.g. 3 weeks
or 2 months.
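(Illustrative aside, not part of the patch.) A sketch of the Period message whose docstring is reflowed above (time granularity unit plus quantity), assuming the v1beta1 definition_v1beta1 types package is importable:

```python
# Sketch only: expresses "every 3 weeks" as the expected gap between rows.
from google.cloud.aiplatform.v1beta1.schema.trainingjob.definition_v1beta1 import types

period = types.AutoMlForecastingInputs.Period(unit="week", quantity=3)
forecasting_inputs = types.AutoMlForecastingInputs(period=period)
assert forecasting_inputs.period.quantity == 3
```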
diff --git a/google/cloud/aiplatform/v1beta1/schema/trainingjob/definition_v1beta1/types/automl_image_classification.py b/google/cloud/aiplatform/v1beta1/schema/trainingjob/definition_v1beta1/types/automl_image_classification.py
index 0ee0394192..8ee27076d2 100644
--- a/google/cloud/aiplatform/v1beta1/schema/trainingjob/definition_v1beta1/types/automl_image_classification.py
+++ b/google/cloud/aiplatform/v1beta1/schema/trainingjob/definition_v1beta1/types/automl_image_classification.py
@@ -33,9 +33,9 @@ class AutoMlImageClassification(proto.Message):
Classification Model.
Attributes:
- inputs (~.automl_image_classification.AutoMlImageClassificationInputs):
+ inputs (google.cloud.aiplatform.v1beta1.schema.trainingjob.definition_v1beta1.types.AutoMlImageClassificationInputs):
The input parameters of this TrainingJob.
- metadata (~.automl_image_classification.AutoMlImageClassificationMetadata):
+ metadata (google.cloud.aiplatform.v1beta1.schema.trainingjob.definition_v1beta1.types.AutoMlImageClassificationMetadata):
The metadata information.
"""
@@ -52,7 +52,7 @@ class AutoMlImageClassificationInputs(proto.Message):
r"""
Attributes:
- model_type (~.automl_image_classification.AutoMlImageClassificationInputs.ModelType):
+ model_type (google.cloud.aiplatform.v1beta1.schema.trainingjob.definition_v1beta1.types.AutoMlImageClassificationInputs.ModelType):
base_model_id (str):
The ID of the ``base`` model. If it is specified, the new
@@ -122,7 +122,7 @@ class AutoMlImageClassificationMetadata(proto.Message):
value in this field means 1 node hour.
Guaranteed to not exceed
inputs.budgetMilliNodeHours.
- successful_stop_reason (~.automl_image_classification.AutoMlImageClassificationMetadata.SuccessfulStopReason):
+ successful_stop_reason (google.cloud.aiplatform.v1beta1.schema.trainingjob.definition_v1beta1.types.AutoMlImageClassificationMetadata.SuccessfulStopReason):
For successful job completions, this is the
reason why the job has finished.
"""
diff --git a/google/cloud/aiplatform/v1beta1/schema/trainingjob/definition_v1beta1/types/automl_image_object_detection.py b/google/cloud/aiplatform/v1beta1/schema/trainingjob/definition_v1beta1/types/automl_image_object_detection.py
index 3fb9d3ae1d..512e35ed1d 100644
--- a/google/cloud/aiplatform/v1beta1/schema/trainingjob/definition_v1beta1/types/automl_image_object_detection.py
+++ b/google/cloud/aiplatform/v1beta1/schema/trainingjob/definition_v1beta1/types/automl_image_object_detection.py
@@ -33,9 +33,9 @@ class AutoMlImageObjectDetection(proto.Message):
Detection Model.
Attributes:
- inputs (~.automl_image_object_detection.AutoMlImageObjectDetectionInputs):
+ inputs (google.cloud.aiplatform.v1beta1.schema.trainingjob.definition_v1beta1.types.AutoMlImageObjectDetectionInputs):
The input parameters of this TrainingJob.
- metadata (~.automl_image_object_detection.AutoMlImageObjectDetectionMetadata):
+ metadata (google.cloud.aiplatform.v1beta1.schema.trainingjob.definition_v1beta1.types.AutoMlImageObjectDetectionMetadata):
The metadata information
"""
@@ -52,7 +52,7 @@ class AutoMlImageObjectDetectionInputs(proto.Message):
r"""
Attributes:
- model_type (~.automl_image_object_detection.AutoMlImageObjectDetectionInputs.ModelType):
+ model_type (google.cloud.aiplatform.v1beta1.schema.trainingjob.definition_v1beta1.types.AutoMlImageObjectDetectionInputs.ModelType):
budget_milli_node_hours (int):
The training budget of creating this model, expressed in
@@ -107,7 +107,7 @@ class AutoMlImageObjectDetectionMetadata(proto.Message):
value in this field means 1 node hour.
Guaranteed to not exceed
inputs.budgetMilliNodeHours.
- successful_stop_reason (~.automl_image_object_detection.AutoMlImageObjectDetectionMetadata.SuccessfulStopReason):
+ successful_stop_reason (google.cloud.aiplatform.v1beta1.schema.trainingjob.definition_v1beta1.types.AutoMlImageObjectDetectionMetadata.SuccessfulStopReason):
For successful job completions, this is the
reason why the job has finished.
"""
diff --git a/google/cloud/aiplatform/v1beta1/schema/trainingjob/definition_v1beta1/types/automl_image_segmentation.py b/google/cloud/aiplatform/v1beta1/schema/trainingjob/definition_v1beta1/types/automl_image_segmentation.py
index 0fa3788b11..22c199e7f5 100644
--- a/google/cloud/aiplatform/v1beta1/schema/trainingjob/definition_v1beta1/types/automl_image_segmentation.py
+++ b/google/cloud/aiplatform/v1beta1/schema/trainingjob/definition_v1beta1/types/automl_image_segmentation.py
@@ -33,9 +33,9 @@ class AutoMlImageSegmentation(proto.Message):
Segmentation Model.
Attributes:
- inputs (~.automl_image_segmentation.AutoMlImageSegmentationInputs):
+ inputs (google.cloud.aiplatform.v1beta1.schema.trainingjob.definition_v1beta1.types.AutoMlImageSegmentationInputs):
The input parameters of this TrainingJob.
- metadata (~.automl_image_segmentation.AutoMlImageSegmentationMetadata):
+ metadata (google.cloud.aiplatform.v1beta1.schema.trainingjob.definition_v1beta1.types.AutoMlImageSegmentationMetadata):
The metadata information.
"""
@@ -52,7 +52,7 @@ class AutoMlImageSegmentationInputs(proto.Message):
r"""
Attributes:
- model_type (~.automl_image_segmentation.AutoMlImageSegmentationInputs.ModelType):
+ model_type (google.cloud.aiplatform.v1beta1.schema.trainingjob.definition_v1beta1.types.AutoMlImageSegmentationInputs.ModelType):
budget_milli_node_hours (int):
The training budget of creating this model, expressed in
@@ -100,7 +100,7 @@ class AutoMlImageSegmentationMetadata(proto.Message):
value in this field means 1 node hour.
Guaranteed to not exceed
inputs.budgetMilliNodeHours.
- successful_stop_reason (~.automl_image_segmentation.AutoMlImageSegmentationMetadata.SuccessfulStopReason):
+ successful_stop_reason (google.cloud.aiplatform.v1beta1.schema.trainingjob.definition_v1beta1.types.AutoMlImageSegmentationMetadata.SuccessfulStopReason):
For successful job completions, this is the
reason why the job has finished.
"""
diff --git a/google/cloud/aiplatform/v1beta1/schema/trainingjob/definition_v1beta1/types/automl_tables.py b/google/cloud/aiplatform/v1beta1/schema/trainingjob/definition_v1beta1/types/automl_tables.py
index f924979bd6..19c43929e8 100644
--- a/google/cloud/aiplatform/v1beta1/schema/trainingjob/definition_v1beta1/types/automl_tables.py
+++ b/google/cloud/aiplatform/v1beta1/schema/trainingjob/definition_v1beta1/types/automl_tables.py
@@ -33,9 +33,9 @@ class AutoMlTables(proto.Message):
r"""A TrainingJob that trains and uploads an AutoML Tables Model.
Attributes:
- inputs (~.automl_tables.AutoMlTablesInputs):
+ inputs (google.cloud.aiplatform.v1beta1.schema.trainingjob.definition_v1beta1.types.AutoMlTablesInputs):
The input parameters of this TrainingJob.
- metadata (~.automl_tables.AutoMlTablesMetadata):
+ metadata (google.cloud.aiplatform.v1beta1.schema.trainingjob.definition_v1beta1.types.AutoMlTablesMetadata):
The metadata information.
"""
@@ -61,7 +61,7 @@ class AutoMlTablesInputs(proto.Message):
produce. "classification" - Predict one out of
multiple target values is
picked for each row.
- "regression" - Predict a value based on its
+ "regression" - Predict a value based on its
relation to other values. This
type is available only to columns that contain
semantically numeric values, i.e. integers or
@@ -70,7 +70,7 @@ class AutoMlTablesInputs(proto.Message):
target_column (str):
The column name of the target column that the
model is to predict.
- transformations (Sequence[~.automl_tables.AutoMlTablesInputs.Transformation]):
+ transformations (Sequence[google.cloud.aiplatform.v1beta1.schema.trainingjob.definition_v1beta1.types.AutoMlTablesInputs.Transformation]):
Each transformation will apply transform
function to given input column. And the result
will be used for training. When creating
@@ -87,11 +87,11 @@ class AutoMlTablesInputs(proto.Message):
the prediction type. If the field is not set, a
default objective function is used.
classification (binary):
- "maximize-au-roc" (default) - Maximize the
+ "maximize-au-roc" (default) - Maximize the
area under the receiver
operating characteristic (ROC) curve.
"minimize-log-loss" - Minimize log loss.
- "maximize-au-prc" - Maximize the area under
+ "maximize-au-prc" - Maximize the area under
the precision-recall curve. "maximize-
precision-at-recall" - Maximize precision for a
specified
@@ -99,10 +99,10 @@ class AutoMlTablesInputs(proto.Message):
Maximize recall for a specified
precision value.
classification (multi-class):
- "minimize-log-loss" (default) - Minimize log
+ "minimize-log-loss" (default) - Minimize log
loss.
regression:
- "minimize-rmse" (default) - Minimize root-
+ "minimize-rmse" (default) - Minimize root-
mean-squared error (RMSE). "minimize-mae" -
Minimize mean-absolute error (MAE). "minimize-
rmsle" - Minimize root-mean-squared log error
@@ -140,7 +140,7 @@ class AutoMlTablesInputs(proto.Message):
for training. If weight column field is not set,
then all rows are assumed to have equal weight
of 1.
- export_evaluated_data_items_config (~.gcastd_export_evaluated_data_items_config.ExportEvaluatedDataItemsConfig):
+ export_evaluated_data_items_config (google.cloud.aiplatform.v1beta1.schema.trainingjob.definition_v1beta1.types.ExportEvaluatedDataItemsConfig):
Configuration for exporting test set
predictions to a BigQuery table. If this
configuration is absent, then the export is not
@@ -151,21 +151,21 @@ class Transformation(proto.Message):
r"""
Attributes:
- auto (~.automl_tables.AutoMlTablesInputs.Transformation.AutoTransformation):
+ auto (google.cloud.aiplatform.v1beta1.schema.trainingjob.definition_v1beta1.types.AutoMlTablesInputs.Transformation.AutoTransformation):
- numeric (~.automl_tables.AutoMlTablesInputs.Transformation.NumericTransformation):
+ numeric (google.cloud.aiplatform.v1beta1.schema.trainingjob.definition_v1beta1.types.AutoMlTablesInputs.Transformation.NumericTransformation):
- categorical (~.automl_tables.AutoMlTablesInputs.Transformation.CategoricalTransformation):
+ categorical (google.cloud.aiplatform.v1beta1.schema.trainingjob.definition_v1beta1.types.AutoMlTablesInputs.Transformation.CategoricalTransformation):
- timestamp (~.automl_tables.AutoMlTablesInputs.Transformation.TimestampTransformation):
+ timestamp (google.cloud.aiplatform.v1beta1.schema.trainingjob.definition_v1beta1.types.AutoMlTablesInputs.Transformation.TimestampTransformation):
- text (~.automl_tables.AutoMlTablesInputs.Transformation.TextTransformation):
+ text (google.cloud.aiplatform.v1beta1.schema.trainingjob.definition_v1beta1.types.AutoMlTablesInputs.Transformation.TextTransformation):
- repeated_numeric (~.automl_tables.AutoMlTablesInputs.Transformation.NumericArrayTransformation):
+ repeated_numeric (google.cloud.aiplatform.v1beta1.schema.trainingjob.definition_v1beta1.types.AutoMlTablesInputs.Transformation.NumericArrayTransformation):
- repeated_categorical (~.automl_tables.AutoMlTablesInputs.Transformation.CategoricalArrayTransformation):
+ repeated_categorical (google.cloud.aiplatform.v1beta1.schema.trainingjob.definition_v1beta1.types.AutoMlTablesInputs.Transformation.CategoricalArrayTransformation):
- repeated_text (~.automl_tables.AutoMlTablesInputs.Transformation.TextArrayTransformation):
+ repeated_text (google.cloud.aiplatform.v1beta1.schema.trainingjob.definition_v1beta1.types.AutoMlTablesInputs.Transformation.TextArrayTransformation):
"""
diff --git a/google/cloud/aiplatform/v1beta1/schema/trainingjob/definition_v1beta1/types/automl_text_classification.py b/google/cloud/aiplatform/v1beta1/schema/trainingjob/definition_v1beta1/types/automl_text_classification.py
index ca75734600..9fe6b865c9 100644
--- a/google/cloud/aiplatform/v1beta1/schema/trainingjob/definition_v1beta1/types/automl_text_classification.py
+++ b/google/cloud/aiplatform/v1beta1/schema/trainingjob/definition_v1beta1/types/automl_text_classification.py
@@ -29,7 +29,7 @@ class AutoMlTextClassification(proto.Message):
Classification Model.
Attributes:
- inputs (~.automl_text_classification.AutoMlTextClassificationInputs):
+ inputs (google.cloud.aiplatform.v1beta1.schema.trainingjob.definition_v1beta1.types.AutoMlTextClassificationInputs):
The input parameters of this TrainingJob.
"""
diff --git a/google/cloud/aiplatform/v1beta1/schema/trainingjob/definition_v1beta1/types/automl_text_extraction.py b/google/cloud/aiplatform/v1beta1/schema/trainingjob/definition_v1beta1/types/automl_text_extraction.py
index 336509af22..c7b1fc6dba 100644
--- a/google/cloud/aiplatform/v1beta1/schema/trainingjob/definition_v1beta1/types/automl_text_extraction.py
+++ b/google/cloud/aiplatform/v1beta1/schema/trainingjob/definition_v1beta1/types/automl_text_extraction.py
@@ -29,7 +29,7 @@ class AutoMlTextExtraction(proto.Message):
Extraction Model.
Attributes:
- inputs (~.automl_text_extraction.AutoMlTextExtractionInputs):
+ inputs (google.cloud.aiplatform.v1beta1.schema.trainingjob.definition_v1beta1.types.AutoMlTextExtractionInputs):
The input parameters of this TrainingJob.
"""
diff --git a/google/cloud/aiplatform/v1beta1/schema/trainingjob/definition_v1beta1/types/automl_text_sentiment.py b/google/cloud/aiplatform/v1beta1/schema/trainingjob/definition_v1beta1/types/automl_text_sentiment.py
index d5de97e2b2..8239b55fdf 100644
--- a/google/cloud/aiplatform/v1beta1/schema/trainingjob/definition_v1beta1/types/automl_text_sentiment.py
+++ b/google/cloud/aiplatform/v1beta1/schema/trainingjob/definition_v1beta1/types/automl_text_sentiment.py
@@ -29,7 +29,7 @@ class AutoMlTextSentiment(proto.Message):
Sentiment Model.
Attributes:
- inputs (~.automl_text_sentiment.AutoMlTextSentimentInputs):
+ inputs (google.cloud.aiplatform.v1beta1.schema.trainingjob.definition_v1beta1.types.AutoMlTextSentimentInputs):
The input parameters of this TrainingJob.
"""
diff --git a/google/cloud/aiplatform/v1beta1/schema/trainingjob/definition_v1beta1/types/automl_video_action_recognition.py b/google/cloud/aiplatform/v1beta1/schema/trainingjob/definition_v1beta1/types/automl_video_action_recognition.py
index d6969d93c6..66448faf01 100644
--- a/google/cloud/aiplatform/v1beta1/schema/trainingjob/definition_v1beta1/types/automl_video_action_recognition.py
+++ b/google/cloud/aiplatform/v1beta1/schema/trainingjob/definition_v1beta1/types/automl_video_action_recognition.py
@@ -29,7 +29,7 @@ class AutoMlVideoActionRecognition(proto.Message):
Recognition Model.
Attributes:
- inputs (~.automl_video_action_recognition.AutoMlVideoActionRecognitionInputs):
+ inputs (google.cloud.aiplatform.v1beta1.schema.trainingjob.definition_v1beta1.types.AutoMlVideoActionRecognitionInputs):
The input parameters of this TrainingJob.
"""
@@ -42,7 +42,7 @@ class AutoMlVideoActionRecognitionInputs(proto.Message):
r"""
Attributes:
- model_type (~.automl_video_action_recognition.AutoMlVideoActionRecognitionInputs.ModelType):
+ model_type (google.cloud.aiplatform.v1beta1.schema.trainingjob.definition_v1beta1.types.AutoMlVideoActionRecognitionInputs.ModelType):
"""
diff --git a/google/cloud/aiplatform/v1beta1/schema/trainingjob/definition_v1beta1/types/automl_video_classification.py b/google/cloud/aiplatform/v1beta1/schema/trainingjob/definition_v1beta1/types/automl_video_classification.py
index 3164544d47..51195eb327 100644
--- a/google/cloud/aiplatform/v1beta1/schema/trainingjob/definition_v1beta1/types/automl_video_classification.py
+++ b/google/cloud/aiplatform/v1beta1/schema/trainingjob/definition_v1beta1/types/automl_video_classification.py
@@ -29,7 +29,7 @@ class AutoMlVideoClassification(proto.Message):
Classification Model.
Attributes:
- inputs (~.automl_video_classification.AutoMlVideoClassificationInputs):
+ inputs (google.cloud.aiplatform.v1beta1.schema.trainingjob.definition_v1beta1.types.AutoMlVideoClassificationInputs):
The input parameters of this TrainingJob.
"""
@@ -42,7 +42,7 @@ class AutoMlVideoClassificationInputs(proto.Message):
r"""
Attributes:
- model_type (~.automl_video_classification.AutoMlVideoClassificationInputs.ModelType):
+ model_type (google.cloud.aiplatform.v1beta1.schema.trainingjob.definition_v1beta1.types.AutoMlVideoClassificationInputs.ModelType):
"""
diff --git a/google/cloud/aiplatform/v1beta1/schema/trainingjob/definition_v1beta1/types/automl_video_object_tracking.py b/google/cloud/aiplatform/v1beta1/schema/trainingjob/definition_v1beta1/types/automl_video_object_tracking.py
index 0fd8c7ec7a..328e266a3b 100644
--- a/google/cloud/aiplatform/v1beta1/schema/trainingjob/definition_v1beta1/types/automl_video_object_tracking.py
+++ b/google/cloud/aiplatform/v1beta1/schema/trainingjob/definition_v1beta1/types/automl_video_object_tracking.py
@@ -29,7 +29,7 @@ class AutoMlVideoObjectTracking(proto.Message):
ObjectTracking Model.
Attributes:
- inputs (~.automl_video_object_tracking.AutoMlVideoObjectTrackingInputs):
+ inputs (google.cloud.aiplatform.v1beta1.schema.trainingjob.definition_v1beta1.types.AutoMlVideoObjectTrackingInputs):
The input parameters of this TrainingJob.
"""
@@ -42,7 +42,7 @@ class AutoMlVideoObjectTrackingInputs(proto.Message):
r"""
Attributes:
- model_type (~.automl_video_object_tracking.AutoMlVideoObjectTrackingInputs.ModelType):
+ model_type (google.cloud.aiplatform.v1beta1.schema.trainingjob.definition_v1beta1.types.AutoMlVideoObjectTrackingInputs.ModelType):
"""
diff --git a/google/cloud/aiplatform_v1/__init__.py b/google/cloud/aiplatform_v1/__init__.py
new file mode 100644
index 0000000000..1b0c76e834
--- /dev/null
+++ b/google/cloud/aiplatform_v1/__init__.py
@@ -0,0 +1,345 @@
+# -*- coding: utf-8 -*-
+
+# Copyright 2020 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+from .services.dataset_service import DatasetServiceClient
+from .services.endpoint_service import EndpointServiceClient
+from .services.job_service import JobServiceClient
+from .services.migration_service import MigrationServiceClient
+from .services.model_service import ModelServiceClient
+from .services.pipeline_service import PipelineServiceClient
+from .services.prediction_service import PredictionServiceClient
+from .services.specialist_pool_service import SpecialistPoolServiceClient
+from .types.accelerator_type import AcceleratorType
+from .types.annotation import Annotation
+from .types.annotation_spec import AnnotationSpec
+from .types.batch_prediction_job import BatchPredictionJob
+from .types.completion_stats import CompletionStats
+from .types.custom_job import ContainerSpec
+from .types.custom_job import CustomJob
+from .types.custom_job import CustomJobSpec
+from .types.custom_job import PythonPackageSpec
+from .types.custom_job import Scheduling
+from .types.custom_job import WorkerPoolSpec
+from .types.data_item import DataItem
+from .types.data_labeling_job import ActiveLearningConfig
+from .types.data_labeling_job import DataLabelingJob
+from .types.data_labeling_job import SampleConfig
+from .types.data_labeling_job import TrainingConfig
+from .types.dataset import Dataset
+from .types.dataset import ExportDataConfig
+from .types.dataset import ImportDataConfig
+from .types.dataset_service import CreateDatasetOperationMetadata
+from .types.dataset_service import CreateDatasetRequest
+from .types.dataset_service import DeleteDatasetRequest
+from .types.dataset_service import ExportDataOperationMetadata
+from .types.dataset_service import ExportDataRequest
+from .types.dataset_service import ExportDataResponse
+from .types.dataset_service import GetAnnotationSpecRequest
+from .types.dataset_service import GetDatasetRequest
+from .types.dataset_service import ImportDataOperationMetadata
+from .types.dataset_service import ImportDataRequest
+from .types.dataset_service import ImportDataResponse
+from .types.dataset_service import ListAnnotationsRequest
+from .types.dataset_service import ListAnnotationsResponse
+from .types.dataset_service import ListDataItemsRequest
+from .types.dataset_service import ListDataItemsResponse
+from .types.dataset_service import ListDatasetsRequest
+from .types.dataset_service import ListDatasetsResponse
+from .types.dataset_service import UpdateDatasetRequest
+from .types.deployed_model_ref import DeployedModelRef
+from .types.encryption_spec import EncryptionSpec
+from .types.endpoint import DeployedModel
+from .types.endpoint import Endpoint
+from .types.endpoint_service import CreateEndpointOperationMetadata
+from .types.endpoint_service import CreateEndpointRequest
+from .types.endpoint_service import DeleteEndpointRequest
+from .types.endpoint_service import DeployModelOperationMetadata
+from .types.endpoint_service import DeployModelRequest
+from .types.endpoint_service import DeployModelResponse
+from .types.endpoint_service import GetEndpointRequest
+from .types.endpoint_service import ListEndpointsRequest
+from .types.endpoint_service import ListEndpointsResponse
+from .types.endpoint_service import UndeployModelOperationMetadata
+from .types.endpoint_service import UndeployModelRequest
+from .types.endpoint_service import UndeployModelResponse
+from .types.endpoint_service import UpdateEndpointRequest
+from .types.env_var import EnvVar
+from .types.hyperparameter_tuning_job import HyperparameterTuningJob
+from .types.io import BigQueryDestination
+from .types.io import BigQuerySource
+from .types.io import ContainerRegistryDestination
+from .types.io import GcsDestination
+from .types.io import GcsSource
+from .types.job_service import CancelBatchPredictionJobRequest
+from .types.job_service import CancelCustomJobRequest
+from .types.job_service import CancelDataLabelingJobRequest
+from .types.job_service import CancelHyperparameterTuningJobRequest
+from .types.job_service import CreateBatchPredictionJobRequest
+from .types.job_service import CreateCustomJobRequest
+from .types.job_service import CreateDataLabelingJobRequest
+from .types.job_service import CreateHyperparameterTuningJobRequest
+from .types.job_service import DeleteBatchPredictionJobRequest
+from .types.job_service import DeleteCustomJobRequest
+from .types.job_service import DeleteDataLabelingJobRequest
+from .types.job_service import DeleteHyperparameterTuningJobRequest
+from .types.job_service import GetBatchPredictionJobRequest
+from .types.job_service import GetCustomJobRequest
+from .types.job_service import GetDataLabelingJobRequest
+from .types.job_service import GetHyperparameterTuningJobRequest
+from .types.job_service import ListBatchPredictionJobsRequest
+from .types.job_service import ListBatchPredictionJobsResponse
+from .types.job_service import ListCustomJobsRequest
+from .types.job_service import ListCustomJobsResponse
+from .types.job_service import ListDataLabelingJobsRequest
+from .types.job_service import ListDataLabelingJobsResponse
+from .types.job_service import ListHyperparameterTuningJobsRequest
+from .types.job_service import ListHyperparameterTuningJobsResponse
+from .types.job_state import JobState
+from .types.machine_resources import AutomaticResources
+from .types.machine_resources import BatchDedicatedResources
+from .types.machine_resources import DedicatedResources
+from .types.machine_resources import DiskSpec
+from .types.machine_resources import MachineSpec
+from .types.machine_resources import ResourcesConsumed
+from .types.manual_batch_tuning_parameters import ManualBatchTuningParameters
+from .types.migratable_resource import MigratableResource
+from .types.migration_service import BatchMigrateResourcesOperationMetadata
+from .types.migration_service import BatchMigrateResourcesRequest
+from .types.migration_service import BatchMigrateResourcesResponse
+from .types.migration_service import MigrateResourceRequest
+from .types.migration_service import MigrateResourceResponse
+from .types.migration_service import SearchMigratableResourcesRequest
+from .types.migration_service import SearchMigratableResourcesResponse
+from .types.model import Model
+from .types.model import ModelContainerSpec
+from .types.model import Port
+from .types.model import PredictSchemata
+from .types.model_evaluation import ModelEvaluation
+from .types.model_evaluation_slice import ModelEvaluationSlice
+from .types.model_service import DeleteModelRequest
+from .types.model_service import ExportModelOperationMetadata
+from .types.model_service import ExportModelRequest
+from .types.model_service import ExportModelResponse
+from .types.model_service import GetModelEvaluationRequest
+from .types.model_service import GetModelEvaluationSliceRequest
+from .types.model_service import GetModelRequest
+from .types.model_service import ListModelEvaluationSlicesRequest
+from .types.model_service import ListModelEvaluationSlicesResponse
+from .types.model_service import ListModelEvaluationsRequest
+from .types.model_service import ListModelEvaluationsResponse
+from .types.model_service import ListModelsRequest
+from .types.model_service import ListModelsResponse
+from .types.model_service import UpdateModelRequest
+from .types.model_service import UploadModelOperationMetadata
+from .types.model_service import UploadModelRequest
+from .types.model_service import UploadModelResponse
+from .types.operation import DeleteOperationMetadata
+from .types.operation import GenericOperationMetadata
+from .types.pipeline_service import CancelTrainingPipelineRequest
+from .types.pipeline_service import CreateTrainingPipelineRequest
+from .types.pipeline_service import DeleteTrainingPipelineRequest
+from .types.pipeline_service import GetTrainingPipelineRequest
+from .types.pipeline_service import ListTrainingPipelinesRequest
+from .types.pipeline_service import ListTrainingPipelinesResponse
+from .types.pipeline_state import PipelineState
+from .types.prediction_service import PredictRequest
+from .types.prediction_service import PredictResponse
+from .types.specialist_pool import SpecialistPool
+from .types.specialist_pool_service import CreateSpecialistPoolOperationMetadata
+from .types.specialist_pool_service import CreateSpecialistPoolRequest
+from .types.specialist_pool_service import DeleteSpecialistPoolRequest
+from .types.specialist_pool_service import GetSpecialistPoolRequest
+from .types.specialist_pool_service import ListSpecialistPoolsRequest
+from .types.specialist_pool_service import ListSpecialistPoolsResponse
+from .types.specialist_pool_service import UpdateSpecialistPoolOperationMetadata
+from .types.specialist_pool_service import UpdateSpecialistPoolRequest
+from .types.study import Measurement
+from .types.study import StudySpec
+from .types.study import Trial
+from .types.training_pipeline import FilterSplit
+from .types.training_pipeline import FractionSplit
+from .types.training_pipeline import InputDataConfig
+from .types.training_pipeline import PredefinedSplit
+from .types.training_pipeline import TimestampSplit
+from .types.training_pipeline import TrainingPipeline
+from .types.user_action_reference import UserActionReference
+
+
+__all__ = (
+ "AcceleratorType",
+ "ActiveLearningConfig",
+ "Annotation",
+ "AnnotationSpec",
+ "AutomaticResources",
+ "BatchDedicatedResources",
+ "BatchMigrateResourcesOperationMetadata",
+ "BatchMigrateResourcesRequest",
+ "BatchMigrateResourcesResponse",
+ "BatchPredictionJob",
+ "BigQueryDestination",
+ "BigQuerySource",
+ "CancelBatchPredictionJobRequest",
+ "CancelCustomJobRequest",
+ "CancelDataLabelingJobRequest",
+ "CancelHyperparameterTuningJobRequest",
+ "CancelTrainingPipelineRequest",
+ "CompletionStats",
+ "ContainerRegistryDestination",
+ "ContainerSpec",
+ "CreateBatchPredictionJobRequest",
+ "CreateCustomJobRequest",
+ "CreateDataLabelingJobRequest",
+ "CreateDatasetOperationMetadata",
+ "CreateDatasetRequest",
+ "CreateEndpointOperationMetadata",
+ "CreateEndpointRequest",
+ "CreateHyperparameterTuningJobRequest",
+ "CreateSpecialistPoolOperationMetadata",
+ "CreateSpecialistPoolRequest",
+ "CreateTrainingPipelineRequest",
+ "CustomJob",
+ "CustomJobSpec",
+ "DataItem",
+ "DataLabelingJob",
+ "Dataset",
+ "DatasetServiceClient",
+ "DedicatedResources",
+ "DeleteBatchPredictionJobRequest",
+ "DeleteCustomJobRequest",
+ "DeleteDataLabelingJobRequest",
+ "DeleteDatasetRequest",
+ "DeleteEndpointRequest",
+ "DeleteHyperparameterTuningJobRequest",
+ "DeleteModelRequest",
+ "DeleteOperationMetadata",
+ "DeleteSpecialistPoolRequest",
+ "DeleteTrainingPipelineRequest",
+ "DeployModelOperationMetadata",
+ "DeployModelRequest",
+ "DeployModelResponse",
+ "DeployedModel",
+ "DeployedModelRef",
+ "DiskSpec",
+ "EncryptionSpec",
+ "Endpoint",
+ "EndpointServiceClient",
+ "EnvVar",
+ "ExportDataConfig",
+ "ExportDataOperationMetadata",
+ "ExportDataRequest",
+ "ExportDataResponse",
+ "ExportModelOperationMetadata",
+ "ExportModelRequest",
+ "ExportModelResponse",
+ "FilterSplit",
+ "FractionSplit",
+ "GcsDestination",
+ "GcsSource",
+ "GenericOperationMetadata",
+ "GetAnnotationSpecRequest",
+ "GetBatchPredictionJobRequest",
+ "GetCustomJobRequest",
+ "GetDataLabelingJobRequest",
+ "GetDatasetRequest",
+ "GetEndpointRequest",
+ "GetHyperparameterTuningJobRequest",
+ "GetModelEvaluationRequest",
+ "GetModelEvaluationSliceRequest",
+ "GetModelRequest",
+ "GetSpecialistPoolRequest",
+ "GetTrainingPipelineRequest",
+ "HyperparameterTuningJob",
+ "ImportDataConfig",
+ "ImportDataOperationMetadata",
+ "ImportDataRequest",
+ "ImportDataResponse",
+ "InputDataConfig",
+ "JobServiceClient",
+ "JobState",
+ "ListAnnotationsRequest",
+ "ListAnnotationsResponse",
+ "ListBatchPredictionJobsRequest",
+ "ListBatchPredictionJobsResponse",
+ "ListCustomJobsRequest",
+ "ListCustomJobsResponse",
+ "ListDataItemsRequest",
+ "ListDataItemsResponse",
+ "ListDataLabelingJobsRequest",
+ "ListDataLabelingJobsResponse",
+ "ListDatasetsRequest",
+ "ListDatasetsResponse",
+ "ListEndpointsRequest",
+ "ListEndpointsResponse",
+ "ListHyperparameterTuningJobsRequest",
+ "ListHyperparameterTuningJobsResponse",
+ "ListModelEvaluationSlicesRequest",
+ "ListModelEvaluationSlicesResponse",
+ "ListModelEvaluationsRequest",
+ "ListModelEvaluationsResponse",
+ "ListModelsRequest",
+ "ListModelsResponse",
+ "ListSpecialistPoolsRequest",
+ "ListSpecialistPoolsResponse",
+ "ListTrainingPipelinesRequest",
+ "ListTrainingPipelinesResponse",
+ "MachineSpec",
+ "ManualBatchTuningParameters",
+ "Measurement",
+ "MigratableResource",
+ "MigrateResourceRequest",
+ "MigrateResourceResponse",
+ "MigrationServiceClient",
+ "Model",
+ "ModelContainerSpec",
+ "ModelEvaluation",
+ "ModelEvaluationSlice",
+ "ModelServiceClient",
+ "PipelineServiceClient",
+ "PipelineState",
+ "Port",
+ "PredefinedSplit",
+ "PredictRequest",
+ "PredictResponse",
+ "PredictSchemata",
+ "PredictionServiceClient",
+ "PythonPackageSpec",
+ "ResourcesConsumed",
+ "SampleConfig",
+ "Scheduling",
+ "SearchMigratableResourcesRequest",
+ "SearchMigratableResourcesResponse",
+ "SpecialistPool",
+ "StudySpec",
+ "TimestampSplit",
+ "TrainingConfig",
+ "TrainingPipeline",
+ "Trial",
+ "UndeployModelOperationMetadata",
+ "UndeployModelRequest",
+ "UndeployModelResponse",
+ "UpdateDatasetRequest",
+ "UpdateEndpointRequest",
+ "UpdateModelRequest",
+ "UpdateSpecialistPoolOperationMetadata",
+ "UpdateSpecialistPoolRequest",
+ "UploadModelOperationMetadata",
+ "UploadModelRequest",
+ "UploadModelResponse",
+ "UserActionReference",
+ "WorkerPoolSpec",
+ "SpecialistPoolServiceClient",
+)
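+
+# Illustrative usage sketch (comments only, not generated code; the resource
+# name below is a placeholder). The names exported above are available from
+# ``google.cloud.aiplatform_v1``:
+#
+#   from google.cloud import aiplatform_v1
+#
+#   client = aiplatform_v1.DatasetServiceClient()
+#   dataset = client.get_dataset(
+#       name="projects/my-project/locations/us-central1/datasets/123"
+#   )
+#   print(dataset.display_name)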
diff --git a/google/cloud/aiplatform_v1/py.typed b/google/cloud/aiplatform_v1/py.typed
new file mode 100644
index 0000000000..228f1c51c6
--- /dev/null
+++ b/google/cloud/aiplatform_v1/py.typed
@@ -0,0 +1,2 @@
+# Marker file for PEP 561.
+# The google-cloud-aiplatform package uses inline types.
diff --git a/google/cloud/aiplatform_v1/services/__init__.py b/google/cloud/aiplatform_v1/services/__init__.py
new file mode 100644
index 0000000000..42ffdf2bc4
--- /dev/null
+++ b/google/cloud/aiplatform_v1/services/__init__.py
@@ -0,0 +1,16 @@
+# -*- coding: utf-8 -*-
+
+# Copyright 2020 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
diff --git a/google/cloud/aiplatform_v1/services/dataset_service/__init__.py b/google/cloud/aiplatform_v1/services/dataset_service/__init__.py
new file mode 100644
index 0000000000..597f654cb9
--- /dev/null
+++ b/google/cloud/aiplatform_v1/services/dataset_service/__init__.py
@@ -0,0 +1,24 @@
+# -*- coding: utf-8 -*-
+
+# Copyright 2020 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+from .client import DatasetServiceClient
+from .async_client import DatasetServiceAsyncClient
+
+__all__ = (
+ "DatasetServiceClient",
+ "DatasetServiceAsyncClient",
+)
diff --git a/google/cloud/aiplatform_v1/services/dataset_service/async_client.py b/google/cloud/aiplatform_v1/services/dataset_service/async_client.py
new file mode 100644
index 0000000000..d5b56b54f5
--- /dev/null
+++ b/google/cloud/aiplatform_v1/services/dataset_service/async_client.py
@@ -0,0 +1,1044 @@
+# -*- coding: utf-8 -*-
+
+# Copyright 2020 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+from collections import OrderedDict
+import functools
+import re
+from typing import Dict, Sequence, Tuple, Type, Union
+import pkg_resources
+
+import google.api_core.client_options as ClientOptions # type: ignore
+from google.api_core import exceptions # type: ignore
+from google.api_core import gapic_v1 # type: ignore
+from google.api_core import retry as retries # type: ignore
+from google.auth import credentials # type: ignore
+from google.oauth2 import service_account # type: ignore
+
+from google.api_core import operation as ga_operation # type: ignore
+from google.api_core import operation_async # type: ignore
+from google.cloud.aiplatform_v1.services.dataset_service import pagers
+from google.cloud.aiplatform_v1.types import annotation
+from google.cloud.aiplatform_v1.types import annotation_spec
+from google.cloud.aiplatform_v1.types import data_item
+from google.cloud.aiplatform_v1.types import dataset
+from google.cloud.aiplatform_v1.types import dataset as gca_dataset
+from google.cloud.aiplatform_v1.types import dataset_service
+from google.cloud.aiplatform_v1.types import encryption_spec
+from google.cloud.aiplatform_v1.types import operation as gca_operation
+from google.protobuf import empty_pb2 as empty # type: ignore
+from google.protobuf import field_mask_pb2 as field_mask # type: ignore
+from google.protobuf import struct_pb2 as struct # type: ignore
+from google.protobuf import timestamp_pb2 as timestamp # type: ignore
+
+from .transports.base import DatasetServiceTransport, DEFAULT_CLIENT_INFO
+from .transports.grpc_asyncio import DatasetServiceGrpcAsyncIOTransport
+from .client import DatasetServiceClient
+
+
+class DatasetServiceAsyncClient:
+ """"""
+
+ _client: DatasetServiceClient
+
+ DEFAULT_ENDPOINT = DatasetServiceClient.DEFAULT_ENDPOINT
+ DEFAULT_MTLS_ENDPOINT = DatasetServiceClient.DEFAULT_MTLS_ENDPOINT
+
+ annotation_path = staticmethod(DatasetServiceClient.annotation_path)
+ parse_annotation_path = staticmethod(DatasetServiceClient.parse_annotation_path)
+ annotation_spec_path = staticmethod(DatasetServiceClient.annotation_spec_path)
+ parse_annotation_spec_path = staticmethod(
+ DatasetServiceClient.parse_annotation_spec_path
+ )
+ data_item_path = staticmethod(DatasetServiceClient.data_item_path)
+ parse_data_item_path = staticmethod(DatasetServiceClient.parse_data_item_path)
+ dataset_path = staticmethod(DatasetServiceClient.dataset_path)
+ parse_dataset_path = staticmethod(DatasetServiceClient.parse_dataset_path)
+
+ common_billing_account_path = staticmethod(
+ DatasetServiceClient.common_billing_account_path
+ )
+ parse_common_billing_account_path = staticmethod(
+ DatasetServiceClient.parse_common_billing_account_path
+ )
+
+ common_folder_path = staticmethod(DatasetServiceClient.common_folder_path)
+ parse_common_folder_path = staticmethod(
+ DatasetServiceClient.parse_common_folder_path
+ )
+
+ common_organization_path = staticmethod(
+ DatasetServiceClient.common_organization_path
+ )
+ parse_common_organization_path = staticmethod(
+ DatasetServiceClient.parse_common_organization_path
+ )
+
+ common_project_path = staticmethod(DatasetServiceClient.common_project_path)
+ parse_common_project_path = staticmethod(
+ DatasetServiceClient.parse_common_project_path
+ )
+
+ common_location_path = staticmethod(DatasetServiceClient.common_location_path)
+ parse_common_location_path = staticmethod(
+ DatasetServiceClient.parse_common_location_path
+ )
+
+ from_service_account_info = DatasetServiceClient.from_service_account_info
+ from_service_account_file = DatasetServiceClient.from_service_account_file
+ from_service_account_json = from_service_account_file
+
+ @property
+ def transport(self) -> DatasetServiceTransport:
+ """Return the transport used by the client instance.
+
+ Returns:
+ DatasetServiceTransport: The transport used by the client instance.
+ """
+ return self._client.transport
+
+ get_transport_class = functools.partial(
+ type(DatasetServiceClient).get_transport_class, type(DatasetServiceClient)
+ )
+
+ def __init__(
+ self,
+ *,
+ credentials: credentials.Credentials = None,
+ transport: Union[str, DatasetServiceTransport] = "grpc_asyncio",
+ client_options: ClientOptions = None,
+ client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
+ ) -> None:
+ """Instantiate the dataset service client.
+
+ Args:
+ credentials (Optional[google.auth.credentials.Credentials]): The
+ authorization credentials to attach to requests. These
+ credentials identify the application to the service; if none
+ are specified, the client will attempt to ascertain the
+ credentials from the environment.
+ transport (Union[str, ~.DatasetServiceTransport]): The
+ transport to use. If set to None, a transport is chosen
+ automatically.
+ client_options (ClientOptions): Custom options for the client. It
+ won't take effect if a ``transport`` instance is provided.
+ (1) The ``api_endpoint`` property can be used to override the
+ default endpoint provided by the client. GOOGLE_API_USE_MTLS_ENDPOINT
+ environment variable can also be used to override the endpoint:
+ "always" (always use the default mTLS endpoint), "never" (always
+ use the default regular endpoint) and "auto" (auto switch to the
+ default mTLS endpoint if client certificate is present, this is
+ the default value). However, the ``api_endpoint`` property takes
+ precedence if provided.
+ (2) If GOOGLE_API_USE_CLIENT_CERTIFICATE environment variable
+ is "true", then the ``client_cert_source`` property can be used
+ to provide client certificate for mutual TLS transport. If
+ not provided, the default SSL client certificate will be used if
+ present. If GOOGLE_API_USE_CLIENT_CERTIFICATE is "false" or not
+ set, no client certificate will be used.
+
+ Raises:
+ google.auth.exceptions.MutualTlsChannelError: If mutual TLS transport
+ creation failed for any reason.
+ """
+
+ self._client = DatasetServiceClient(
+ credentials=credentials,
+ transport=transport,
+ client_options=client_options,
+ client_info=client_info,
+ )
+
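+    # Illustrative configuration sketch for the options documented above
+    # (comments only; the regional endpoint is a placeholder value):
+    #
+    #   client = DatasetServiceAsyncClient(
+    #       client_options={"api_endpoint": "us-central1-aiplatform.googleapis.com"}
+    #   )
+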
+ async def create_dataset(
+ self,
+ request: dataset_service.CreateDatasetRequest = None,
+ *,
+ parent: str = None,
+ dataset: gca_dataset.Dataset = None,
+ retry: retries.Retry = gapic_v1.method.DEFAULT,
+ timeout: float = None,
+ metadata: Sequence[Tuple[str, str]] = (),
+ ) -> operation_async.AsyncOperation:
+ r"""Creates a Dataset.
+
+ Args:
+ request (:class:`google.cloud.aiplatform_v1.types.CreateDatasetRequest`):
+ The request object. Request message for
+ ``DatasetService.CreateDataset``.
+ parent (:class:`str`):
+ Required. The resource name of the Location to create
+ the Dataset in. Format:
+ ``projects/{project}/locations/{location}``
+
+ This corresponds to the ``parent`` field
+ on the ``request`` instance; if ``request`` is provided, this
+ should not be set.
+ dataset (:class:`google.cloud.aiplatform_v1.types.Dataset`):
+ Required. The Dataset to create.
+ This corresponds to the ``dataset`` field
+ on the ``request`` instance; if ``request`` is provided, this
+ should not be set.
+
+ retry (google.api_core.retry.Retry): Designation of what errors, if any,
+ should be retried.
+ timeout (float): The timeout for this request.
+ metadata (Sequence[Tuple[str, str]]): Strings which should be
+ sent along with the request as metadata.
+
+ Returns:
+ google.api_core.operation_async.AsyncOperation:
+ An object representing a long-running operation.
+
+ The result type for the operation will be
+ :class:`google.cloud.aiplatform_v1.types.Dataset` A
+ collection of DataItems and Annotations on them.
+
+ """
+ # Create or coerce a protobuf request object.
+ # Sanity check: If we got a request object, we should *not* have
+ # gotten any keyword arguments that map to the request.
+ has_flattened_params = any([parent, dataset])
+ if request is not None and has_flattened_params:
+ raise ValueError(
+ "If the `request` argument is set, then none of "
+ "the individual field arguments should be set."
+ )
+
+ request = dataset_service.CreateDatasetRequest(request)
+
+ # If we have keyword arguments corresponding to fields on the
+ # request, apply these.
+
+ if parent is not None:
+ request.parent = parent
+ if dataset is not None:
+ request.dataset = dataset
+
+ # Wrap the RPC method; this adds retry and timeout information,
+ # and friendly error handling.
+ rpc = gapic_v1.method_async.wrap_method(
+ self._client._transport.create_dataset,
+ default_timeout=None,
+ client_info=DEFAULT_CLIENT_INFO,
+ )
+
+ # Certain fields should be provided within the metadata header;
+ # add these here.
+ metadata = tuple(metadata) + (
+ gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)),
+ )
+
+ # Send the request.
+ response = await rpc(request, retry=retry, timeout=timeout, metadata=metadata,)
+
+ # Wrap the response in an operation future.
+ response = operation_async.from_gapic(
+ response,
+ self._client._transport.operations_client,
+ gca_dataset.Dataset,
+ metadata_type=dataset_service.CreateDatasetOperationMetadata,
+ )
+
+ # Done; return the response.
+ return response
+
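+    # Hedged usage sketch for ``create_dataset`` above (comments only; the
+    # parent path and display name are placeholders). The returned object is
+    # an LRO future whose ``result()`` coroutine yields the created Dataset.
+    #
+    #   op = await client.create_dataset(
+    #       parent="projects/my-project/locations/us-central1",
+    #       dataset=gca_dataset.Dataset(display_name="my-dataset"),
+    #   )
+    #   created = await op.result()
+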
+ async def get_dataset(
+ self,
+ request: dataset_service.GetDatasetRequest = None,
+ *,
+ name: str = None,
+ retry: retries.Retry = gapic_v1.method.DEFAULT,
+ timeout: float = None,
+ metadata: Sequence[Tuple[str, str]] = (),
+ ) -> dataset.Dataset:
+ r"""Gets a Dataset.
+
+ Args:
+ request (:class:`google.cloud.aiplatform_v1.types.GetDatasetRequest`):
+ The request object. Request message for
+ ``DatasetService.GetDataset``.
+ name (:class:`str`):
+ Required. The name of the Dataset
+ resource.
+
+ This corresponds to the ``name`` field
+ on the ``request`` instance; if ``request`` is provided, this
+ should not be set.
+
+ retry (google.api_core.retry.Retry): Designation of what errors, if any,
+ should be retried.
+ timeout (float): The timeout for this request.
+ metadata (Sequence[Tuple[str, str]]): Strings which should be
+ sent along with the request as metadata.
+
+ Returns:
+ google.cloud.aiplatform_v1.types.Dataset:
+ A collection of DataItems and
+ Annotations on them.
+
+ """
+ # Create or coerce a protobuf request object.
+ # Sanity check: If we got a request object, we should *not* have
+ # gotten any keyword arguments that map to the request.
+ has_flattened_params = any([name])
+ if request is not None and has_flattened_params:
+ raise ValueError(
+ "If the `request` argument is set, then none of "
+ "the individual field arguments should be set."
+ )
+
+ request = dataset_service.GetDatasetRequest(request)
+
+ # If we have keyword arguments corresponding to fields on the
+ # request, apply these.
+
+ if name is not None:
+ request.name = name
+
+ # Wrap the RPC method; this adds retry and timeout information,
+ # and friendly error handling.
+ rpc = gapic_v1.method_async.wrap_method(
+ self._client._transport.get_dataset,
+ default_timeout=None,
+ client_info=DEFAULT_CLIENT_INFO,
+ )
+
+ # Certain fields should be provided within the metadata header;
+ # add these here.
+ metadata = tuple(metadata) + (
+ gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)),
+ )
+
+ # Send the request.
+ response = await rpc(request, retry=retry, timeout=timeout, metadata=metadata,)
+
+ # Done; return the response.
+ return response
+
+ async def update_dataset(
+ self,
+ request: dataset_service.UpdateDatasetRequest = None,
+ *,
+ dataset: gca_dataset.Dataset = None,
+ update_mask: field_mask.FieldMask = None,
+ retry: retries.Retry = gapic_v1.method.DEFAULT,
+ timeout: float = None,
+ metadata: Sequence[Tuple[str, str]] = (),
+ ) -> gca_dataset.Dataset:
+ r"""Updates a Dataset.
+
+ Args:
+ request (:class:`google.cloud.aiplatform_v1.types.UpdateDatasetRequest`):
+ The request object. Request message for
+ ``DatasetService.UpdateDataset``.
+ dataset (:class:`google.cloud.aiplatform_v1.types.Dataset`):
+ Required. The Dataset which replaces
+ the resource on the server.
+
+ This corresponds to the ``dataset`` field
+ on the ``request`` instance; if ``request`` is provided, this
+ should not be set.
+ update_mask (:class:`google.protobuf.field_mask_pb2.FieldMask`):
+ Required. The update mask applies to the resource. For
+                the ``FieldMask`` definition, see the protobuf
+                ``google.protobuf.FieldMask`` reference documentation.
+ Updatable fields:
+
+ - ``display_name``
+ - ``description``
+ - ``labels``
+
+ This corresponds to the ``update_mask`` field
+ on the ``request`` instance; if ``request`` is provided, this
+ should not be set.
+
+ retry (google.api_core.retry.Retry): Designation of what errors, if any,
+ should be retried.
+ timeout (float): The timeout for this request.
+ metadata (Sequence[Tuple[str, str]]): Strings which should be
+ sent along with the request as metadata.
+
+ Returns:
+ google.cloud.aiplatform_v1.types.Dataset:
+ A collection of DataItems and
+ Annotations on them.
+
+ """
+ # Create or coerce a protobuf request object.
+ # Sanity check: If we got a request object, we should *not* have
+ # gotten any keyword arguments that map to the request.
+ has_flattened_params = any([dataset, update_mask])
+ if request is not None and has_flattened_params:
+ raise ValueError(
+ "If the `request` argument is set, then none of "
+ "the individual field arguments should be set."
+ )
+
+ request = dataset_service.UpdateDatasetRequest(request)
+
+ # If we have keyword arguments corresponding to fields on the
+ # request, apply these.
+
+ if dataset is not None:
+ request.dataset = dataset
+ if update_mask is not None:
+ request.update_mask = update_mask
+
+ # Wrap the RPC method; this adds retry and timeout information,
+ # and friendly error handling.
+ rpc = gapic_v1.method_async.wrap_method(
+ self._client._transport.update_dataset,
+ default_timeout=None,
+ client_info=DEFAULT_CLIENT_INFO,
+ )
+
+ # Certain fields should be provided within the metadata header;
+ # add these here.
+ metadata = tuple(metadata) + (
+ gapic_v1.routing_header.to_grpc_metadata(
+ (("dataset.name", request.dataset.name),)
+ ),
+ )
+
+ # Send the request.
+ response = await rpc(request, retry=retry, timeout=timeout, metadata=metadata,)
+
+ # Done; return the response.
+ return response
+
+ async def list_datasets(
+ self,
+ request: dataset_service.ListDatasetsRequest = None,
+ *,
+ parent: str = None,
+ retry: retries.Retry = gapic_v1.method.DEFAULT,
+ timeout: float = None,
+ metadata: Sequence[Tuple[str, str]] = (),
+ ) -> pagers.ListDatasetsAsyncPager:
+ r"""Lists Datasets in a Location.
+
+ Args:
+ request (:class:`google.cloud.aiplatform_v1.types.ListDatasetsRequest`):
+ The request object. Request message for
+ ``DatasetService.ListDatasets``.
+ parent (:class:`str`):
+ Required. The name of the Dataset's parent resource.
+ Format: ``projects/{project}/locations/{location}``
+
+ This corresponds to the ``parent`` field
+ on the ``request`` instance; if ``request`` is provided, this
+ should not be set.
+
+ retry (google.api_core.retry.Retry): Designation of what errors, if any,
+ should be retried.
+ timeout (float): The timeout for this request.
+ metadata (Sequence[Tuple[str, str]]): Strings which should be
+ sent along with the request as metadata.
+
+ Returns:
+ google.cloud.aiplatform_v1.services.dataset_service.pagers.ListDatasetsAsyncPager:
+ Response message for
+ ``DatasetService.ListDatasets``.
+
+ Iterating over this object will yield results and
+ resolve additional pages automatically.
+
+ """
+ # Create or coerce a protobuf request object.
+ # Sanity check: If we got a request object, we should *not* have
+ # gotten any keyword arguments that map to the request.
+ has_flattened_params = any([parent])
+ if request is not None and has_flattened_params:
+ raise ValueError(
+ "If the `request` argument is set, then none of "
+ "the individual field arguments should be set."
+ )
+
+ request = dataset_service.ListDatasetsRequest(request)
+
+ # If we have keyword arguments corresponding to fields on the
+ # request, apply these.
+
+ if parent is not None:
+ request.parent = parent
+
+ # Wrap the RPC method; this adds retry and timeout information,
+ # and friendly error handling.
+ rpc = gapic_v1.method_async.wrap_method(
+ self._client._transport.list_datasets,
+ default_timeout=None,
+ client_info=DEFAULT_CLIENT_INFO,
+ )
+
+ # Certain fields should be provided within the metadata header;
+ # add these here.
+ metadata = tuple(metadata) + (
+ gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)),
+ )
+
+ # Send the request.
+ response = await rpc(request, retry=retry, timeout=timeout, metadata=metadata,)
+
+ # This method is paged; wrap the response in a pager, which provides
+ # an `__aiter__` convenience method.
+ response = pagers.ListDatasetsAsyncPager(
+ method=rpc, request=request, response=response, metadata=metadata,
+ )
+
+ # Done; return the response.
+ return response
+
+ async def delete_dataset(
+ self,
+ request: dataset_service.DeleteDatasetRequest = None,
+ *,
+ name: str = None,
+ retry: retries.Retry = gapic_v1.method.DEFAULT,
+ timeout: float = None,
+ metadata: Sequence[Tuple[str, str]] = (),
+ ) -> operation_async.AsyncOperation:
+ r"""Deletes a Dataset.
+
+ Args:
+ request (:class:`google.cloud.aiplatform_v1.types.DeleteDatasetRequest`):
+ The request object. Request message for
+ ``DatasetService.DeleteDataset``.
+ name (:class:`str`):
+ Required. The resource name of the Dataset to delete.
+ Format:
+ ``projects/{project}/locations/{location}/datasets/{dataset}``
+
+ This corresponds to the ``name`` field
+ on the ``request`` instance; if ``request`` is provided, this
+ should not be set.
+
+ retry (google.api_core.retry.Retry): Designation of what errors, if any,
+ should be retried.
+ timeout (float): The timeout for this request.
+ metadata (Sequence[Tuple[str, str]]): Strings which should be
+ sent along with the request as metadata.
+
+ Returns:
+ google.api_core.operation_async.AsyncOperation:
+ An object representing a long-running operation.
+
+ The result type for the operation will be :class:`google.protobuf.empty_pb2.Empty` A generic empty message that you can re-use to avoid defining duplicated
+ empty messages in your APIs. A typical example is to
+ use it as the request or the response type of an API
+ method. For instance:
+
+ service Foo {
+ rpc Bar(google.protobuf.Empty) returns
+ (google.protobuf.Empty);
+
+ }
+
+ The JSON representation for Empty is empty JSON
+ object {}.
+
+ """
+ # Create or coerce a protobuf request object.
+ # Sanity check: If we got a request object, we should *not* have
+ # gotten any keyword arguments that map to the request.
+ has_flattened_params = any([name])
+ if request is not None and has_flattened_params:
+ raise ValueError(
+ "If the `request` argument is set, then none of "
+ "the individual field arguments should be set."
+ )
+
+ request = dataset_service.DeleteDatasetRequest(request)
+
+ # If we have keyword arguments corresponding to fields on the
+ # request, apply these.
+
+ if name is not None:
+ request.name = name
+
+ # Wrap the RPC method; this adds retry and timeout information,
+ # and friendly error handling.
+ rpc = gapic_v1.method_async.wrap_method(
+ self._client._transport.delete_dataset,
+ default_timeout=None,
+ client_info=DEFAULT_CLIENT_INFO,
+ )
+
+ # Certain fields should be provided within the metadata header;
+ # add these here.
+ metadata = tuple(metadata) + (
+ gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)),
+ )
+
+ # Send the request.
+ response = await rpc(request, retry=retry, timeout=timeout, metadata=metadata,)
+
+ # Wrap the response in an operation future.
+ response = operation_async.from_gapic(
+ response,
+ self._client._transport.operations_client,
+ empty.Empty,
+ metadata_type=gca_operation.DeleteOperationMetadata,
+ )
+
+ # Done; return the response.
+ return response
+
+ async def import_data(
+ self,
+ request: dataset_service.ImportDataRequest = None,
+ *,
+ name: str = None,
+ import_configs: Sequence[dataset.ImportDataConfig] = None,
+ retry: retries.Retry = gapic_v1.method.DEFAULT,
+ timeout: float = None,
+ metadata: Sequence[Tuple[str, str]] = (),
+ ) -> operation_async.AsyncOperation:
+ r"""Imports data into a Dataset.
+
+ Args:
+ request (:class:`google.cloud.aiplatform_v1.types.ImportDataRequest`):
+ The request object. Request message for
+ ``DatasetService.ImportData``.
+ name (:class:`str`):
+ Required. The name of the Dataset resource. Format:
+ ``projects/{project}/locations/{location}/datasets/{dataset}``
+
+ This corresponds to the ``name`` field
+ on the ``request`` instance; if ``request`` is provided, this
+ should not be set.
+ import_configs (:class:`Sequence[google.cloud.aiplatform_v1.types.ImportDataConfig]`):
+ Required. The desired input
+ locations. The contents of all input
+ locations will be imported in one batch.
+
+ This corresponds to the ``import_configs`` field
+ on the ``request`` instance; if ``request`` is provided, this
+ should not be set.
+
+ retry (google.api_core.retry.Retry): Designation of what errors, if any,
+ should be retried.
+ timeout (float): The timeout for this request.
+ metadata (Sequence[Tuple[str, str]]): Strings which should be
+ sent along with the request as metadata.
+
+ Returns:
+ google.api_core.operation_async.AsyncOperation:
+ An object representing a long-running operation.
+
+ The result type for the operation will be
+ :class:`google.cloud.aiplatform_v1.types.ImportDataResponse`
+ Response message for
+ ``DatasetService.ImportData``.
+
+ """
+ # Create or coerce a protobuf request object.
+ # Sanity check: If we got a request object, we should *not* have
+ # gotten any keyword arguments that map to the request.
+ has_flattened_params = any([name, import_configs])
+ if request is not None and has_flattened_params:
+ raise ValueError(
+ "If the `request` argument is set, then none of "
+ "the individual field arguments should be set."
+ )
+
+ request = dataset_service.ImportDataRequest(request)
+
+ # If we have keyword arguments corresponding to fields on the
+ # request, apply these.
+
+ if name is not None:
+ request.name = name
+
+ if import_configs:
+ request.import_configs.extend(import_configs)
+
+ # Wrap the RPC method; this adds retry and timeout information,
+ # and friendly error handling.
+ rpc = gapic_v1.method_async.wrap_method(
+ self._client._transport.import_data,
+ default_timeout=None,
+ client_info=DEFAULT_CLIENT_INFO,
+ )
+
+ # Certain fields should be provided within the metadata header;
+ # add these here.
+ metadata = tuple(metadata) + (
+ gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)),
+ )
+
+ # Send the request.
+ response = await rpc(request, retry=retry, timeout=timeout, metadata=metadata,)
+
+ # Wrap the response in an operation future.
+ response = operation_async.from_gapic(
+ response,
+ self._client._transport.operations_client,
+ dataset_service.ImportDataResponse,
+ metadata_type=dataset_service.ImportDataOperationMetadata,
+ )
+
+ # Done; return the response.
+ return response
+
+ async def export_data(
+ self,
+ request: dataset_service.ExportDataRequest = None,
+ *,
+ name: str = None,
+ export_config: dataset.ExportDataConfig = None,
+ retry: retries.Retry = gapic_v1.method.DEFAULT,
+ timeout: float = None,
+ metadata: Sequence[Tuple[str, str]] = (),
+ ) -> operation_async.AsyncOperation:
+ r"""Exports data from a Dataset.
+
+ Args:
+ request (:class:`google.cloud.aiplatform_v1.types.ExportDataRequest`):
+ The request object. Request message for
+ ``DatasetService.ExportData``.
+ name (:class:`str`):
+ Required. The name of the Dataset resource. Format:
+ ``projects/{project}/locations/{location}/datasets/{dataset}``
+
+ This corresponds to the ``name`` field
+ on the ``request`` instance; if ``request`` is provided, this
+ should not be set.
+ export_config (:class:`google.cloud.aiplatform_v1.types.ExportDataConfig`):
+ Required. The desired output
+ location.
+
+ This corresponds to the ``export_config`` field
+ on the ``request`` instance; if ``request`` is provided, this
+ should not be set.
+
+ retry (google.api_core.retry.Retry): Designation of what errors, if any,
+ should be retried.
+ timeout (float): The timeout for this request.
+ metadata (Sequence[Tuple[str, str]]): Strings which should be
+ sent along with the request as metadata.
+
+ Returns:
+ google.api_core.operation_async.AsyncOperation:
+ An object representing a long-running operation.
+
+ The result type for the operation will be
+ :class:`google.cloud.aiplatform_v1.types.ExportDataResponse`
+ Response message for
+ ``DatasetService.ExportData``.
+
+ """
+ # Create or coerce a protobuf request object.
+ # Sanity check: If we got a request object, we should *not* have
+ # gotten any keyword arguments that map to the request.
+ has_flattened_params = any([name, export_config])
+ if request is not None and has_flattened_params:
+ raise ValueError(
+ "If the `request` argument is set, then none of "
+ "the individual field arguments should be set."
+ )
+
+ request = dataset_service.ExportDataRequest(request)
+
+ # If we have keyword arguments corresponding to fields on the
+ # request, apply these.
+
+ if name is not None:
+ request.name = name
+ if export_config is not None:
+ request.export_config = export_config
+
+ # Wrap the RPC method; this adds retry and timeout information,
+ # and friendly error handling.
+ rpc = gapic_v1.method_async.wrap_method(
+ self._client._transport.export_data,
+ default_timeout=None,
+ client_info=DEFAULT_CLIENT_INFO,
+ )
+
+ # Certain fields should be provided within the metadata header;
+ # add these here.
+ metadata = tuple(metadata) + (
+ gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)),
+ )
+
+ # Send the request.
+ response = await rpc(request, retry=retry, timeout=timeout, metadata=metadata,)
+
+ # Wrap the response in an operation future.
+ response = operation_async.from_gapic(
+ response,
+ self._client._transport.operations_client,
+ dataset_service.ExportDataResponse,
+ metadata_type=dataset_service.ExportDataOperationMetadata,
+ )
+
+ # Done; return the response.
+ return response
+
+ async def list_data_items(
+ self,
+ request: dataset_service.ListDataItemsRequest = None,
+ *,
+ parent: str = None,
+ retry: retries.Retry = gapic_v1.method.DEFAULT,
+ timeout: float = None,
+ metadata: Sequence[Tuple[str, str]] = (),
+ ) -> pagers.ListDataItemsAsyncPager:
+ r"""Lists DataItems in a Dataset.
+
+ Args:
+ request (:class:`google.cloud.aiplatform_v1.types.ListDataItemsRequest`):
+ The request object. Request message for
+ ``DatasetService.ListDataItems``.
+ parent (:class:`str`):
+ Required. The resource name of the Dataset to list
+ DataItems from. Format:
+ ``projects/{project}/locations/{location}/datasets/{dataset}``
+
+ This corresponds to the ``parent`` field
+ on the ``request`` instance; if ``request`` is provided, this
+ should not be set.
+
+ retry (google.api_core.retry.Retry): Designation of what errors, if any,
+ should be retried.
+ timeout (float): The timeout for this request.
+ metadata (Sequence[Tuple[str, str]]): Strings which should be
+ sent along with the request as metadata.
+
+ Returns:
+ google.cloud.aiplatform_v1.services.dataset_service.pagers.ListDataItemsAsyncPager:
+ Response message for
+ ``DatasetService.ListDataItems``.
+
+ Iterating over this object will yield results and
+ resolve additional pages automatically.
+
+ """
+ # Create or coerce a protobuf request object.
+ # Sanity check: If we got a request object, we should *not* have
+ # gotten any keyword arguments that map to the request.
+ has_flattened_params = any([parent])
+ if request is not None and has_flattened_params:
+ raise ValueError(
+ "If the `request` argument is set, then none of "
+ "the individual field arguments should be set."
+ )
+
+ request = dataset_service.ListDataItemsRequest(request)
+
+ # If we have keyword arguments corresponding to fields on the
+ # request, apply these.
+
+ if parent is not None:
+ request.parent = parent
+
+ # Wrap the RPC method; this adds retry and timeout information,
+ # and friendly error handling.
+ rpc = gapic_v1.method_async.wrap_method(
+ self._client._transport.list_data_items,
+ default_timeout=None,
+ client_info=DEFAULT_CLIENT_INFO,
+ )
+
+ # Certain fields should be provided within the metadata header;
+ # add these here.
+ metadata = tuple(metadata) + (
+ gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)),
+ )
+
+ # Send the request.
+ response = await rpc(request, retry=retry, timeout=timeout, metadata=metadata,)
+
+ # This method is paged; wrap the response in a pager, which provides
+ # an `__aiter__` convenience method.
+ response = pagers.ListDataItemsAsyncPager(
+ method=rpc, request=request, response=response, metadata=metadata,
+ )
+
+ # Done; return the response.
+ return response
+
+ async def get_annotation_spec(
+ self,
+ request: dataset_service.GetAnnotationSpecRequest = None,
+ *,
+ name: str = None,
+ retry: retries.Retry = gapic_v1.method.DEFAULT,
+ timeout: float = None,
+ metadata: Sequence[Tuple[str, str]] = (),
+ ) -> annotation_spec.AnnotationSpec:
+ r"""Gets an AnnotationSpec.
+
+ Args:
+ request (:class:`google.cloud.aiplatform_v1.types.GetAnnotationSpecRequest`):
+ The request object. Request message for
+ ``DatasetService.GetAnnotationSpec``.
+ name (:class:`str`):
+ Required. The name of the AnnotationSpec resource.
+ Format:
+
+ ``projects/{project}/locations/{location}/datasets/{dataset}/annotationSpecs/{annotation_spec}``
+
+ This corresponds to the ``name`` field
+ on the ``request`` instance; if ``request`` is provided, this
+ should not be set.
+
+ retry (google.api_core.retry.Retry): Designation of what errors, if any,
+ should be retried.
+ timeout (float): The timeout for this request.
+ metadata (Sequence[Tuple[str, str]]): Strings which should be
+ sent along with the request as metadata.
+
+ Returns:
+ google.cloud.aiplatform_v1.types.AnnotationSpec:
+                Identifies a concept with which
+                DataItems may be annotated.
+
+ """
+ # Create or coerce a protobuf request object.
+ # Sanity check: If we got a request object, we should *not* have
+ # gotten any keyword arguments that map to the request.
+ has_flattened_params = any([name])
+ if request is not None and has_flattened_params:
+ raise ValueError(
+ "If the `request` argument is set, then none of "
+ "the individual field arguments should be set."
+ )
+
+ request = dataset_service.GetAnnotationSpecRequest(request)
+
+ # If we have keyword arguments corresponding to fields on the
+ # request, apply these.
+
+ if name is not None:
+ request.name = name
+
+ # Wrap the RPC method; this adds retry and timeout information,
+ # and friendly error handling.
+ rpc = gapic_v1.method_async.wrap_method(
+ self._client._transport.get_annotation_spec,
+ default_timeout=None,
+ client_info=DEFAULT_CLIENT_INFO,
+ )
+
+ # Certain fields should be provided within the metadata header;
+ # add these here.
+ metadata = tuple(metadata) + (
+ gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)),
+ )
+
+ # Send the request.
+ response = await rpc(request, retry=retry, timeout=timeout, metadata=metadata,)
+
+ # Done; return the response.
+ return response
+
+ async def list_annotations(
+ self,
+ request: dataset_service.ListAnnotationsRequest = None,
+ *,
+ parent: str = None,
+ retry: retries.Retry = gapic_v1.method.DEFAULT,
+ timeout: float = None,
+ metadata: Sequence[Tuple[str, str]] = (),
+ ) -> pagers.ListAnnotationsAsyncPager:
+ r"""Lists Annotations belongs to a dataitem
+
+ Args:
+ request (:class:`google.cloud.aiplatform_v1.types.ListAnnotationsRequest`):
+ The request object. Request message for
+ ``DatasetService.ListAnnotations``.
+ parent (:class:`str`):
+ Required. The resource name of the DataItem to list
+ Annotations from. Format:
+
+ ``projects/{project}/locations/{location}/datasets/{dataset}/dataItems/{data_item}``
+
+ This corresponds to the ``parent`` field
+ on the ``request`` instance; if ``request`` is provided, this
+ should not be set.
+
+ retry (google.api_core.retry.Retry): Designation of what errors, if any,
+ should be retried.
+ timeout (float): The timeout for this request.
+ metadata (Sequence[Tuple[str, str]]): Strings which should be
+ sent along with the request as metadata.
+
+ Returns:
+ google.cloud.aiplatform_v1.services.dataset_service.pagers.ListAnnotationsAsyncPager:
+ Response message for
+ ``DatasetService.ListAnnotations``.
+
+ Iterating over this object will yield results and
+ resolve additional pages automatically.
+
+ """
+ # Create or coerce a protobuf request object.
+ # Sanity check: If we got a request object, we should *not* have
+ # gotten any keyword arguments that map to the request.
+ has_flattened_params = any([parent])
+ if request is not None and has_flattened_params:
+ raise ValueError(
+ "If the `request` argument is set, then none of "
+ "the individual field arguments should be set."
+ )
+
+ request = dataset_service.ListAnnotationsRequest(request)
+
+ # If we have keyword arguments corresponding to fields on the
+ # request, apply these.
+
+ if parent is not None:
+ request.parent = parent
+
+ # Wrap the RPC method; this adds retry and timeout information,
+ # and friendly error handling.
+ rpc = gapic_v1.method_async.wrap_method(
+ self._client._transport.list_annotations,
+ default_timeout=None,
+ client_info=DEFAULT_CLIENT_INFO,
+ )
+
+ # Certain fields should be provided within the metadata header;
+ # add these here.
+ metadata = tuple(metadata) + (
+ gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)),
+ )
+
+ # Send the request.
+ response = await rpc(request, retry=retry, timeout=timeout, metadata=metadata,)
+
+ # This method is paged; wrap the response in a pager, which provides
+ # an `__aiter__` convenience method.
+ response = pagers.ListAnnotationsAsyncPager(
+ method=rpc, request=request, response=response, metadata=metadata,
+ )
+
+ # Done; return the response.
+ return response
+
+
+try:
+ DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo(
+ gapic_version=pkg_resources.get_distribution(
+ "google-cloud-aiplatform",
+ ).version,
+ )
+except pkg_resources.DistributionNotFound:
+ DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo()
+
+
+__all__ = ("DatasetServiceAsyncClient",)
diff --git a/google/cloud/aiplatform_v1/services/dataset_service/client.py b/google/cloud/aiplatform_v1/services/dataset_service/client.py
new file mode 100644
index 0000000000..e545dbe56e
--- /dev/null
+++ b/google/cloud/aiplatform_v1/services/dataset_service/client.py
@@ -0,0 +1,1309 @@
+# -*- coding: utf-8 -*-
+
+# Copyright 2020 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+from collections import OrderedDict
+from distutils import util
+import os
+import re
+from typing import Callable, Dict, Optional, Sequence, Tuple, Type, Union
+import pkg_resources
+
+from google.api_core import client_options as client_options_lib # type: ignore
+from google.api_core import exceptions # type: ignore
+from google.api_core import gapic_v1 # type: ignore
+from google.api_core import retry as retries # type: ignore
+from google.auth import credentials # type: ignore
+from google.auth.transport import mtls # type: ignore
+from google.auth.transport.grpc import SslCredentials # type: ignore
+from google.auth.exceptions import MutualTLSChannelError # type: ignore
+from google.oauth2 import service_account # type: ignore
+
+from google.api_core import operation as ga_operation # type: ignore
+from google.api_core import operation_async # type: ignore
+from google.cloud.aiplatform_v1.services.dataset_service import pagers
+from google.cloud.aiplatform_v1.types import annotation
+from google.cloud.aiplatform_v1.types import annotation_spec
+from google.cloud.aiplatform_v1.types import data_item
+from google.cloud.aiplatform_v1.types import dataset
+from google.cloud.aiplatform_v1.types import dataset as gca_dataset
+from google.cloud.aiplatform_v1.types import dataset_service
+from google.cloud.aiplatform_v1.types import encryption_spec
+from google.cloud.aiplatform_v1.types import operation as gca_operation
+from google.protobuf import empty_pb2 as empty # type: ignore
+from google.protobuf import field_mask_pb2 as field_mask # type: ignore
+from google.protobuf import struct_pb2 as struct # type: ignore
+from google.protobuf import timestamp_pb2 as timestamp # type: ignore
+
+from .transports.base import DatasetServiceTransport, DEFAULT_CLIENT_INFO
+from .transports.grpc import DatasetServiceGrpcTransport
+from .transports.grpc_asyncio import DatasetServiceGrpcAsyncIOTransport
+
+
+class DatasetServiceClientMeta(type):
+ """Metaclass for the DatasetService client.
+
+ This provides class-level methods for building and retrieving
+ support objects (e.g. transport) without polluting the client instance
+ objects.
+ """
+
+ _transport_registry = (
+ OrderedDict()
+ ) # type: Dict[str, Type[DatasetServiceTransport]]
+ _transport_registry["grpc"] = DatasetServiceGrpcTransport
+ _transport_registry["grpc_asyncio"] = DatasetServiceGrpcAsyncIOTransport
+
+ def get_transport_class(cls, label: str = None,) -> Type[DatasetServiceTransport]:
+ """Return an appropriate transport class.
+
+ Args:
+ label: The name of the desired transport. If none is
+ provided, then the first transport in the registry is used.
+
+ Returns:
+ The transport class to use.
+ """
+ # If a specific transport is requested, return that one.
+ if label:
+ return cls._transport_registry[label]
+
+ # No transport is requested; return the default (that is, the first one
+ # in the dictionary).
+ return next(iter(cls._transport_registry.values()))
+
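+    # Hedged illustration of the lookup above (comments only): an explicit
+    # label selects that transport; with no label the first registered
+    # transport ("grpc") is returned.
+    #
+    #   DatasetServiceClient.get_transport_class("grpc_asyncio")
+    #   # -> DatasetServiceGrpcAsyncIOTransport
+    #   DatasetServiceClient.get_transport_class()
+    #   # -> DatasetServiceGrpcTransport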
+
+class DatasetServiceClient(metaclass=DatasetServiceClientMeta):
+ """"""
+
+ @staticmethod
+ def _get_default_mtls_endpoint(api_endpoint):
+ """Convert api endpoint to mTLS endpoint.
+ Convert "*.sandbox.googleapis.com" and "*.googleapis.com" to
+ "*.mtls.sandbox.googleapis.com" and "*.mtls.googleapis.com" respectively.
+ Args:
+ api_endpoint (Optional[str]): the api endpoint to convert.
+ Returns:
+ str: converted mTLS api endpoint.
+ """
+ if not api_endpoint:
+ return api_endpoint
+
+ mtls_endpoint_re = re.compile(
+ r"(?P[^.]+)(?P\.mtls)?(?P\.sandbox)?(?P\.googleapis\.com)?"
+ )
+
+ m = mtls_endpoint_re.match(api_endpoint)
+ name, mtls, sandbox, googledomain = m.groups()
+ if mtls or not googledomain:
+ return api_endpoint
+
+ if sandbox:
+ return api_endpoint.replace(
+ "sandbox.googleapis.com", "mtls.sandbox.googleapis.com"
+ )
+
+ return api_endpoint.replace(".googleapis.com", ".mtls.googleapis.com")
+
+ DEFAULT_ENDPOINT = "aiplatform.googleapis.com"
+ DEFAULT_MTLS_ENDPOINT = _get_default_mtls_endpoint.__func__( # type: ignore
+ DEFAULT_ENDPOINT
+ )
+
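+    # Worked example of the conversion performed by ``_get_default_mtls_endpoint``
+    # (comments only):
+    #
+    #   "aiplatform.googleapis.com"         -> "aiplatform.mtls.googleapis.com"
+    #   "aiplatform.sandbox.googleapis.com" -> "aiplatform.mtls.sandbox.googleapis.com"
+    #   "aiplatform.mtls.googleapis.com"    -> unchanged (already mTLS)
+    #   "example.com"                       -> unchanged (not a googleapis.com host)
+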
+ @classmethod
+ def from_service_account_info(cls, info: dict, *args, **kwargs):
+ """Creates an instance of this client using the provided credentials info.
+
+ Args:
+ info (dict): The service account private key info.
+ args: Additional arguments to pass to the constructor.
+ kwargs: Additional arguments to pass to the constructor.
+
+ Returns:
+ DatasetServiceClient: The constructed client.
+ """
+ credentials = service_account.Credentials.from_service_account_info(info)
+ kwargs["credentials"] = credentials
+ return cls(*args, **kwargs)
+
+ @classmethod
+ def from_service_account_file(cls, filename: str, *args, **kwargs):
+ """Creates an instance of this client using the provided credentials
+ file.
+
+ Args:
+ filename (str): The path to the service account private key json
+ file.
+ args: Additional arguments to pass to the constructor.
+ kwargs: Additional arguments to pass to the constructor.
+
+ Returns:
+ DatasetServiceClient: The constructed client.
+ """
+ credentials = service_account.Credentials.from_service_account_file(filename)
+ kwargs["credentials"] = credentials
+ return cls(*args, **kwargs)
+
+ from_service_account_json = from_service_account_file
+
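+    # Hedged usage sketch (comments only; the key filename is a placeholder):
+    #
+    #   client = DatasetServiceClient.from_service_account_file("service-account.json")
+    #
+    # ``from_service_account_json`` is an alias for the same constructor.
+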
+ @property
+ def transport(self) -> DatasetServiceTransport:
+ """Return the transport used by the client instance.
+
+ Returns:
+ DatasetServiceTransport: The transport used by the client instance.
+ """
+ return self._transport
+
+ @staticmethod
+ def annotation_path(
+ project: str, location: str, dataset: str, data_item: str, annotation: str,
+ ) -> str:
+ """Return a fully-qualified annotation string."""
+ return "projects/{project}/locations/{location}/datasets/{dataset}/dataItems/{data_item}/annotations/{annotation}".format(
+ project=project,
+ location=location,
+ dataset=dataset,
+ data_item=data_item,
+ annotation=annotation,
+ )
+
+ @staticmethod
+ def parse_annotation_path(path: str) -> Dict[str, str]:
+ """Parse a annotation path into its component segments."""
+ m = re.match(
+ r"^projects/(?P.+?)/locations/(?P.+?)/datasets/(?P.+?)/dataItems/(?P.+?)/annotations/(?P.+?)$",
+ path,
+ )
+ return m.groupdict() if m else {}
+
+ @staticmethod
+ def annotation_spec_path(
+ project: str, location: str, dataset: str, annotation_spec: str,
+ ) -> str:
+ """Return a fully-qualified annotation_spec string."""
+ return "projects/{project}/locations/{location}/datasets/{dataset}/annotationSpecs/{annotation_spec}".format(
+ project=project,
+ location=location,
+ dataset=dataset,
+ annotation_spec=annotation_spec,
+ )
+
+ @staticmethod
+ def parse_annotation_spec_path(path: str) -> Dict[str, str]:
+ """Parse a annotation_spec path into its component segments."""
+ m = re.match(
+ r"^projects/(?P.+?)/locations/(?P.+?)/datasets/(?P.+?)/annotationSpecs/(?P.+?)$",
+ path,
+ )
+ return m.groupdict() if m else {}
+
+ @staticmethod
+ def data_item_path(
+ project: str, location: str, dataset: str, data_item: str,
+ ) -> str:
+ """Return a fully-qualified data_item string."""
+ return "projects/{project}/locations/{location}/datasets/{dataset}/dataItems/{data_item}".format(
+ project=project, location=location, dataset=dataset, data_item=data_item,
+ )
+
+ @staticmethod
+ def parse_data_item_path(path: str) -> Dict[str, str]:
+ """Parse a data_item path into its component segments."""
+ m = re.match(
+ r"^projects/(?P.+?)/locations/(?P.+?)/datasets/(?P.+?)/dataItems/(?P.+?)$",
+ path,
+ )
+ return m.groupdict() if m else {}
+
+ @staticmethod
+ def dataset_path(project: str, location: str, dataset: str,) -> str:
+ """Return a fully-qualified dataset string."""
+ return "projects/{project}/locations/{location}/datasets/{dataset}".format(
+ project=project, location=location, dataset=dataset,
+ )
+
+ @staticmethod
+ def parse_dataset_path(path: str) -> Dict[str, str]:
+ """Parse a dataset path into its component segments."""
+ m = re.match(
+ r"^projects/(?P.+?)/locations/(?P.+?)/datasets/(?P.+?)$",
+ path,
+ )
+ return m.groupdict() if m else {}
+
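+    # Hedged round-trip sketch for the path helpers above (comments only; the
+    # project, location, and dataset IDs are placeholders):
+    #
+    #   path = DatasetServiceClient.dataset_path("my-project", "us-central1", "123")
+    #   # -> "projects/my-project/locations/us-central1/datasets/123"
+    #   DatasetServiceClient.parse_dataset_path(path)
+    #   # -> {"project": "my-project", "location": "us-central1", "dataset": "123"}
+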
+ @staticmethod
+ def common_billing_account_path(billing_account: str,) -> str:
+ """Return a fully-qualified billing_account string."""
+ return "billingAccounts/{billing_account}".format(
+ billing_account=billing_account,
+ )
+
+ @staticmethod
+ def parse_common_billing_account_path(path: str) -> Dict[str, str]:
+ """Parse a billing_account path into its component segments."""
+ m = re.match(r"^billingAccounts/(?P.+?)$", path)
+ return m.groupdict() if m else {}
+
+ @staticmethod
+ def common_folder_path(folder: str,) -> str:
+ """Return a fully-qualified folder string."""
+ return "folders/{folder}".format(folder=folder,)
+
+ @staticmethod
+ def parse_common_folder_path(path: str) -> Dict[str, str]:
+ """Parse a folder path into its component segments."""
+ m = re.match(r"^folders/(?P.+?)$", path)
+ return m.groupdict() if m else {}
+
+ @staticmethod
+ def common_organization_path(organization: str,) -> str:
+ """Return a fully-qualified organization string."""
+ return "organizations/{organization}".format(organization=organization,)
+
+ @staticmethod
+ def parse_common_organization_path(path: str) -> Dict[str, str]:
+ """Parse a organization path into its component segments."""
+ m = re.match(r"^organizations/(?P.+?)$", path)
+ return m.groupdict() if m else {}
+
+ @staticmethod
+ def common_project_path(project: str,) -> str:
+ """Return a fully-qualified project string."""
+ return "projects/{project}".format(project=project,)
+
+ @staticmethod
+ def parse_common_project_path(path: str) -> Dict[str, str]:
+ """Parse a project path into its component segments."""
+ m = re.match(r"^projects/(?P.+?)$", path)
+ return m.groupdict() if m else {}
+
+ @staticmethod
+ def common_location_path(project: str, location: str,) -> str:
+ """Return a fully-qualified location string."""
+ return "projects/{project}/locations/{location}".format(
+ project=project, location=location,
+ )
+
+ @staticmethod
+ def parse_common_location_path(path: str) -> Dict[str, str]:
+ """Parse a location path into its component segments."""
+ m = re.match(r"^projects/(?P.+?)/locations/(?P.+?)$", path)
+ return m.groupdict() if m else {}
+
+ def __init__(
+ self,
+ *,
+ credentials: Optional[credentials.Credentials] = None,
+ transport: Union[str, DatasetServiceTransport, None] = None,
+ client_options: Optional[client_options_lib.ClientOptions] = None,
+ client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
+ ) -> None:
+ """Instantiate the dataset service client.
+
+ Args:
+ credentials (Optional[google.auth.credentials.Credentials]): The
+ authorization credentials to attach to requests. These
+ credentials identify the application to the service; if none
+ are specified, the client will attempt to ascertain the
+ credentials from the environment.
+ transport (Union[str, DatasetServiceTransport]): The
+ transport to use. If set to None, a transport is chosen
+ automatically.
+ client_options (google.api_core.client_options.ClientOptions): Custom options for the
+ client. It won't take effect if a ``transport`` instance is provided.
+ (1) The ``api_endpoint`` property can be used to override the
+ default endpoint provided by the client. GOOGLE_API_USE_MTLS_ENDPOINT
+ environment variable can also be used to override the endpoint:
+ "always" (always use the default mTLS endpoint), "never" (always
+ use the default regular endpoint) and "auto" (auto switch to the
+ default mTLS endpoint if client certificate is present, this is
+ the default value). However, the ``api_endpoint`` property takes
+ precedence if provided.
+ (2) If GOOGLE_API_USE_CLIENT_CERTIFICATE environment variable
+ is "true", then the ``client_cert_source`` property can be used
+ to provide client certificate for mutual TLS transport. If
+ not provided, the default SSL client certificate will be used if
+ present. If GOOGLE_API_USE_CLIENT_CERTIFICATE is "false" or not
+ set, no client certificate will be used.
+ client_info (google.api_core.gapic_v1.client_info.ClientInfo):
+ The client info used to send a user-agent string along with
+ API requests. If ``None``, then default info will be used.
+ Generally, you only need to set this if you're developing
+ your own client library.
+
+ Raises:
+ google.auth.exceptions.MutualTLSChannelError: If mutual TLS transport
+ creation failed for any reason.
+ """
+ if isinstance(client_options, dict):
+ client_options = client_options_lib.from_dict(client_options)
+ if client_options is None:
+ client_options = client_options_lib.ClientOptions()
+
+ # Create SSL credentials for mutual TLS if needed.
+ use_client_cert = bool(
+ util.strtobool(os.getenv("GOOGLE_API_USE_CLIENT_CERTIFICATE", "false"))
+ )
+
+ client_cert_source_func = None
+ is_mtls = False
+ if use_client_cert:
+ if client_options.client_cert_source:
+ is_mtls = True
+ client_cert_source_func = client_options.client_cert_source
+ else:
+ is_mtls = mtls.has_default_client_cert_source()
+ client_cert_source_func = (
+ mtls.default_client_cert_source() if is_mtls else None
+ )
+
+ # Figure out which api endpoint to use.
+ if client_options.api_endpoint is not None:
+ api_endpoint = client_options.api_endpoint
+ else:
+ use_mtls_env = os.getenv("GOOGLE_API_USE_MTLS_ENDPOINT", "auto")
+ if use_mtls_env == "never":
+ api_endpoint = self.DEFAULT_ENDPOINT
+ elif use_mtls_env == "always":
+ api_endpoint = self.DEFAULT_MTLS_ENDPOINT
+ elif use_mtls_env == "auto":
+ api_endpoint = (
+ self.DEFAULT_MTLS_ENDPOINT if is_mtls else self.DEFAULT_ENDPOINT
+ )
+ else:
+ raise MutualTLSChannelError(
+ "Unsupported GOOGLE_API_USE_MTLS_ENDPOINT value. Accepted values: never, auto, always"
+ )
+
+ # Save or instantiate the transport.
+ # Ordinarily, we provide the transport, but allowing a custom transport
+ # instance provides an extensibility point for unusual situations.
+ if isinstance(transport, DatasetServiceTransport):
+ # transport is a DatasetServiceTransport instance.
+ if credentials or client_options.credentials_file:
+ raise ValueError(
+ "When providing a transport instance, "
+ "provide its credentials directly."
+ )
+ if client_options.scopes:
+ raise ValueError(
+ "When providing a transport instance, "
+ "provide its scopes directly."
+ )
+ self._transport = transport
+ else:
+ Transport = type(self).get_transport_class(transport)
+ self._transport = Transport(
+ credentials=credentials,
+ credentials_file=client_options.credentials_file,
+ host=api_endpoint,
+ scopes=client_options.scopes,
+ client_cert_source_for_mtls=client_cert_source_func,
+ quota_project_id=client_options.quota_project_id,
+ client_info=client_info,
+ )
+
+ def create_dataset(
+ self,
+ request: dataset_service.CreateDatasetRequest = None,
+ *,
+ parent: str = None,
+ dataset: gca_dataset.Dataset = None,
+ retry: retries.Retry = gapic_v1.method.DEFAULT,
+ timeout: float = None,
+ metadata: Sequence[Tuple[str, str]] = (),
+ ) -> ga_operation.Operation:
+ r"""Creates a Dataset.
+
+ Args:
+ request (google.cloud.aiplatform_v1.types.CreateDatasetRequest):
+ The request object. Request message for
+ ``DatasetService.CreateDataset``.
+ parent (str):
+ Required. The resource name of the Location to create
+ the Dataset in. Format:
+ ``projects/{project}/locations/{location}``
+
+ This corresponds to the ``parent`` field
+ on the ``request`` instance; if ``request`` is provided, this
+ should not be set.
+ dataset (google.cloud.aiplatform_v1.types.Dataset):
+ Required. The Dataset to create.
+ This corresponds to the ``dataset`` field
+ on the ``request`` instance; if ``request`` is provided, this
+ should not be set.
+
+ retry (google.api_core.retry.Retry): Designation of what errors, if any,
+ should be retried.
+ timeout (float): The timeout for this request.
+ metadata (Sequence[Tuple[str, str]]): Strings which should be
+ sent along with the request as metadata.
+
+ Returns:
+ google.api_core.operation.Operation:
+ An object representing a long-running operation.
+
+ The result type for the operation will be
+ :class:`google.cloud.aiplatform_v1.types.Dataset` A
+ collection of DataItems and Annotations on them.
+
+ """
+ # Create or coerce a protobuf request object.
+ # Sanity check: If we got a request object, we should *not* have
+ # gotten any keyword arguments that map to the request.
+ has_flattened_params = any([parent, dataset])
+ if request is not None and has_flattened_params:
+ raise ValueError(
+ "If the `request` argument is set, then none of "
+ "the individual field arguments should be set."
+ )
+
+ # Minor optimization to avoid making a copy if the user passes
+ # in a dataset_service.CreateDatasetRequest.
+ # There's no risk of modifying the input as we've already verified
+ # there are no flattened fields.
+ if not isinstance(request, dataset_service.CreateDatasetRequest):
+ request = dataset_service.CreateDatasetRequest(request)
+
+ # If we have keyword arguments corresponding to fields on the
+ # request, apply these.
+
+ if parent is not None:
+ request.parent = parent
+ if dataset is not None:
+ request.dataset = dataset
+
+ # Wrap the RPC method; this adds retry and timeout information,
+ # and friendly error handling.
+ rpc = self._transport._wrapped_methods[self._transport.create_dataset]
+
+ # Certain fields should be provided within the metadata header;
+ # add these here.
+ metadata = tuple(metadata) + (
+ gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)),
+ )
+
+ # Send the request.
+ response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,)
+
+ # Wrap the response in an operation future.
+ response = ga_operation.from_gapic(
+ response,
+ self._transport.operations_client,
+ gca_dataset.Dataset,
+ metadata_type=dataset_service.CreateDatasetOperationMetadata,
+ )
+
+ # Done; return the response.
+ return response
+
+ def get_dataset(
+ self,
+ request: dataset_service.GetDatasetRequest = None,
+ *,
+ name: str = None,
+ retry: retries.Retry = gapic_v1.method.DEFAULT,
+ timeout: float = None,
+ metadata: Sequence[Tuple[str, str]] = (),
+ ) -> dataset.Dataset:
+ r"""Gets a Dataset.
+
+ Args:
+ request (google.cloud.aiplatform_v1.types.GetDatasetRequest):
+ The request object. Request message for
+ ``DatasetService.GetDataset``.
+ name (str):
+ Required. The name of the Dataset
+ resource.
+
+ This corresponds to the ``name`` field
+ on the ``request`` instance; if ``request`` is provided, this
+ should not be set.
+
+ retry (google.api_core.retry.Retry): Designation of what errors, if any,
+ should be retried.
+ timeout (float): The timeout for this request.
+ metadata (Sequence[Tuple[str, str]]): Strings which should be
+ sent along with the request as metadata.
+
+ Returns:
+ google.cloud.aiplatform_v1.types.Dataset:
+ A collection of DataItems and
+ Annotations on them.
+
+ """
+ # Create or coerce a protobuf request object.
+ # Sanity check: If we got a request object, we should *not* have
+ # gotten any keyword arguments that map to the request.
+ has_flattened_params = any([name])
+ if request is not None and has_flattened_params:
+ raise ValueError(
+ "If the `request` argument is set, then none of "
+ "the individual field arguments should be set."
+ )
+
+ # Minor optimization to avoid making a copy if the user passes
+ # in a dataset_service.GetDatasetRequest.
+ # There's no risk of modifying the input as we've already verified
+ # there are no flattened fields.
+ if not isinstance(request, dataset_service.GetDatasetRequest):
+ request = dataset_service.GetDatasetRequest(request)
+
+ # If we have keyword arguments corresponding to fields on the
+ # request, apply these.
+
+ if name is not None:
+ request.name = name
+
+ # Wrap the RPC method; this adds retry and timeout information,
+ # and friendly error handling.
+ rpc = self._transport._wrapped_methods[self._transport.get_dataset]
+
+ # Certain fields should be provided within the metadata header;
+ # add these here.
+ metadata = tuple(metadata) + (
+ gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)),
+ )
+
+ # Send the request.
+ response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,)
+
+ # Done; return the response.
+ return response
+
+ def update_dataset(
+ self,
+ request: dataset_service.UpdateDatasetRequest = None,
+ *,
+ dataset: gca_dataset.Dataset = None,
+ update_mask: field_mask.FieldMask = None,
+ retry: retries.Retry = gapic_v1.method.DEFAULT,
+ timeout: float = None,
+ metadata: Sequence[Tuple[str, str]] = (),
+ ) -> gca_dataset.Dataset:
+ r"""Updates a Dataset.
+
+ Args:
+ request (google.cloud.aiplatform_v1.types.UpdateDatasetRequest):
+ The request object. Request message for
+ ``DatasetService.UpdateDataset``.
+ dataset (google.cloud.aiplatform_v1.types.Dataset):
+ Required. The Dataset which replaces
+ the resource on the server.
+
+ This corresponds to the ``dataset`` field
+ on the ``request`` instance; if ``request`` is provided, this
+ should not be set.
+ update_mask (google.protobuf.field_mask_pb2.FieldMask):
+ Required. The update mask applies to the resource. For
+ the ``FieldMask`` definition, see
+ `FieldMask <https://developers.google.com/protocol-buffers/docs/reference/google.protobuf#fieldmask>`__.
+ Updatable fields:
+
+ - ``display_name``
+ - ``description``
+ - ``labels``
+
+ This corresponds to the ``update_mask`` field
+ on the ``request`` instance; if ``request`` is provided, this
+ should not be set.
+
+ retry (google.api_core.retry.Retry): Designation of what errors, if any,
+ should be retried.
+ timeout (float): The timeout for this request.
+ metadata (Sequence[Tuple[str, str]]): Strings which should be
+ sent along with the request as metadata.
+
+ Returns:
+ google.cloud.aiplatform_v1.types.Dataset:
+ A collection of DataItems and
+ Annotations on them.
+
+ """
+ # Create or coerce a protobuf request object.
+ # Sanity check: If we got a request object, we should *not* have
+ # gotten any keyword arguments that map to the request.
+ has_flattened_params = any([dataset, update_mask])
+ if request is not None and has_flattened_params:
+ raise ValueError(
+ "If the `request` argument is set, then none of "
+ "the individual field arguments should be set."
+ )
+
+ # Minor optimization to avoid making a copy if the user passes
+ # in a dataset_service.UpdateDatasetRequest.
+ # There's no risk of modifying the input as we've already verified
+ # there are no flattened fields.
+ if not isinstance(request, dataset_service.UpdateDatasetRequest):
+ request = dataset_service.UpdateDatasetRequest(request)
+
+ # If we have keyword arguments corresponding to fields on the
+ # request, apply these.
+
+ if dataset is not None:
+ request.dataset = dataset
+ if update_mask is not None:
+ request.update_mask = update_mask
+
+ # Wrap the RPC method; this adds retry and timeout information,
+ # and friendly error handling.
+ rpc = self._transport._wrapped_methods[self._transport.update_dataset]
+
+ # Certain fields should be provided within the metadata header;
+ # add these here.
+ metadata = tuple(metadata) + (
+ gapic_v1.routing_header.to_grpc_metadata(
+ (("dataset.name", request.dataset.name),)
+ ),
+ )
+
+ # Send the request.
+ response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,)
+
+ # Done; return the response.
+ return response
+
+ def list_datasets(
+ self,
+ request: dataset_service.ListDatasetsRequest = None,
+ *,
+ parent: str = None,
+ retry: retries.Retry = gapic_v1.method.DEFAULT,
+ timeout: float = None,
+ metadata: Sequence[Tuple[str, str]] = (),
+ ) -> pagers.ListDatasetsPager:
+ r"""Lists Datasets in a Location.
+
+ Args:
+ request (google.cloud.aiplatform_v1.types.ListDatasetsRequest):
+ The request object. Request message for
+ ``DatasetService.ListDatasets``.
+ parent (str):
+ Required. The name of the Dataset's parent resource.
+ Format: ``projects/{project}/locations/{location}``
+
+ This corresponds to the ``parent`` field
+ on the ``request`` instance; if ``request`` is provided, this
+ should not be set.
+
+ retry (google.api_core.retry.Retry): Designation of what errors, if any,
+ should be retried.
+ timeout (float): The timeout for this request.
+ metadata (Sequence[Tuple[str, str]]): Strings which should be
+ sent along with the request as metadata.
+
+ Returns:
+ google.cloud.aiplatform_v1.services.dataset_service.pagers.ListDatasetsPager:
+ Response message for
+ ``DatasetService.ListDatasets``.
+
+ Iterating over this object will yield results and
+ resolve additional pages automatically.
+
+ """
+ # Create or coerce a protobuf request object.
+ # Sanity check: If we got a request object, we should *not* have
+ # gotten any keyword arguments that map to the request.
+ has_flattened_params = any([parent])
+ if request is not None and has_flattened_params:
+ raise ValueError(
+ "If the `request` argument is set, then none of "
+ "the individual field arguments should be set."
+ )
+
+ # Minor optimization to avoid making a copy if the user passes
+ # in a dataset_service.ListDatasetsRequest.
+ # There's no risk of modifying the input as we've already verified
+ # there are no flattened fields.
+ if not isinstance(request, dataset_service.ListDatasetsRequest):
+ request = dataset_service.ListDatasetsRequest(request)
+
+ # If we have keyword arguments corresponding to fields on the
+ # request, apply these.
+
+ if parent is not None:
+ request.parent = parent
+
+ # Wrap the RPC method; this adds retry and timeout information,
+ # and friendly error handling.
+ rpc = self._transport._wrapped_methods[self._transport.list_datasets]
+
+ # Certain fields should be provided within the metadata header;
+ # add these here.
+ metadata = tuple(metadata) + (
+ gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)),
+ )
+
+ # Send the request.
+ response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,)
+
+ # This method is paged; wrap the response in a pager, which provides
+ # an `__iter__` convenience method.
+ response = pagers.ListDatasetsPager(
+ method=rpc, request=request, response=response, metadata=metadata,
+ )
+
+ # Done; return the response.
+ return response
+
+ def delete_dataset(
+ self,
+ request: dataset_service.DeleteDatasetRequest = None,
+ *,
+ name: str = None,
+ retry: retries.Retry = gapic_v1.method.DEFAULT,
+ timeout: float = None,
+ metadata: Sequence[Tuple[str, str]] = (),
+ ) -> ga_operation.Operation:
+ r"""Deletes a Dataset.
+
+ Args:
+ request (google.cloud.aiplatform_v1.types.DeleteDatasetRequest):
+ The request object. Request message for
+ ``DatasetService.DeleteDataset``.
+ name (str):
+ Required. The resource name of the Dataset to delete.
+ Format:
+ ``projects/{project}/locations/{location}/datasets/{dataset}``
+
+ This corresponds to the ``name`` field
+ on the ``request`` instance; if ``request`` is provided, this
+ should not be set.
+
+ retry (google.api_core.retry.Retry): Designation of what errors, if any,
+ should be retried.
+ timeout (float): The timeout for this request.
+ metadata (Sequence[Tuple[str, str]]): Strings which should be
+ sent along with the request as metadata.
+
+ Returns:
+ google.api_core.operation.Operation:
+ An object representing a long-running operation.
+
+ The result type for the operation will be :class:`google.protobuf.empty_pb2.Empty`, a generic empty message that you can re-use to avoid defining duplicated
+ empty messages in your APIs. A typical example is to
+ use it as the request or the response type of an API
+ method. For instance:
+
+ service Foo {
+ rpc Bar(google.protobuf.Empty) returns
+ (google.protobuf.Empty);
+
+ }
+
+ The JSON representation for ``Empty`` is an empty
+ JSON object, ``{}``.
+
+ """
+ # Create or coerce a protobuf request object.
+ # Sanity check: If we got a request object, we should *not* have
+ # gotten any keyword arguments that map to the request.
+ has_flattened_params = any([name])
+ if request is not None and has_flattened_params:
+ raise ValueError(
+ "If the `request` argument is set, then none of "
+ "the individual field arguments should be set."
+ )
+
+ # Minor optimization to avoid making a copy if the user passes
+ # in a dataset_service.DeleteDatasetRequest.
+ # There's no risk of modifying the input as we've already verified
+ # there are no flattened fields.
+ if not isinstance(request, dataset_service.DeleteDatasetRequest):
+ request = dataset_service.DeleteDatasetRequest(request)
+
+ # If we have keyword arguments corresponding to fields on the
+ # request, apply these.
+
+ if name is not None:
+ request.name = name
+
+ # Wrap the RPC method; this adds retry and timeout information,
+ # and friendly error handling.
+ rpc = self._transport._wrapped_methods[self._transport.delete_dataset]
+
+ # Certain fields should be provided within the metadata header;
+ # add these here.
+ metadata = tuple(metadata) + (
+ gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)),
+ )
+
+ # Send the request.
+ response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,)
+
+ # Wrap the response in an operation future.
+ response = ga_operation.from_gapic(
+ response,
+ self._transport.operations_client,
+ empty.Empty,
+ metadata_type=gca_operation.DeleteOperationMetadata,
+ )
+
+ # Done; return the response.
+ return response
+
+ def import_data(
+ self,
+ request: dataset_service.ImportDataRequest = None,
+ *,
+ name: str = None,
+ import_configs: Sequence[dataset.ImportDataConfig] = None,
+ retry: retries.Retry = gapic_v1.method.DEFAULT,
+ timeout: float = None,
+ metadata: Sequence[Tuple[str, str]] = (),
+ ) -> ga_operation.Operation:
+ r"""Imports data into a Dataset.
+
+ Args:
+ request (google.cloud.aiplatform_v1.types.ImportDataRequest):
+ The request object. Request message for
+ ``DatasetService.ImportData``.
+ name (str):
+ Required. The name of the Dataset resource. Format:
+ ``projects/{project}/locations/{location}/datasets/{dataset}``
+
+ This corresponds to the ``name`` field
+ on the ``request`` instance; if ``request`` is provided, this
+ should not be set.
+ import_configs (Sequence[google.cloud.aiplatform_v1.types.ImportDataConfig]):
+ Required. The desired input
+ locations. The contents of all input
+ locations will be imported in one batch.
+
+ This corresponds to the ``import_configs`` field
+ on the ``request`` instance; if ``request`` is provided, this
+ should not be set.
+
+ retry (google.api_core.retry.Retry): Designation of what errors, if any,
+ should be retried.
+ timeout (float): The timeout for this request.
+ metadata (Sequence[Tuple[str, str]]): Strings which should be
+ sent along with the request as metadata.
+
+ Returns:
+ google.api_core.operation.Operation:
+ An object representing a long-running operation.
+
+ The result type for the operation will be
+ :class:`google.cloud.aiplatform_v1.types.ImportDataResponse`
+ Response message for
+ ``DatasetService.ImportData``.
+
+ """
+ # Create or coerce a protobuf request object.
+ # Sanity check: If we got a request object, we should *not* have
+ # gotten any keyword arguments that map to the request.
+ has_flattened_params = any([name, import_configs])
+ if request is not None and has_flattened_params:
+ raise ValueError(
+ "If the `request` argument is set, then none of "
+ "the individual field arguments should be set."
+ )
+
+ # Minor optimization to avoid making a copy if the user passes
+ # in a dataset_service.ImportDataRequest.
+ # There's no risk of modifying the input as we've already verified
+ # there are no flattened fields.
+ if not isinstance(request, dataset_service.ImportDataRequest):
+ request = dataset_service.ImportDataRequest(request)
+
+ # If we have keyword arguments corresponding to fields on the
+ # request, apply these.
+
+ if name is not None:
+ request.name = name
+
+ if import_configs:
+ request.import_configs.extend(import_configs)
+
+ # Wrap the RPC method; this adds retry and timeout information,
+ # and friendly error handling.
+ rpc = self._transport._wrapped_methods[self._transport.import_data]
+
+ # Certain fields should be provided within the metadata header;
+ # add these here.
+ metadata = tuple(metadata) + (
+ gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)),
+ )
+
+ # Send the request.
+ response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,)
+
+ # Wrap the response in an operation future.
+ response = ga_operation.from_gapic(
+ response,
+ self._transport.operations_client,
+ dataset_service.ImportDataResponse,
+ metadata_type=dataset_service.ImportDataOperationMetadata,
+ )
+
+ # Done; return the response.
+ return response
+
+ def export_data(
+ self,
+ request: dataset_service.ExportDataRequest = None,
+ *,
+ name: str = None,
+ export_config: dataset.ExportDataConfig = None,
+ retry: retries.Retry = gapic_v1.method.DEFAULT,
+ timeout: float = None,
+ metadata: Sequence[Tuple[str, str]] = (),
+ ) -> ga_operation.Operation:
+ r"""Exports data from a Dataset.
+
+ Args:
+ request (google.cloud.aiplatform_v1.types.ExportDataRequest):
+ The request object. Request message for
+ ``DatasetService.ExportData``.
+ name (str):
+ Required. The name of the Dataset resource. Format:
+ ``projects/{project}/locations/{location}/datasets/{dataset}``
+
+ This corresponds to the ``name`` field
+ on the ``request`` instance; if ``request`` is provided, this
+ should not be set.
+ export_config (google.cloud.aiplatform_v1.types.ExportDataConfig):
+ Required. The desired output
+ location.
+
+ This corresponds to the ``export_config`` field
+ on the ``request`` instance; if ``request`` is provided, this
+ should not be set.
+
+ retry (google.api_core.retry.Retry): Designation of what errors, if any,
+ should be retried.
+ timeout (float): The timeout for this request.
+ metadata (Sequence[Tuple[str, str]]): Strings which should be
+ sent along with the request as metadata.
+
+ Returns:
+ google.api_core.operation.Operation:
+ An object representing a long-running operation.
+
+ The result type for the operation will be
+ :class:`google.cloud.aiplatform_v1.types.ExportDataResponse`
+ Response message for
+ ``DatasetService.ExportData``.
+
+ """
+ # Create or coerce a protobuf request object.
+ # Sanity check: If we got a request object, we should *not* have
+ # gotten any keyword arguments that map to the request.
+ has_flattened_params = any([name, export_config])
+ if request is not None and has_flattened_params:
+ raise ValueError(
+ "If the `request` argument is set, then none of "
+ "the individual field arguments should be set."
+ )
+
+ # Minor optimization to avoid making a copy if the user passes
+ # in a dataset_service.ExportDataRequest.
+ # There's no risk of modifying the input as we've already verified
+ # there are no flattened fields.
+ if not isinstance(request, dataset_service.ExportDataRequest):
+ request = dataset_service.ExportDataRequest(request)
+
+ # If we have keyword arguments corresponding to fields on the
+ # request, apply these.
+
+ if name is not None:
+ request.name = name
+ if export_config is not None:
+ request.export_config = export_config
+
+ # Wrap the RPC method; this adds retry and timeout information,
+ # and friendly error handling.
+ rpc = self._transport._wrapped_methods[self._transport.export_data]
+
+ # Certain fields should be provided within the metadata header;
+ # add these here.
+ metadata = tuple(metadata) + (
+ gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)),
+ )
+
+ # Send the request.
+ response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,)
+
+ # Wrap the response in an operation future.
+ response = ga_operation.from_gapic(
+ response,
+ self._transport.operations_client,
+ dataset_service.ExportDataResponse,
+ metadata_type=dataset_service.ExportDataOperationMetadata,
+ )
+
+ # Done; return the response.
+ return response
+
+ def list_data_items(
+ self,
+ request: dataset_service.ListDataItemsRequest = None,
+ *,
+ parent: str = None,
+ retry: retries.Retry = gapic_v1.method.DEFAULT,
+ timeout: float = None,
+ metadata: Sequence[Tuple[str, str]] = (),
+ ) -> pagers.ListDataItemsPager:
+ r"""Lists DataItems in a Dataset.
+
+ Args:
+ request (google.cloud.aiplatform_v1.types.ListDataItemsRequest):
+ The request object. Request message for
+ ``DatasetService.ListDataItems``.
+ parent (str):
+ Required. The resource name of the Dataset to list
+ DataItems from. Format:
+ ``projects/{project}/locations/{location}/datasets/{dataset}``
+
+ This corresponds to the ``parent`` field
+ on the ``request`` instance; if ``request`` is provided, this
+ should not be set.
+
+ retry (google.api_core.retry.Retry): Designation of what errors, if any,
+ should be retried.
+ timeout (float): The timeout for this request.
+ metadata (Sequence[Tuple[str, str]]): Strings which should be
+ sent along with the request as metadata.
+
+ Returns:
+ google.cloud.aiplatform_v1.services.dataset_service.pagers.ListDataItemsPager:
+ Response message for
+ ``DatasetService.ListDataItems``.
+
+ Iterating over this object will yield results and
+ resolve additional pages automatically.
+
+ """
+ # Create or coerce a protobuf request object.
+ # Sanity check: If we got a request object, we should *not* have
+ # gotten any keyword arguments that map to the request.
+ has_flattened_params = any([parent])
+ if request is not None and has_flattened_params:
+ raise ValueError(
+ "If the `request` argument is set, then none of "
+ "the individual field arguments should be set."
+ )
+
+ # Minor optimization to avoid making a copy if the user passes
+ # in a dataset_service.ListDataItemsRequest.
+ # There's no risk of modifying the input as we've already verified
+ # there are no flattened fields.
+ if not isinstance(request, dataset_service.ListDataItemsRequest):
+ request = dataset_service.ListDataItemsRequest(request)
+
+ # If we have keyword arguments corresponding to fields on the
+ # request, apply these.
+
+ if parent is not None:
+ request.parent = parent
+
+ # Wrap the RPC method; this adds retry and timeout information,
+ # and friendly error handling.
+ rpc = self._transport._wrapped_methods[self._transport.list_data_items]
+
+ # Certain fields should be provided within the metadata header;
+ # add these here.
+ metadata = tuple(metadata) + (
+ gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)),
+ )
+
+ # Send the request.
+ response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,)
+
+ # This method is paged; wrap the response in a pager, which provides
+ # an `__iter__` convenience method.
+ response = pagers.ListDataItemsPager(
+ method=rpc, request=request, response=response, metadata=metadata,
+ )
+
+ # Done; return the response.
+ return response
+
+ def get_annotation_spec(
+ self,
+ request: dataset_service.GetAnnotationSpecRequest = None,
+ *,
+ name: str = None,
+ retry: retries.Retry = gapic_v1.method.DEFAULT,
+ timeout: float = None,
+ metadata: Sequence[Tuple[str, str]] = (),
+ ) -> annotation_spec.AnnotationSpec:
+ r"""Gets an AnnotationSpec.
+
+ Args:
+ request (google.cloud.aiplatform_v1.types.GetAnnotationSpecRequest):
+ The request object. Request message for
+ ``DatasetService.GetAnnotationSpec``.
+ name (str):
+ Required. The name of the AnnotationSpec resource.
+ Format:
+
+ ``projects/{project}/locations/{location}/datasets/{dataset}/annotationSpecs/{annotation_spec}``
+
+ This corresponds to the ``name`` field
+ on the ``request`` instance; if ``request`` is provided, this
+ should not be set.
+
+ retry (google.api_core.retry.Retry): Designation of what errors, if any,
+ should be retried.
+ timeout (float): The timeout for this request.
+ metadata (Sequence[Tuple[str, str]]): Strings which should be
+ sent along with the request as metadata.
+
+ Returns:
+ google.cloud.aiplatform_v1.types.AnnotationSpec:
+ Identifies a concept with which
+ DataItems may be annotated.
+
+ """
+ # Create or coerce a protobuf request object.
+ # Sanity check: If we got a request object, we should *not* have
+ # gotten any keyword arguments that map to the request.
+ has_flattened_params = any([name])
+ if request is not None and has_flattened_params:
+ raise ValueError(
+ "If the `request` argument is set, then none of "
+ "the individual field arguments should be set."
+ )
+
+ # Minor optimization to avoid making a copy if the user passes
+ # in a dataset_service.GetAnnotationSpecRequest.
+ # There's no risk of modifying the input as we've already verified
+ # there are no flattened fields.
+ if not isinstance(request, dataset_service.GetAnnotationSpecRequest):
+ request = dataset_service.GetAnnotationSpecRequest(request)
+
+ # If we have keyword arguments corresponding to fields on the
+ # request, apply these.
+
+ if name is not None:
+ request.name = name
+
+ # Wrap the RPC method; this adds retry and timeout information,
+ # and friendly error handling.
+ rpc = self._transport._wrapped_methods[self._transport.get_annotation_spec]
+
+ # Certain fields should be provided within the metadata header;
+ # add these here.
+ metadata = tuple(metadata) + (
+ gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)),
+ )
+
+ # Send the request.
+ response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,)
+
+ # Done; return the response.
+ return response
+
+ def list_annotations(
+ self,
+ request: dataset_service.ListAnnotationsRequest = None,
+ *,
+ parent: str = None,
+ retry: retries.Retry = gapic_v1.method.DEFAULT,
+ timeout: float = None,
+ metadata: Sequence[Tuple[str, str]] = (),
+ ) -> pagers.ListAnnotationsPager:
+ r"""Lists Annotations belongs to a dataitem
+
+ Args:
+ request (google.cloud.aiplatform_v1.types.ListAnnotationsRequest):
+ The request object. Request message for
+ ``DatasetService.ListAnnotations``.
+ parent (str):
+ Required. The resource name of the DataItem to list
+ Annotations from. Format:
+
+ ``projects/{project}/locations/{location}/datasets/{dataset}/dataItems/{data_item}``
+
+ This corresponds to the ``parent`` field
+ on the ``request`` instance; if ``request`` is provided, this
+ should not be set.
+
+ retry (google.api_core.retry.Retry): Designation of what errors, if any,
+ should be retried.
+ timeout (float): The timeout for this request.
+ metadata (Sequence[Tuple[str, str]]): Strings which should be
+ sent along with the request as metadata.
+
+ Returns:
+ google.cloud.aiplatform_v1.services.dataset_service.pagers.ListAnnotationsPager:
+ Response message for
+ ``DatasetService.ListAnnotations``.
+
+ Iterating over this object will yield results and
+ resolve additional pages automatically.
+
+ """
+ # Create or coerce a protobuf request object.
+ # Sanity check: If we got a request object, we should *not* have
+ # gotten any keyword arguments that map to the request.
+ has_flattened_params = any([parent])
+ if request is not None and has_flattened_params:
+ raise ValueError(
+ "If the `request` argument is set, then none of "
+ "the individual field arguments should be set."
+ )
+
+ # Minor optimization to avoid making a copy if the user passes
+ # in a dataset_service.ListAnnotationsRequest.
+ # There's no risk of modifying the input as we've already verified
+ # there are no flattened fields.
+ if not isinstance(request, dataset_service.ListAnnotationsRequest):
+ request = dataset_service.ListAnnotationsRequest(request)
+
+ # If we have keyword arguments corresponding to fields on the
+ # request, apply these.
+
+ if parent is not None:
+ request.parent = parent
+
+ # Wrap the RPC method; this adds retry and timeout information,
+ # and friendly error handling.
+ rpc = self._transport._wrapped_methods[self._transport.list_annotations]
+
+ # Certain fields should be provided within the metadata header;
+ # add these here.
+ metadata = tuple(metadata) + (
+ gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)),
+ )
+
+ # Send the request.
+ response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,)
+
+ # This method is paged; wrap the response in a pager, which provides
+ # an `__iter__` convenience method.
+ response = pagers.ListAnnotationsPager(
+ method=rpc, request=request, response=response, metadata=metadata,
+ )
+
+ # Done; return the response.
+ return response
+
+
+try:
+ DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo(
+ gapic_version=pkg_resources.get_distribution(
+ "google-cloud-aiplatform",
+ ).version,
+ )
+except pkg_resources.DistributionNotFound:
+ DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo()
+
+
+__all__ = ("DatasetServiceClient",)
diff --git a/google/cloud/aiplatform_v1/services/dataset_service/pagers.py b/google/cloud/aiplatform_v1/services/dataset_service/pagers.py
new file mode 100644
index 0000000000..f195ca3308
--- /dev/null
+++ b/google/cloud/aiplatform_v1/services/dataset_service/pagers.py
@@ -0,0 +1,407 @@
+# -*- coding: utf-8 -*-
+
+# Copyright 2020 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+from typing import Any, AsyncIterable, Awaitable, Callable, Iterable, Sequence, Tuple
+
+from google.cloud.aiplatform_v1.types import annotation
+from google.cloud.aiplatform_v1.types import data_item
+from google.cloud.aiplatform_v1.types import dataset
+from google.cloud.aiplatform_v1.types import dataset_service
+
+
+class ListDatasetsPager:
+ """A pager for iterating through ``list_datasets`` requests.
+
+ This class thinly wraps an initial
+ :class:`google.cloud.aiplatform_v1.types.ListDatasetsResponse` object, and
+ provides an ``__iter__`` method to iterate through its
+ ``datasets`` field.
+
+ If there are more pages, the ``__iter__`` method will make additional
+ ``ListDatasets`` requests and continue to iterate
+ through the ``datasets`` field on the
+ corresponding responses.
+
+ All the usual :class:`google.cloud.aiplatform_v1.types.ListDatasetsResponse`
+ attributes are available on the pager. If multiple requests are made, only
+ the most recent response is retained, and thus used for attribute lookup.
+ """
+
+ def __init__(
+ self,
+ method: Callable[..., dataset_service.ListDatasetsResponse],
+ request: dataset_service.ListDatasetsRequest,
+ response: dataset_service.ListDatasetsResponse,
+ *,
+ metadata: Sequence[Tuple[str, str]] = ()
+ ):
+ """Instantiate the pager.
+
+ Args:
+ method (Callable): The method that was originally called, and
+ which instantiated this pager.
+ request (google.cloud.aiplatform_v1.types.ListDatasetsRequest):
+ The initial request object.
+ response (google.cloud.aiplatform_v1.types.ListDatasetsResponse):
+ The initial response object.
+ metadata (Sequence[Tuple[str, str]]): Strings which should be
+ sent along with the request as metadata.
+ """
+ self._method = method
+ self._request = dataset_service.ListDatasetsRequest(request)
+ self._response = response
+ self._metadata = metadata
+
+ def __getattr__(self, name: str) -> Any:
+ return getattr(self._response, name)
+
+ @property
+ def pages(self) -> Iterable[dataset_service.ListDatasetsResponse]:
+ yield self._response
+ while self._response.next_page_token:
+ self._request.page_token = self._response.next_page_token
+ self._response = self._method(self._request, metadata=self._metadata)
+ yield self._response
+
+ def __iter__(self) -> Iterable[dataset.Dataset]:
+ for page in self.pages:
+ yield from page.datasets
+
+ def __repr__(self) -> str:
+ return "{0}<{1!r}>".format(self.__class__.__name__, self._response)
+
+
+class ListDatasetsAsyncPager:
+ """A pager for iterating through ``list_datasets`` requests.
+
+ This class thinly wraps an initial
+ :class:`google.cloud.aiplatform_v1.types.ListDatasetsResponse` object, and
+ provides an ``__aiter__`` method to iterate through its
+ ``datasets`` field.
+
+ If there are more pages, the ``__aiter__`` method will make additional
+ ``ListDatasets`` requests and continue to iterate
+ through the ``datasets`` field on the
+ corresponding responses.
+
+ All the usual :class:`google.cloud.aiplatform_v1.types.ListDatasetsResponse`
+ attributes are available on the pager. If multiple requests are made, only
+ the most recent response is retained, and thus used for attribute lookup.
+ """
+
+ def __init__(
+ self,
+ method: Callable[..., Awaitable[dataset_service.ListDatasetsResponse]],
+ request: dataset_service.ListDatasetsRequest,
+ response: dataset_service.ListDatasetsResponse,
+ *,
+ metadata: Sequence[Tuple[str, str]] = ()
+ ):
+ """Instantiate the pager.
+
+ Args:
+ method (Callable): The method that was originally called, and
+ which instantiated this pager.
+ request (google.cloud.aiplatform_v1.types.ListDatasetsRequest):
+ The initial request object.
+ response (google.cloud.aiplatform_v1.types.ListDatasetsResponse):
+ The initial response object.
+ metadata (Sequence[Tuple[str, str]]): Strings which should be
+ sent along with the request as metadata.
+ """
+ self._method = method
+ self._request = dataset_service.ListDatasetsRequest(request)
+ self._response = response
+ self._metadata = metadata
+
+ def __getattr__(self, name: str) -> Any:
+ return getattr(self._response, name)
+
+ @property
+ async def pages(self) -> AsyncIterable[dataset_service.ListDatasetsResponse]:
+ yield self._response
+ while self._response.next_page_token:
+ self._request.page_token = self._response.next_page_token
+ self._response = await self._method(self._request, metadata=self._metadata)
+ yield self._response
+
+ def __aiter__(self) -> AsyncIterable[dataset.Dataset]:
+ async def async_generator():
+ async for page in self.pages:
+ for response in page.datasets:
+ yield response
+
+ return async_generator()
+
+ def __repr__(self) -> str:
+ return "{0}<{1!r}>".format(self.__class__.__name__, self._response)
+
+
+class ListDataItemsPager:
+ """A pager for iterating through ``list_data_items`` requests.
+
+ This class thinly wraps an initial
+ :class:`google.cloud.aiplatform_v1.types.ListDataItemsResponse` object, and
+ provides an ``__iter__`` method to iterate through its
+ ``data_items`` field.
+
+ If there are more pages, the ``__iter__`` method will make additional
+ ``ListDataItems`` requests and continue to iterate
+ through the ``data_items`` field on the
+ corresponding responses.
+
+ All the usual :class:`google.cloud.aiplatform_v1.types.ListDataItemsResponse`
+ attributes are available on the pager. If multiple requests are made, only
+ the most recent response is retained, and thus used for attribute lookup.
+ """
+
+ def __init__(
+ self,
+ method: Callable[..., dataset_service.ListDataItemsResponse],
+ request: dataset_service.ListDataItemsRequest,
+ response: dataset_service.ListDataItemsResponse,
+ *,
+ metadata: Sequence[Tuple[str, str]] = ()
+ ):
+ """Instantiate the pager.
+
+ Args:
+ method (Callable): The method that was originally called, and
+ which instantiated this pager.
+ request (google.cloud.aiplatform_v1.types.ListDataItemsRequest):
+ The initial request object.
+ response (google.cloud.aiplatform_v1.types.ListDataItemsResponse):
+ The initial response object.
+ metadata (Sequence[Tuple[str, str]]): Strings which should be
+ sent along with the request as metadata.
+ """
+ self._method = method
+ self._request = dataset_service.ListDataItemsRequest(request)
+ self._response = response
+ self._metadata = metadata
+
+ def __getattr__(self, name: str) -> Any:
+ return getattr(self._response, name)
+
+ @property
+ def pages(self) -> Iterable[dataset_service.ListDataItemsResponse]:
+ yield self._response
+ while self._response.next_page_token:
+ self._request.page_token = self._response.next_page_token
+ self._response = self._method(self._request, metadata=self._metadata)
+ yield self._response
+
+ def __iter__(self) -> Iterable[data_item.DataItem]:
+ for page in self.pages:
+ yield from page.data_items
+
+ def __repr__(self) -> str:
+ return "{0}<{1!r}>".format(self.__class__.__name__, self._response)
+
+
+class ListDataItemsAsyncPager:
+ """A pager for iterating through ``list_data_items`` requests.
+
+ This class thinly wraps an initial
+ :class:`google.cloud.aiplatform_v1.types.ListDataItemsResponse` object, and
+ provides an ``__aiter__`` method to iterate through its
+ ``data_items`` field.
+
+ If there are more pages, the ``__aiter__`` method will make additional
+ ``ListDataItems`` requests and continue to iterate
+ through the ``data_items`` field on the
+ corresponding responses.
+
+ All the usual :class:`google.cloud.aiplatform_v1.types.ListDataItemsResponse`
+ attributes are available on the pager. If multiple requests are made, only
+ the most recent response is retained, and thus used for attribute lookup.
+ """
+
+ def __init__(
+ self,
+ method: Callable[..., Awaitable[dataset_service.ListDataItemsResponse]],
+ request: dataset_service.ListDataItemsRequest,
+ response: dataset_service.ListDataItemsResponse,
+ *,
+ metadata: Sequence[Tuple[str, str]] = ()
+ ):
+ """Instantiate the pager.
+
+ Args:
+ method (Callable): The method that was originally called, and
+ which instantiated this pager.
+ request (google.cloud.aiplatform_v1.types.ListDataItemsRequest):
+ The initial request object.
+ response (google.cloud.aiplatform_v1.types.ListDataItemsResponse):
+ The initial response object.
+ metadata (Sequence[Tuple[str, str]]): Strings which should be
+ sent along with the request as metadata.
+ """
+ self._method = method
+ self._request = dataset_service.ListDataItemsRequest(request)
+ self._response = response
+ self._metadata = metadata
+
+ def __getattr__(self, name: str) -> Any:
+ return getattr(self._response, name)
+
+ @property
+ async def pages(self) -> AsyncIterable[dataset_service.ListDataItemsResponse]:
+ yield self._response
+ while self._response.next_page_token:
+ self._request.page_token = self._response.next_page_token
+ self._response = await self._method(self._request, metadata=self._metadata)
+ yield self._response
+
+ def __aiter__(self) -> AsyncIterable[data_item.DataItem]:
+ async def async_generator():
+ async for page in self.pages:
+ for response in page.data_items:
+ yield response
+
+ return async_generator()
+
+ def __repr__(self) -> str:
+ return "{0}<{1!r}>".format(self.__class__.__name__, self._response)
+
+
+class ListAnnotationsPager:
+ """A pager for iterating through ``list_annotations`` requests.
+
+ This class thinly wraps an initial
+ :class:`google.cloud.aiplatform_v1.types.ListAnnotationsResponse` object, and
+ provides an ``__iter__`` method to iterate through its
+ ``annotations`` field.
+
+ If there are more pages, the ``__iter__`` method will make additional
+ ``ListAnnotations`` requests and continue to iterate
+ through the ``annotations`` field on the
+ corresponding responses.
+
+ All the usual :class:`google.cloud.aiplatform_v1.types.ListAnnotationsResponse`
+ attributes are available on the pager. If multiple requests are made, only
+ the most recent response is retained, and thus used for attribute lookup.
+ """
+
+ def __init__(
+ self,
+ method: Callable[..., dataset_service.ListAnnotationsResponse],
+ request: dataset_service.ListAnnotationsRequest,
+ response: dataset_service.ListAnnotationsResponse,
+ *,
+ metadata: Sequence[Tuple[str, str]] = ()
+ ):
+ """Instantiate the pager.
+
+ Args:
+ method (Callable): The method that was originally called, and
+ which instantiated this pager.
+ request (google.cloud.aiplatform_v1.types.ListAnnotationsRequest):
+ The initial request object.
+ response (google.cloud.aiplatform_v1.types.ListAnnotationsResponse):
+ The initial response object.
+ metadata (Sequence[Tuple[str, str]]): Strings which should be
+ sent along with the request as metadata.
+ """
+ self._method = method
+ self._request = dataset_service.ListAnnotationsRequest(request)
+ self._response = response
+ self._metadata = metadata
+
+ def __getattr__(self, name: str) -> Any:
+ return getattr(self._response, name)
+
+ @property
+ def pages(self) -> Iterable[dataset_service.ListAnnotationsResponse]:
+ yield self._response
+ while self._response.next_page_token:
+ self._request.page_token = self._response.next_page_token
+ self._response = self._method(self._request, metadata=self._metadata)
+ yield self._response
+
+ def __iter__(self) -> Iterable[annotation.Annotation]:
+ for page in self.pages:
+ yield from page.annotations
+
+ def __repr__(self) -> str:
+ return "{0}<{1!r}>".format(self.__class__.__name__, self._response)
+
+
+class ListAnnotationsAsyncPager:
+ """A pager for iterating through ``list_annotations`` requests.
+
+ This class thinly wraps an initial
+ :class:`google.cloud.aiplatform_v1.types.ListAnnotationsResponse` object, and
+ provides an ``__aiter__`` method to iterate through its
+ ``annotations`` field.
+
+ If there are more pages, the ``__aiter__`` method will make additional
+ ``ListAnnotations`` requests and continue to iterate
+ through the ``annotations`` field on the
+ corresponding responses.
+
+ All the usual :class:`google.cloud.aiplatform_v1.types.ListAnnotationsResponse`
+ attributes are available on the pager. If multiple requests are made, only
+ the most recent response is retained, and thus used for attribute lookup.
+ """
+
+ def __init__(
+ self,
+ method: Callable[..., Awaitable[dataset_service.ListAnnotationsResponse]],
+ request: dataset_service.ListAnnotationsRequest,
+ response: dataset_service.ListAnnotationsResponse,
+ *,
+ metadata: Sequence[Tuple[str, str]] = ()
+ ):
+ """Instantiate the pager.
+
+ Args:
+ method (Callable): The method that was originally called, and
+ which instantiated this pager.
+ request (google.cloud.aiplatform_v1.types.ListAnnotationsRequest):
+ The initial request object.
+ response (google.cloud.aiplatform_v1.types.ListAnnotationsResponse):
+ The initial response object.
+ metadata (Sequence[Tuple[str, str]]): Strings which should be
+ sent along with the request as metadata.
+ """
+ self._method = method
+ self._request = dataset_service.ListAnnotationsRequest(request)
+ self._response = response
+ self._metadata = metadata
+
+ def __getattr__(self, name: str) -> Any:
+ return getattr(self._response, name)
+
+ @property
+ async def pages(self) -> AsyncIterable[dataset_service.ListAnnotationsResponse]:
+ yield self._response
+ while self._response.next_page_token:
+ self._request.page_token = self._response.next_page_token
+ self._response = await self._method(self._request, metadata=self._metadata)
+ yield self._response
+
+ def __aiter__(self) -> AsyncIterable[annotation.Annotation]:
+ async def async_generator():
+ async for page in self.pages:
+ for response in page.annotations:
+ yield response
+
+ return async_generator()
+
+ def __repr__(self) -> str:
+ return "{0}<{1!r}>".format(self.__class__.__name__, self._response)
diff --git a/google/cloud/aiplatform_v1/services/dataset_service/transports/__init__.py b/google/cloud/aiplatform_v1/services/dataset_service/transports/__init__.py
new file mode 100644
index 0000000000..a4461d2ced
--- /dev/null
+++ b/google/cloud/aiplatform_v1/services/dataset_service/transports/__init__.py
@@ -0,0 +1,35 @@
+# -*- coding: utf-8 -*-
+
+# Copyright 2020 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+from collections import OrderedDict
+from typing import Dict, Type
+
+from .base import DatasetServiceTransport
+from .grpc import DatasetServiceGrpcTransport
+from .grpc_asyncio import DatasetServiceGrpcAsyncIOTransport
+
+
+# Compile a registry of transports.
+_transport_registry = OrderedDict() # type: Dict[str, Type[DatasetServiceTransport]]
+_transport_registry["grpc"] = DatasetServiceGrpcTransport
+_transport_registry["grpc_asyncio"] = DatasetServiceGrpcAsyncIOTransport
+
+__all__ = (
+ "DatasetServiceTransport",
+ "DatasetServiceGrpcTransport",
+ "DatasetServiceGrpcAsyncIOTransport",
+)
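A small sketch of how this registry is consulted: the client's get_transport_class helper (used in the client __init__ earlier in this diff) maps a transport label to one of the classes exported here, defaulting to "grpc".

from google.cloud.aiplatform_v1 import DatasetServiceClient

# With no argument the default ("grpc") transport class is returned;
# "grpc_asyncio" is the variant used by the async client.
grpc_cls = DatasetServiceClient.get_transport_class()
asyncio_cls = DatasetServiceClient.get_transport_class("grpc_asyncio")
print(grpc_cls.__name__, asyncio_cls.__name__)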
diff --git a/google/cloud/aiplatform_v1/services/dataset_service/transports/base.py b/google/cloud/aiplatform_v1/services/dataset_service/transports/base.py
new file mode 100644
index 0000000000..2ab4419d03
--- /dev/null
+++ b/google/cloud/aiplatform_v1/services/dataset_service/transports/base.py
@@ -0,0 +1,254 @@
+# -*- coding: utf-8 -*-
+
+# Copyright 2020 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+import abc
+import typing
+import pkg_resources
+
+from google import auth # type: ignore
+from google.api_core import exceptions # type: ignore
+from google.api_core import gapic_v1 # type: ignore
+from google.api_core import retry as retries # type: ignore
+from google.api_core import operations_v1 # type: ignore
+from google.auth import credentials # type: ignore
+
+from google.cloud.aiplatform_v1.types import annotation_spec
+from google.cloud.aiplatform_v1.types import dataset
+from google.cloud.aiplatform_v1.types import dataset as gca_dataset
+from google.cloud.aiplatform_v1.types import dataset_service
+from google.longrunning import operations_pb2 as operations # type: ignore
+
+
+try:
+ DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo(
+ gapic_version=pkg_resources.get_distribution(
+ "google-cloud-aiplatform",
+ ).version,
+ )
+except pkg_resources.DistributionNotFound:
+ DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo()
+
+
+class DatasetServiceTransport(abc.ABC):
+ """Abstract transport class for DatasetService."""
+
+ AUTH_SCOPES = ("https://www.googleapis.com/auth/cloud-platform",)
+
+ def __init__(
+ self,
+ *,
+ host: str = "aiplatform.googleapis.com",
+ credentials: credentials.Credentials = None,
+ credentials_file: typing.Optional[str] = None,
+ scopes: typing.Optional[typing.Sequence[str]] = AUTH_SCOPES,
+ quota_project_id: typing.Optional[str] = None,
+ client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
+ **kwargs,
+ ) -> None:
+ """Instantiate the transport.
+
+ Args:
+ host (Optional[str]): The hostname to connect to.
+ credentials (Optional[google.auth.credentials.Credentials]): The
+ authorization credentials to attach to requests. These
+ credentials identify the application to the service; if none
+ are specified, the client will attempt to ascertain the
+ credentials from the environment.
+ credentials_file (Optional[str]): A file with credentials that can
+ be loaded with :func:`google.auth.load_credentials_from_file`.
+ This argument is mutually exclusive with credentials.
+ scopes (Optional[Sequence[str]]): A list of scopes.
+ quota_project_id (Optional[str]): An optional project to use for billing
+ and quota.
+ client_info (google.api_core.gapic_v1.client_info.ClientInfo):
+ The client info used to send a user-agent string along with
+ API requests. If ``None``, then default info will be used.
+ Generally, you only need to set this if you're developing
+ your own client library.
+ """
+ # Save the hostname. Default to port 443 (HTTPS) if none is specified.
+ if ":" not in host:
+ host += ":443"
+ self._host = host
+
+ # If no credentials are provided, then determine the appropriate
+ # defaults.
+ if credentials and credentials_file:
+ raise exceptions.DuplicateCredentialArgs(
+ "'credentials_file' and 'credentials' are mutually exclusive"
+ )
+
+ if credentials_file is not None:
+ credentials, _ = auth.load_credentials_from_file(
+ credentials_file, scopes=scopes, quota_project_id=quota_project_id
+ )
+
+ elif credentials is None:
+ credentials, _ = auth.default(
+ scopes=scopes, quota_project_id=quota_project_id
+ )
+
+ # Save the credentials.
+ self._credentials = credentials
+
+ # Lifted into its own function so it can be stubbed out during tests.
+ self._prep_wrapped_messages(client_info)
+
+ def _prep_wrapped_messages(self, client_info):
+ # Precompute the wrapped methods.
+ self._wrapped_methods = {
+ self.create_dataset: gapic_v1.method.wrap_method(
+ self.create_dataset, default_timeout=None, client_info=client_info,
+ ),
+ self.get_dataset: gapic_v1.method.wrap_method(
+ self.get_dataset, default_timeout=None, client_info=client_info,
+ ),
+ self.update_dataset: gapic_v1.method.wrap_method(
+ self.update_dataset, default_timeout=None, client_info=client_info,
+ ),
+ self.list_datasets: gapic_v1.method.wrap_method(
+ self.list_datasets, default_timeout=None, client_info=client_info,
+ ),
+ self.delete_dataset: gapic_v1.method.wrap_method(
+ self.delete_dataset, default_timeout=None, client_info=client_info,
+ ),
+ self.import_data: gapic_v1.method.wrap_method(
+ self.import_data, default_timeout=None, client_info=client_info,
+ ),
+ self.export_data: gapic_v1.method.wrap_method(
+ self.export_data, default_timeout=None, client_info=client_info,
+ ),
+ self.list_data_items: gapic_v1.method.wrap_method(
+ self.list_data_items, default_timeout=None, client_info=client_info,
+ ),
+ self.get_annotation_spec: gapic_v1.method.wrap_method(
+ self.get_annotation_spec, default_timeout=None, client_info=client_info,
+ ),
+ self.list_annotations: gapic_v1.method.wrap_method(
+ self.list_annotations, default_timeout=None, client_info=client_info,
+ ),
+ }
+
+ @property
+ def operations_client(self) -> operations_v1.OperationsClient:
+ """Return the client designed to process long-running operations."""
+ raise NotImplementedError()
+
+ @property
+ def create_dataset(
+ self,
+ ) -> typing.Callable[
+ [dataset_service.CreateDatasetRequest],
+ typing.Union[operations.Operation, typing.Awaitable[operations.Operation]],
+ ]:
+ raise NotImplementedError()
+
+ @property
+ def get_dataset(
+ self,
+ ) -> typing.Callable[
+ [dataset_service.GetDatasetRequest],
+ typing.Union[dataset.Dataset, typing.Awaitable[dataset.Dataset]],
+ ]:
+ raise NotImplementedError()
+
+ @property
+ def update_dataset(
+ self,
+ ) -> typing.Callable[
+ [dataset_service.UpdateDatasetRequest],
+ typing.Union[gca_dataset.Dataset, typing.Awaitable[gca_dataset.Dataset]],
+ ]:
+ raise NotImplementedError()
+
+ @property
+ def list_datasets(
+ self,
+ ) -> typing.Callable[
+ [dataset_service.ListDatasetsRequest],
+ typing.Union[
+ dataset_service.ListDatasetsResponse,
+ typing.Awaitable[dataset_service.ListDatasetsResponse],
+ ],
+ ]:
+ raise NotImplementedError()
+
+ @property
+ def delete_dataset(
+ self,
+ ) -> typing.Callable[
+ [dataset_service.DeleteDatasetRequest],
+ typing.Union[operations.Operation, typing.Awaitable[operations.Operation]],
+ ]:
+ raise NotImplementedError()
+
+ @property
+ def import_data(
+ self,
+ ) -> typing.Callable[
+ [dataset_service.ImportDataRequest],
+ typing.Union[operations.Operation, typing.Awaitable[operations.Operation]],
+ ]:
+ raise NotImplementedError()
+
+ @property
+ def export_data(
+ self,
+ ) -> typing.Callable[
+ [dataset_service.ExportDataRequest],
+ typing.Union[operations.Operation, typing.Awaitable[operations.Operation]],
+ ]:
+ raise NotImplementedError()
+
+ @property
+ def list_data_items(
+ self,
+ ) -> typing.Callable[
+ [dataset_service.ListDataItemsRequest],
+ typing.Union[
+ dataset_service.ListDataItemsResponse,
+ typing.Awaitable[dataset_service.ListDataItemsResponse],
+ ],
+ ]:
+ raise NotImplementedError()
+
+ @property
+ def get_annotation_spec(
+ self,
+ ) -> typing.Callable[
+ [dataset_service.GetAnnotationSpecRequest],
+ typing.Union[
+ annotation_spec.AnnotationSpec,
+ typing.Awaitable[annotation_spec.AnnotationSpec],
+ ],
+ ]:
+ raise NotImplementedError()
+
+ @property
+ def list_annotations(
+ self,
+ ) -> typing.Callable[
+ [dataset_service.ListAnnotationsRequest],
+ typing.Union[
+ dataset_service.ListAnnotationsResponse,
+ typing.Awaitable[dataset_service.ListAnnotationsResponse],
+ ],
+ ]:
+ raise NotImplementedError()
+
+
+__all__ = ("DatasetServiceTransport",)
diff --git a/google/cloud/aiplatform_v1/services/dataset_service/transports/grpc.py b/google/cloud/aiplatform_v1/services/dataset_service/transports/grpc.py
new file mode 100644
index 0000000000..e5a54388cb
--- /dev/null
+++ b/google/cloud/aiplatform_v1/services/dataset_service/transports/grpc.py
@@ -0,0 +1,539 @@
+# -*- coding: utf-8 -*-
+
+# Copyright 2020 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+import warnings
+from typing import Callable, Dict, Optional, Sequence, Tuple
+
+from google.api_core import grpc_helpers # type: ignore
+from google.api_core import operations_v1 # type: ignore
+from google.api_core import gapic_v1 # type: ignore
+from google import auth # type: ignore
+from google.auth import credentials # type: ignore
+from google.auth.transport.grpc import SslCredentials # type: ignore
+
+import grpc # type: ignore
+
+from google.cloud.aiplatform_v1.types import annotation_spec
+from google.cloud.aiplatform_v1.types import dataset
+from google.cloud.aiplatform_v1.types import dataset as gca_dataset
+from google.cloud.aiplatform_v1.types import dataset_service
+from google.longrunning import operations_pb2 as operations # type: ignore
+
+from .base import DatasetServiceTransport, DEFAULT_CLIENT_INFO
+
+
+class DatasetServiceGrpcTransport(DatasetServiceTransport):
+ """gRPC backend transport for DatasetService.
+
+ This class defines the same methods as the primary client, so the
+ primary client can load the underlying transport implementation
+ and call it.
+
+ It sends protocol buffers over the wire using gRPC (which is built on
+ top of HTTP/2); the ``grpcio`` package must be installed.
+ """
+
+ _stubs: Dict[str, Callable]
+
+ def __init__(
+ self,
+ *,
+ host: str = "aiplatform.googleapis.com",
+ credentials: credentials.Credentials = None,
+ credentials_file: str = None,
+ scopes: Sequence[str] = None,
+ channel: grpc.Channel = None,
+ api_mtls_endpoint: str = None,
+ client_cert_source: Callable[[], Tuple[bytes, bytes]] = None,
+ ssl_channel_credentials: grpc.ChannelCredentials = None,
+ client_cert_source_for_mtls: Callable[[], Tuple[bytes, bytes]] = None,
+ quota_project_id: Optional[str] = None,
+ client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
+ ) -> None:
+ """Instantiate the transport.
+
+ Args:
+ host (Optional[str]): The hostname to connect to.
+ credentials (Optional[google.auth.credentials.Credentials]): The
+ authorization credentials to attach to requests. These
+ credentials identify the application to the service; if none
+ are specified, the client will attempt to ascertain the
+ credentials from the environment.
+ This argument is ignored if ``channel`` is provided.
+ credentials_file (Optional[str]): A file with credentials that can
+ be loaded with :func:`google.auth.load_credentials_from_file`.
+ This argument is ignored if ``channel`` is provided.
+            scopes (Optional[Sequence[str]]): A list of scopes. This argument is
+ ignored if ``channel`` is provided.
+ channel (Optional[grpc.Channel]): A ``Channel`` instance through
+ which to make calls.
+ api_mtls_endpoint (Optional[str]): Deprecated. The mutual TLS endpoint.
+ If provided, it overrides the ``host`` argument and tries to create
+ a mutual TLS channel with client SSL credentials from
+                ``client_cert_source`` or application default SSL credentials.
+ client_cert_source (Optional[Callable[[], Tuple[bytes, bytes]]]):
+ Deprecated. A callback to provide client SSL certificate bytes and
+ private key bytes, both in PEM format. It is ignored if
+ ``api_mtls_endpoint`` is None.
+ ssl_channel_credentials (grpc.ChannelCredentials): SSL credentials
+                for the gRPC channel. It is ignored if ``channel`` is provided.
+ client_cert_source_for_mtls (Optional[Callable[[], Tuple[bytes, bytes]]]):
+ A callback to provide client certificate bytes and private key bytes,
+ both in PEM format. It is used to configure mutual TLS channel. It is
+ ignored if ``channel`` or ``ssl_channel_credentials`` is provided.
+ quota_project_id (Optional[str]): An optional project to use for billing
+ and quota.
+ client_info (google.api_core.gapic_v1.client_info.ClientInfo):
+ The client info used to send a user-agent string along with
+ API requests. If ``None``, then default info will be used.
+ Generally, you only need to set this if you're developing
+ your own client library.
+
+ Raises:
+ google.auth.exceptions.MutualTLSChannelError: If mutual TLS transport
+ creation failed for any reason.
+ google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials``
+ and ``credentials_file`` are passed.
+ """
+ self._ssl_channel_credentials = ssl_channel_credentials
+
+ if api_mtls_endpoint:
+ warnings.warn("api_mtls_endpoint is deprecated", DeprecationWarning)
+ if client_cert_source:
+ warnings.warn("client_cert_source is deprecated", DeprecationWarning)
+
+ if channel:
+ # Sanity check: Ensure that channel and credentials are not both
+ # provided.
+ credentials = False
+
+ # If a channel was explicitly provided, set it.
+ self._grpc_channel = channel
+ self._ssl_channel_credentials = None
+ elif api_mtls_endpoint:
+ host = (
+ api_mtls_endpoint
+ if ":" in api_mtls_endpoint
+ else api_mtls_endpoint + ":443"
+ )
+
+ if credentials is None:
+ credentials, _ = auth.default(
+ scopes=self.AUTH_SCOPES, quota_project_id=quota_project_id
+ )
+
+ # Create SSL credentials with client_cert_source or application
+ # default SSL credentials.
+ if client_cert_source:
+ cert, key = client_cert_source()
+ ssl_credentials = grpc.ssl_channel_credentials(
+ certificate_chain=cert, private_key=key
+ )
+ else:
+ ssl_credentials = SslCredentials().ssl_credentials
+
+ # create a new channel. The provided one is ignored.
+ self._grpc_channel = type(self).create_channel(
+ host,
+ credentials=credentials,
+ credentials_file=credentials_file,
+ ssl_credentials=ssl_credentials,
+ scopes=scopes or self.AUTH_SCOPES,
+ quota_project_id=quota_project_id,
+ options=[
+ ("grpc.max_send_message_length", -1),
+ ("grpc.max_receive_message_length", -1),
+ ],
+ )
+ self._ssl_channel_credentials = ssl_credentials
+ else:
+ host = host if ":" in host else host + ":443"
+
+ if credentials is None:
+ credentials, _ = auth.default(
+ scopes=self.AUTH_SCOPES, quota_project_id=quota_project_id
+ )
+
+ if client_cert_source_for_mtls and not ssl_channel_credentials:
+ cert, key = client_cert_source_for_mtls()
+ self._ssl_channel_credentials = grpc.ssl_channel_credentials(
+ certificate_chain=cert, private_key=key
+ )
+
+ # create a new channel. The provided one is ignored.
+ self._grpc_channel = type(self).create_channel(
+ host,
+ credentials=credentials,
+ credentials_file=credentials_file,
+ ssl_credentials=self._ssl_channel_credentials,
+ scopes=scopes or self.AUTH_SCOPES,
+ quota_project_id=quota_project_id,
+ options=[
+ ("grpc.max_send_message_length", -1),
+ ("grpc.max_receive_message_length", -1),
+ ],
+ )
+
+ self._stubs = {} # type: Dict[str, Callable]
+ self._operations_client = None
+
+ # Run the base constructor.
+ super().__init__(
+ host=host,
+ credentials=credentials,
+ credentials_file=credentials_file,
+ scopes=scopes or self.AUTH_SCOPES,
+ quota_project_id=quota_project_id,
+ client_info=client_info,
+ )
+
+ @classmethod
+ def create_channel(
+ cls,
+ host: str = "aiplatform.googleapis.com",
+ credentials: credentials.Credentials = None,
+ credentials_file: str = None,
+ scopes: Optional[Sequence[str]] = None,
+ quota_project_id: Optional[str] = None,
+ **kwargs,
+ ) -> grpc.Channel:
+ """Create and return a gRPC channel object.
+ Args:
+            host (Optional[str]): The host for the channel to use.
+ credentials (Optional[~.Credentials]): The
+ authorization credentials to attach to requests. These
+ credentials identify this application to the service. If
+ none are specified, the client will attempt to ascertain
+ the credentials from the environment.
+ credentials_file (Optional[str]): A file with credentials that can
+ be loaded with :func:`google.auth.load_credentials_from_file`.
+ This argument is mutually exclusive with credentials.
+            scopes (Optional[Sequence[str]]): An optional list of scopes needed for this
+ service. These are only used when credentials are not specified and
+ are passed to :func:`google.auth.default`.
+ quota_project_id (Optional[str]): An optional project to use for billing
+ and quota.
+ kwargs (Optional[dict]): Keyword arguments, which are passed to the
+ channel creation.
+ Returns:
+ grpc.Channel: A gRPC channel object.
+
+ Raises:
+ google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials``
+ and ``credentials_file`` are passed.
+ """
+ scopes = scopes or cls.AUTH_SCOPES
+ return grpc_helpers.create_channel(
+ host,
+ credentials=credentials,
+ credentials_file=credentials_file,
+ scopes=scopes,
+ quota_project_id=quota_project_id,
+ **kwargs,
+ )
+
+ @property
+ def grpc_channel(self) -> grpc.Channel:
+ """Return the channel designed to connect to this service.
+ """
+ return self._grpc_channel
+
+ @property
+ def operations_client(self) -> operations_v1.OperationsClient:
+ """Create the client designed to process long-running operations.
+
+ This property caches on the instance; repeated calls return the same
+ client.
+ """
+ # Sanity check: Only create a new client if we do not already have one.
+ if self._operations_client is None:
+ self._operations_client = operations_v1.OperationsClient(self.grpc_channel)
+
+ # Return the client from cache.
+ return self._operations_client
+
+ @property
+ def create_dataset(
+ self,
+ ) -> Callable[[dataset_service.CreateDatasetRequest], operations.Operation]:
+ r"""Return a callable for the create dataset method over gRPC.
+
+ Creates a Dataset.
+
+ Returns:
+ Callable[[~.CreateDatasetRequest],
+ ~.Operation]:
+ A function that, when called, will call the underlying RPC
+ on the server.
+ """
+ # Generate a "stub function" on-the-fly which will actually make
+ # the request.
+ # gRPC handles serialization and deserialization, so we just need
+ # to pass in the functions for each.
+ if "create_dataset" not in self._stubs:
+ self._stubs["create_dataset"] = self.grpc_channel.unary_unary(
+ "/google.cloud.aiplatform.v1.DatasetService/CreateDataset",
+ request_serializer=dataset_service.CreateDatasetRequest.serialize,
+ response_deserializer=operations.Operation.FromString,
+ )
+ return self._stubs["create_dataset"]
+
+ @property
+ def get_dataset(
+ self,
+ ) -> Callable[[dataset_service.GetDatasetRequest], dataset.Dataset]:
+ r"""Return a callable for the get dataset method over gRPC.
+
+ Gets a Dataset.
+
+ Returns:
+ Callable[[~.GetDatasetRequest],
+ ~.Dataset]:
+ A function that, when called, will call the underlying RPC
+ on the server.
+ """
+ # Generate a "stub function" on-the-fly which will actually make
+ # the request.
+ # gRPC handles serialization and deserialization, so we just need
+ # to pass in the functions for each.
+ if "get_dataset" not in self._stubs:
+ self._stubs["get_dataset"] = self.grpc_channel.unary_unary(
+ "/google.cloud.aiplatform.v1.DatasetService/GetDataset",
+ request_serializer=dataset_service.GetDatasetRequest.serialize,
+ response_deserializer=dataset.Dataset.deserialize,
+ )
+ return self._stubs["get_dataset"]
+
+ @property
+ def update_dataset(
+ self,
+ ) -> Callable[[dataset_service.UpdateDatasetRequest], gca_dataset.Dataset]:
+ r"""Return a callable for the update dataset method over gRPC.
+
+ Updates a Dataset.
+
+ Returns:
+ Callable[[~.UpdateDatasetRequest],
+ ~.Dataset]:
+ A function that, when called, will call the underlying RPC
+ on the server.
+ """
+ # Generate a "stub function" on-the-fly which will actually make
+ # the request.
+ # gRPC handles serialization and deserialization, so we just need
+ # to pass in the functions for each.
+ if "update_dataset" not in self._stubs:
+ self._stubs["update_dataset"] = self.grpc_channel.unary_unary(
+ "/google.cloud.aiplatform.v1.DatasetService/UpdateDataset",
+ request_serializer=dataset_service.UpdateDatasetRequest.serialize,
+ response_deserializer=gca_dataset.Dataset.deserialize,
+ )
+ return self._stubs["update_dataset"]
+
+ @property
+ def list_datasets(
+ self,
+ ) -> Callable[
+ [dataset_service.ListDatasetsRequest], dataset_service.ListDatasetsResponse
+ ]:
+ r"""Return a callable for the list datasets method over gRPC.
+
+ Lists Datasets in a Location.
+
+ Returns:
+ Callable[[~.ListDatasetsRequest],
+ ~.ListDatasetsResponse]:
+ A function that, when called, will call the underlying RPC
+ on the server.
+ """
+ # Generate a "stub function" on-the-fly which will actually make
+ # the request.
+ # gRPC handles serialization and deserialization, so we just need
+ # to pass in the functions for each.
+ if "list_datasets" not in self._stubs:
+ self._stubs["list_datasets"] = self.grpc_channel.unary_unary(
+ "/google.cloud.aiplatform.v1.DatasetService/ListDatasets",
+ request_serializer=dataset_service.ListDatasetsRequest.serialize,
+ response_deserializer=dataset_service.ListDatasetsResponse.deserialize,
+ )
+ return self._stubs["list_datasets"]
+
+ @property
+ def delete_dataset(
+ self,
+ ) -> Callable[[dataset_service.DeleteDatasetRequest], operations.Operation]:
+ r"""Return a callable for the delete dataset method over gRPC.
+
+ Deletes a Dataset.
+
+ Returns:
+ Callable[[~.DeleteDatasetRequest],
+ ~.Operation]:
+ A function that, when called, will call the underlying RPC
+ on the server.
+ """
+ # Generate a "stub function" on-the-fly which will actually make
+ # the request.
+ # gRPC handles serialization and deserialization, so we just need
+ # to pass in the functions for each.
+ if "delete_dataset" not in self._stubs:
+ self._stubs["delete_dataset"] = self.grpc_channel.unary_unary(
+ "/google.cloud.aiplatform.v1.DatasetService/DeleteDataset",
+ request_serializer=dataset_service.DeleteDatasetRequest.serialize,
+ response_deserializer=operations.Operation.FromString,
+ )
+ return self._stubs["delete_dataset"]
+
+ @property
+ def import_data(
+ self,
+ ) -> Callable[[dataset_service.ImportDataRequest], operations.Operation]:
+ r"""Return a callable for the import data method over gRPC.
+
+ Imports data into a Dataset.
+
+ Returns:
+ Callable[[~.ImportDataRequest],
+ ~.Operation]:
+ A function that, when called, will call the underlying RPC
+ on the server.
+ """
+ # Generate a "stub function" on-the-fly which will actually make
+ # the request.
+ # gRPC handles serialization and deserialization, so we just need
+ # to pass in the functions for each.
+ if "import_data" not in self._stubs:
+ self._stubs["import_data"] = self.grpc_channel.unary_unary(
+ "/google.cloud.aiplatform.v1.DatasetService/ImportData",
+ request_serializer=dataset_service.ImportDataRequest.serialize,
+ response_deserializer=operations.Operation.FromString,
+ )
+ return self._stubs["import_data"]
+
+ @property
+ def export_data(
+ self,
+ ) -> Callable[[dataset_service.ExportDataRequest], operations.Operation]:
+ r"""Return a callable for the export data method over gRPC.
+
+ Exports data from a Dataset.
+
+ Returns:
+ Callable[[~.ExportDataRequest],
+ ~.Operation]:
+ A function that, when called, will call the underlying RPC
+ on the server.
+ """
+ # Generate a "stub function" on-the-fly which will actually make
+ # the request.
+ # gRPC handles serialization and deserialization, so we just need
+ # to pass in the functions for each.
+ if "export_data" not in self._stubs:
+ self._stubs["export_data"] = self.grpc_channel.unary_unary(
+ "/google.cloud.aiplatform.v1.DatasetService/ExportData",
+ request_serializer=dataset_service.ExportDataRequest.serialize,
+ response_deserializer=operations.Operation.FromString,
+ )
+ return self._stubs["export_data"]
+
+ @property
+ def list_data_items(
+ self,
+ ) -> Callable[
+ [dataset_service.ListDataItemsRequest], dataset_service.ListDataItemsResponse
+ ]:
+ r"""Return a callable for the list data items method over gRPC.
+
+ Lists DataItems in a Dataset.
+
+ Returns:
+ Callable[[~.ListDataItemsRequest],
+ ~.ListDataItemsResponse]:
+ A function that, when called, will call the underlying RPC
+ on the server.
+ """
+ # Generate a "stub function" on-the-fly which will actually make
+ # the request.
+ # gRPC handles serialization and deserialization, so we just need
+ # to pass in the functions for each.
+ if "list_data_items" not in self._stubs:
+ self._stubs["list_data_items"] = self.grpc_channel.unary_unary(
+ "/google.cloud.aiplatform.v1.DatasetService/ListDataItems",
+ request_serializer=dataset_service.ListDataItemsRequest.serialize,
+ response_deserializer=dataset_service.ListDataItemsResponse.deserialize,
+ )
+ return self._stubs["list_data_items"]
+
+ @property
+ def get_annotation_spec(
+ self,
+ ) -> Callable[
+ [dataset_service.GetAnnotationSpecRequest], annotation_spec.AnnotationSpec
+ ]:
+ r"""Return a callable for the get annotation spec method over gRPC.
+
+ Gets an AnnotationSpec.
+
+ Returns:
+ Callable[[~.GetAnnotationSpecRequest],
+ ~.AnnotationSpec]:
+ A function that, when called, will call the underlying RPC
+ on the server.
+ """
+ # Generate a "stub function" on-the-fly which will actually make
+ # the request.
+ # gRPC handles serialization and deserialization, so we just need
+ # to pass in the functions for each.
+ if "get_annotation_spec" not in self._stubs:
+ self._stubs["get_annotation_spec"] = self.grpc_channel.unary_unary(
+ "/google.cloud.aiplatform.v1.DatasetService/GetAnnotationSpec",
+ request_serializer=dataset_service.GetAnnotationSpecRequest.serialize,
+ response_deserializer=annotation_spec.AnnotationSpec.deserialize,
+ )
+ return self._stubs["get_annotation_spec"]
+
+ @property
+ def list_annotations(
+ self,
+ ) -> Callable[
+ [dataset_service.ListAnnotationsRequest],
+ dataset_service.ListAnnotationsResponse,
+ ]:
+ r"""Return a callable for the list annotations method over gRPC.
+
+        Lists Annotations that belong to a DataItem.
+
+ Returns:
+ Callable[[~.ListAnnotationsRequest],
+ ~.ListAnnotationsResponse]:
+ A function that, when called, will call the underlying RPC
+ on the server.
+ """
+ # Generate a "stub function" on-the-fly which will actually make
+ # the request.
+ # gRPC handles serialization and deserialization, so we just need
+ # to pass in the functions for each.
+ if "list_annotations" not in self._stubs:
+ self._stubs["list_annotations"] = self.grpc_channel.unary_unary(
+ "/google.cloud.aiplatform.v1.DatasetService/ListAnnotations",
+ request_serializer=dataset_service.ListAnnotationsRequest.serialize,
+ response_deserializer=dataset_service.ListAnnotationsResponse.deserialize,
+ )
+ return self._stubs["list_annotations"]
+
+
+__all__ = ("DatasetServiceGrpcTransport",)
diff --git a/google/cloud/aiplatform_v1/services/dataset_service/transports/grpc_asyncio.py b/google/cloud/aiplatform_v1/services/dataset_service/transports/grpc_asyncio.py
new file mode 100644
index 0000000000..bcf3331d6b
--- /dev/null
+++ b/google/cloud/aiplatform_v1/services/dataset_service/transports/grpc_asyncio.py
@@ -0,0 +1,554 @@
+# -*- coding: utf-8 -*-
+
+# Copyright 2020 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+import warnings
+from typing import Awaitable, Callable, Dict, Optional, Sequence, Tuple
+
+from google.api_core import gapic_v1 # type: ignore
+from google.api_core import grpc_helpers_async # type: ignore
+from google.api_core import operations_v1 # type: ignore
+from google import auth # type: ignore
+from google.auth import credentials # type: ignore
+from google.auth.transport.grpc import SslCredentials # type: ignore
+
+import grpc # type: ignore
+from grpc.experimental import aio # type: ignore
+
+from google.cloud.aiplatform_v1.types import annotation_spec
+from google.cloud.aiplatform_v1.types import dataset
+from google.cloud.aiplatform_v1.types import dataset as gca_dataset
+from google.cloud.aiplatform_v1.types import dataset_service
+from google.longrunning import operations_pb2 as operations # type: ignore
+
+from .base import DatasetServiceTransport, DEFAULT_CLIENT_INFO
+from .grpc import DatasetServiceGrpcTransport
+
+
+class DatasetServiceGrpcAsyncIOTransport(DatasetServiceTransport):
+ """gRPC AsyncIO backend transport for DatasetService.
+
+ This class defines the same methods as the primary client, so the
+ primary client can load the underlying transport implementation
+ and call it.
+
+ It sends protocol buffers over the wire using gRPC (which is built on
+ top of HTTP/2); the ``grpcio`` package must be installed.
+ """
+
+ _grpc_channel: aio.Channel
+ _stubs: Dict[str, Callable] = {}
+
+ @classmethod
+ def create_channel(
+ cls,
+ host: str = "aiplatform.googleapis.com",
+ credentials: credentials.Credentials = None,
+ credentials_file: Optional[str] = None,
+ scopes: Optional[Sequence[str]] = None,
+ quota_project_id: Optional[str] = None,
+ **kwargs,
+ ) -> aio.Channel:
+ """Create and return a gRPC AsyncIO channel object.
+ Args:
+            host (Optional[str]): The host for the channel to use.
+ credentials (Optional[~.Credentials]): The
+ authorization credentials to attach to requests. These
+ credentials identify this application to the service. If
+ none are specified, the client will attempt to ascertain
+ the credentials from the environment.
+ credentials_file (Optional[str]): A file with credentials that can
+ be loaded with :func:`google.auth.load_credentials_from_file`.
+ This argument is ignored if ``channel`` is provided.
+            scopes (Optional[Sequence[str]]): An optional list of scopes needed for this
+ service. These are only used when credentials are not specified and
+ are passed to :func:`google.auth.default`.
+ quota_project_id (Optional[str]): An optional project to use for billing
+ and quota.
+ kwargs (Optional[dict]): Keyword arguments, which are passed to the
+ channel creation.
+ Returns:
+ aio.Channel: A gRPC AsyncIO channel object.
+ """
+ scopes = scopes or cls.AUTH_SCOPES
+ return grpc_helpers_async.create_channel(
+ host,
+ credentials=credentials,
+ credentials_file=credentials_file,
+ scopes=scopes,
+ quota_project_id=quota_project_id,
+ **kwargs,
+ )
+
+ def __init__(
+ self,
+ *,
+ host: str = "aiplatform.googleapis.com",
+ credentials: credentials.Credentials = None,
+ credentials_file: Optional[str] = None,
+ scopes: Optional[Sequence[str]] = None,
+ channel: aio.Channel = None,
+ api_mtls_endpoint: str = None,
+ client_cert_source: Callable[[], Tuple[bytes, bytes]] = None,
+ ssl_channel_credentials: grpc.ChannelCredentials = None,
+ client_cert_source_for_mtls: Callable[[], Tuple[bytes, bytes]] = None,
+ quota_project_id=None,
+ client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
+ ) -> None:
+ """Instantiate the transport.
+
+ Args:
+ host (Optional[str]): The hostname to connect to.
+ credentials (Optional[google.auth.credentials.Credentials]): The
+ authorization credentials to attach to requests. These
+ credentials identify the application to the service; if none
+ are specified, the client will attempt to ascertain the
+ credentials from the environment.
+ This argument is ignored if ``channel`` is provided.
+ credentials_file (Optional[str]): A file with credentials that can
+ be loaded with :func:`google.auth.load_credentials_from_file`.
+ This argument is ignored if ``channel`` is provided.
+            scopes (Optional[Sequence[str]]): An optional list of scopes needed for this
+ service. These are only used when credentials are not specified and
+ are passed to :func:`google.auth.default`.
+ channel (Optional[aio.Channel]): A ``Channel`` instance through
+ which to make calls.
+ api_mtls_endpoint (Optional[str]): Deprecated. The mutual TLS endpoint.
+ If provided, it overrides the ``host`` argument and tries to create
+ a mutual TLS channel with client SSL credentials from
+                ``client_cert_source`` or application default SSL credentials.
+ client_cert_source (Optional[Callable[[], Tuple[bytes, bytes]]]):
+ Deprecated. A callback to provide client SSL certificate bytes and
+ private key bytes, both in PEM format. It is ignored if
+ ``api_mtls_endpoint`` is None.
+ ssl_channel_credentials (grpc.ChannelCredentials): SSL credentials
+                for the gRPC channel. It is ignored if ``channel`` is provided.
+ client_cert_source_for_mtls (Optional[Callable[[], Tuple[bytes, bytes]]]):
+ A callback to provide client certificate bytes and private key bytes,
+ both in PEM format. It is used to configure mutual TLS channel. It is
+ ignored if ``channel`` or ``ssl_channel_credentials`` is provided.
+ quota_project_id (Optional[str]): An optional project to use for billing
+ and quota.
+ client_info (google.api_core.gapic_v1.client_info.ClientInfo):
+ The client info used to send a user-agent string along with
+ API requests. If ``None``, then default info will be used.
+ Generally, you only need to set this if you're developing
+ your own client library.
+
+ Raises:
+            google.auth.exceptions.MutualTLSChannelError: If mutual TLS transport
+ creation failed for any reason.
+ google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials``
+ and ``credentials_file`` are passed.
+ """
+ self._ssl_channel_credentials = ssl_channel_credentials
+
+ if api_mtls_endpoint:
+ warnings.warn("api_mtls_endpoint is deprecated", DeprecationWarning)
+ if client_cert_source:
+ warnings.warn("client_cert_source is deprecated", DeprecationWarning)
+
+ if channel:
+ # Sanity check: Ensure that channel and credentials are not both
+ # provided.
+ credentials = False
+
+ # If a channel was explicitly provided, set it.
+ self._grpc_channel = channel
+ self._ssl_channel_credentials = None
+ elif api_mtls_endpoint:
+ host = (
+ api_mtls_endpoint
+ if ":" in api_mtls_endpoint
+ else api_mtls_endpoint + ":443"
+ )
+
+ if credentials is None:
+ credentials, _ = auth.default(
+ scopes=self.AUTH_SCOPES, quota_project_id=quota_project_id
+ )
+
+ # Create SSL credentials with client_cert_source or application
+ # default SSL credentials.
+ if client_cert_source:
+ cert, key = client_cert_source()
+ ssl_credentials = grpc.ssl_channel_credentials(
+ certificate_chain=cert, private_key=key
+ )
+ else:
+ ssl_credentials = SslCredentials().ssl_credentials
+
+ # create a new channel. The provided one is ignored.
+ self._grpc_channel = type(self).create_channel(
+ host,
+ credentials=credentials,
+ credentials_file=credentials_file,
+ ssl_credentials=ssl_credentials,
+ scopes=scopes or self.AUTH_SCOPES,
+ quota_project_id=quota_project_id,
+ options=[
+ ("grpc.max_send_message_length", -1),
+ ("grpc.max_receive_message_length", -1),
+ ],
+ )
+ self._ssl_channel_credentials = ssl_credentials
+ else:
+ host = host if ":" in host else host + ":443"
+
+ if credentials is None:
+ credentials, _ = auth.default(
+ scopes=self.AUTH_SCOPES, quota_project_id=quota_project_id
+ )
+
+ if client_cert_source_for_mtls and not ssl_channel_credentials:
+ cert, key = client_cert_source_for_mtls()
+ self._ssl_channel_credentials = grpc.ssl_channel_credentials(
+ certificate_chain=cert, private_key=key
+ )
+
+ # create a new channel. The provided one is ignored.
+ self._grpc_channel = type(self).create_channel(
+ host,
+ credentials=credentials,
+ credentials_file=credentials_file,
+ ssl_credentials=self._ssl_channel_credentials,
+ scopes=scopes or self.AUTH_SCOPES,
+ quota_project_id=quota_project_id,
+ options=[
+ ("grpc.max_send_message_length", -1),
+ ("grpc.max_receive_message_length", -1),
+ ],
+ )
+
+ # Run the base constructor.
+ super().__init__(
+ host=host,
+ credentials=credentials,
+ credentials_file=credentials_file,
+ scopes=scopes or self.AUTH_SCOPES,
+ quota_project_id=quota_project_id,
+ client_info=client_info,
+ )
+
+ self._stubs = {}
+ self._operations_client = None
+
+ @property
+ def grpc_channel(self) -> aio.Channel:
+ """Create the channel designed to connect to this service.
+
+ This property caches on the instance; repeated calls return
+ the same channel.
+ """
+ # Return the channel from cache.
+ return self._grpc_channel
+
+ @property
+ def operations_client(self) -> operations_v1.OperationsAsyncClient:
+ """Create the client designed to process long-running operations.
+
+ This property caches on the instance; repeated calls return the same
+ client.
+ """
+ # Sanity check: Only create a new client if we do not already have one.
+ if self._operations_client is None:
+ self._operations_client = operations_v1.OperationsAsyncClient(
+ self.grpc_channel
+ )
+
+ # Return the client from cache.
+ return self._operations_client
+
+ @property
+ def create_dataset(
+ self,
+ ) -> Callable[
+ [dataset_service.CreateDatasetRequest], Awaitable[operations.Operation]
+ ]:
+ r"""Return a callable for the create dataset method over gRPC.
+
+ Creates a Dataset.
+
+ Returns:
+ Callable[[~.CreateDatasetRequest],
+ Awaitable[~.Operation]]:
+ A function that, when called, will call the underlying RPC
+ on the server.
+ """
+ # Generate a "stub function" on-the-fly which will actually make
+ # the request.
+ # gRPC handles serialization and deserialization, so we just need
+ # to pass in the functions for each.
+ if "create_dataset" not in self._stubs:
+ self._stubs["create_dataset"] = self.grpc_channel.unary_unary(
+ "/google.cloud.aiplatform.v1.DatasetService/CreateDataset",
+ request_serializer=dataset_service.CreateDatasetRequest.serialize,
+ response_deserializer=operations.Operation.FromString,
+ )
+ return self._stubs["create_dataset"]
+
+ @property
+ def get_dataset(
+ self,
+ ) -> Callable[[dataset_service.GetDatasetRequest], Awaitable[dataset.Dataset]]:
+ r"""Return a callable for the get dataset method over gRPC.
+
+ Gets a Dataset.
+
+ Returns:
+ Callable[[~.GetDatasetRequest],
+ Awaitable[~.Dataset]]:
+ A function that, when called, will call the underlying RPC
+ on the server.
+ """
+ # Generate a "stub function" on-the-fly which will actually make
+ # the request.
+ # gRPC handles serialization and deserialization, so we just need
+ # to pass in the functions for each.
+ if "get_dataset" not in self._stubs:
+ self._stubs["get_dataset"] = self.grpc_channel.unary_unary(
+ "/google.cloud.aiplatform.v1.DatasetService/GetDataset",
+ request_serializer=dataset_service.GetDatasetRequest.serialize,
+ response_deserializer=dataset.Dataset.deserialize,
+ )
+ return self._stubs["get_dataset"]
+
+ @property
+ def update_dataset(
+ self,
+ ) -> Callable[
+ [dataset_service.UpdateDatasetRequest], Awaitable[gca_dataset.Dataset]
+ ]:
+ r"""Return a callable for the update dataset method over gRPC.
+
+ Updates a Dataset.
+
+ Returns:
+ Callable[[~.UpdateDatasetRequest],
+ Awaitable[~.Dataset]]:
+ A function that, when called, will call the underlying RPC
+ on the server.
+ """
+ # Generate a "stub function" on-the-fly which will actually make
+ # the request.
+ # gRPC handles serialization and deserialization, so we just need
+ # to pass in the functions for each.
+ if "update_dataset" not in self._stubs:
+ self._stubs["update_dataset"] = self.grpc_channel.unary_unary(
+ "/google.cloud.aiplatform.v1.DatasetService/UpdateDataset",
+ request_serializer=dataset_service.UpdateDatasetRequest.serialize,
+ response_deserializer=gca_dataset.Dataset.deserialize,
+ )
+ return self._stubs["update_dataset"]
+
+ @property
+ def list_datasets(
+ self,
+ ) -> Callable[
+ [dataset_service.ListDatasetsRequest],
+ Awaitable[dataset_service.ListDatasetsResponse],
+ ]:
+ r"""Return a callable for the list datasets method over gRPC.
+
+ Lists Datasets in a Location.
+
+ Returns:
+ Callable[[~.ListDatasetsRequest],
+ Awaitable[~.ListDatasetsResponse]]:
+ A function that, when called, will call the underlying RPC
+ on the server.
+ """
+ # Generate a "stub function" on-the-fly which will actually make
+ # the request.
+ # gRPC handles serialization and deserialization, so we just need
+ # to pass in the functions for each.
+ if "list_datasets" not in self._stubs:
+ self._stubs["list_datasets"] = self.grpc_channel.unary_unary(
+ "/google.cloud.aiplatform.v1.DatasetService/ListDatasets",
+ request_serializer=dataset_service.ListDatasetsRequest.serialize,
+ response_deserializer=dataset_service.ListDatasetsResponse.deserialize,
+ )
+ return self._stubs["list_datasets"]
+
+ @property
+ def delete_dataset(
+ self,
+ ) -> Callable[
+ [dataset_service.DeleteDatasetRequest], Awaitable[operations.Operation]
+ ]:
+ r"""Return a callable for the delete dataset method over gRPC.
+
+ Deletes a Dataset.
+
+ Returns:
+ Callable[[~.DeleteDatasetRequest],
+ Awaitable[~.Operation]]:
+ A function that, when called, will call the underlying RPC
+ on the server.
+ """
+ # Generate a "stub function" on-the-fly which will actually make
+ # the request.
+ # gRPC handles serialization and deserialization, so we just need
+ # to pass in the functions for each.
+ if "delete_dataset" not in self._stubs:
+ self._stubs["delete_dataset"] = self.grpc_channel.unary_unary(
+ "/google.cloud.aiplatform.v1.DatasetService/DeleteDataset",
+ request_serializer=dataset_service.DeleteDatasetRequest.serialize,
+ response_deserializer=operations.Operation.FromString,
+ )
+ return self._stubs["delete_dataset"]
+
+ @property
+ def import_data(
+ self,
+ ) -> Callable[[dataset_service.ImportDataRequest], Awaitable[operations.Operation]]:
+ r"""Return a callable for the import data method over gRPC.
+
+ Imports data into a Dataset.
+
+ Returns:
+ Callable[[~.ImportDataRequest],
+ Awaitable[~.Operation]]:
+ A function that, when called, will call the underlying RPC
+ on the server.
+ """
+ # Generate a "stub function" on-the-fly which will actually make
+ # the request.
+ # gRPC handles serialization and deserialization, so we just need
+ # to pass in the functions for each.
+ if "import_data" not in self._stubs:
+ self._stubs["import_data"] = self.grpc_channel.unary_unary(
+ "/google.cloud.aiplatform.v1.DatasetService/ImportData",
+ request_serializer=dataset_service.ImportDataRequest.serialize,
+ response_deserializer=operations.Operation.FromString,
+ )
+ return self._stubs["import_data"]
+
+ @property
+ def export_data(
+ self,
+ ) -> Callable[[dataset_service.ExportDataRequest], Awaitable[operations.Operation]]:
+ r"""Return a callable for the export data method over gRPC.
+
+ Exports data from a Dataset.
+
+ Returns:
+ Callable[[~.ExportDataRequest],
+ Awaitable[~.Operation]]:
+ A function that, when called, will call the underlying RPC
+ on the server.
+ """
+ # Generate a "stub function" on-the-fly which will actually make
+ # the request.
+ # gRPC handles serialization and deserialization, so we just need
+ # to pass in the functions for each.
+ if "export_data" not in self._stubs:
+ self._stubs["export_data"] = self.grpc_channel.unary_unary(
+ "/google.cloud.aiplatform.v1.DatasetService/ExportData",
+ request_serializer=dataset_service.ExportDataRequest.serialize,
+ response_deserializer=operations.Operation.FromString,
+ )
+ return self._stubs["export_data"]
+
+ @property
+ def list_data_items(
+ self,
+ ) -> Callable[
+ [dataset_service.ListDataItemsRequest],
+ Awaitable[dataset_service.ListDataItemsResponse],
+ ]:
+ r"""Return a callable for the list data items method over gRPC.
+
+ Lists DataItems in a Dataset.
+
+ Returns:
+ Callable[[~.ListDataItemsRequest],
+ Awaitable[~.ListDataItemsResponse]]:
+ A function that, when called, will call the underlying RPC
+ on the server.
+ """
+ # Generate a "stub function" on-the-fly which will actually make
+ # the request.
+ # gRPC handles serialization and deserialization, so we just need
+ # to pass in the functions for each.
+ if "list_data_items" not in self._stubs:
+ self._stubs["list_data_items"] = self.grpc_channel.unary_unary(
+ "/google.cloud.aiplatform.v1.DatasetService/ListDataItems",
+ request_serializer=dataset_service.ListDataItemsRequest.serialize,
+ response_deserializer=dataset_service.ListDataItemsResponse.deserialize,
+ )
+ return self._stubs["list_data_items"]
+
+ @property
+ def get_annotation_spec(
+ self,
+ ) -> Callable[
+ [dataset_service.GetAnnotationSpecRequest],
+ Awaitable[annotation_spec.AnnotationSpec],
+ ]:
+ r"""Return a callable for the get annotation spec method over gRPC.
+
+ Gets an AnnotationSpec.
+
+ Returns:
+ Callable[[~.GetAnnotationSpecRequest],
+ Awaitable[~.AnnotationSpec]]:
+ A function that, when called, will call the underlying RPC
+ on the server.
+ """
+ # Generate a "stub function" on-the-fly which will actually make
+ # the request.
+ # gRPC handles serialization and deserialization, so we just need
+ # to pass in the functions for each.
+ if "get_annotation_spec" not in self._stubs:
+ self._stubs["get_annotation_spec"] = self.grpc_channel.unary_unary(
+ "/google.cloud.aiplatform.v1.DatasetService/GetAnnotationSpec",
+ request_serializer=dataset_service.GetAnnotationSpecRequest.serialize,
+ response_deserializer=annotation_spec.AnnotationSpec.deserialize,
+ )
+ return self._stubs["get_annotation_spec"]
+
+ @property
+ def list_annotations(
+ self,
+ ) -> Callable[
+ [dataset_service.ListAnnotationsRequest],
+ Awaitable[dataset_service.ListAnnotationsResponse],
+ ]:
+ r"""Return a callable for the list annotations method over gRPC.
+
+        Lists Annotations that belong to a DataItem.
+
+ Returns:
+ Callable[[~.ListAnnotationsRequest],
+ Awaitable[~.ListAnnotationsResponse]]:
+ A function that, when called, will call the underlying RPC
+ on the server.
+ """
+ # Generate a "stub function" on-the-fly which will actually make
+ # the request.
+ # gRPC handles serialization and deserialization, so we just need
+ # to pass in the functions for each.
+ if "list_annotations" not in self._stubs:
+ self._stubs["list_annotations"] = self.grpc_channel.unary_unary(
+ "/google.cloud.aiplatform.v1.DatasetService/ListAnnotations",
+ request_serializer=dataset_service.ListAnnotationsRequest.serialize,
+ response_deserializer=dataset_service.ListAnnotationsResponse.deserialize,
+ )
+ return self._stubs["list_annotations"]
+
+
+__all__ = ("DatasetServiceGrpcAsyncIOTransport",)
diff --git a/google/cloud/aiplatform_v1/services/endpoint_service/__init__.py b/google/cloud/aiplatform_v1/services/endpoint_service/__init__.py
new file mode 100644
index 0000000000..035a5b2388
--- /dev/null
+++ b/google/cloud/aiplatform_v1/services/endpoint_service/__init__.py
@@ -0,0 +1,24 @@
+# -*- coding: utf-8 -*-
+
+# Copyright 2020 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+from .client import EndpointServiceClient
+from .async_client import EndpointServiceAsyncClient
+
+__all__ = (
+ "EndpointServiceClient",
+ "EndpointServiceAsyncClient",
+)
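
The async client defined in the next file is usually used along these lines; a hedged sketch with placeholder names, awaiting the long-running operation returned by create_endpoint:

```python
import asyncio

from google.cloud import aiplatform_v1


async def main() -> None:
    client = aiplatform_v1.EndpointServiceAsyncClient()
    operation = await client.create_endpoint(
        parent="projects/my-project/locations/us-central1",  # placeholder
        endpoint=aiplatform_v1.Endpoint(display_name="my-endpoint"),
    )
    endpoint = await operation.result()  # wait for the LRO to complete
    print(endpoint.name)


asyncio.run(main())
```
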
diff --git a/google/cloud/aiplatform_v1/services/endpoint_service/async_client.py b/google/cloud/aiplatform_v1/services/endpoint_service/async_client.py
new file mode 100644
index 0000000000..e36aa6dfde
--- /dev/null
+++ b/google/cloud/aiplatform_v1/services/endpoint_service/async_client.py
@@ -0,0 +1,841 @@
+# -*- coding: utf-8 -*-
+
+# Copyright 2020 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+from collections import OrderedDict
+import functools
+import re
+from typing import Dict, Sequence, Tuple, Type, Union
+import pkg_resources
+
+import google.api_core.client_options as ClientOptions # type: ignore
+from google.api_core import exceptions # type: ignore
+from google.api_core import gapic_v1 # type: ignore
+from google.api_core import retry as retries # type: ignore
+from google.auth import credentials # type: ignore
+from google.oauth2 import service_account # type: ignore
+
+from google.api_core import operation as ga_operation # type: ignore
+from google.api_core import operation_async # type: ignore
+from google.cloud.aiplatform_v1.services.endpoint_service import pagers
+from google.cloud.aiplatform_v1.types import encryption_spec
+from google.cloud.aiplatform_v1.types import endpoint
+from google.cloud.aiplatform_v1.types import endpoint as gca_endpoint
+from google.cloud.aiplatform_v1.types import endpoint_service
+from google.cloud.aiplatform_v1.types import operation as gca_operation
+from google.protobuf import empty_pb2 as empty # type: ignore
+from google.protobuf import field_mask_pb2 as field_mask # type: ignore
+from google.protobuf import timestamp_pb2 as timestamp # type: ignore
+
+from .transports.base import EndpointServiceTransport, DEFAULT_CLIENT_INFO
+from .transports.grpc_asyncio import EndpointServiceGrpcAsyncIOTransport
+from .client import EndpointServiceClient
+
+
+class EndpointServiceAsyncClient:
+ """"""
+
+ _client: EndpointServiceClient
+
+ DEFAULT_ENDPOINT = EndpointServiceClient.DEFAULT_ENDPOINT
+ DEFAULT_MTLS_ENDPOINT = EndpointServiceClient.DEFAULT_MTLS_ENDPOINT
+
+ endpoint_path = staticmethod(EndpointServiceClient.endpoint_path)
+ parse_endpoint_path = staticmethod(EndpointServiceClient.parse_endpoint_path)
+ model_path = staticmethod(EndpointServiceClient.model_path)
+ parse_model_path = staticmethod(EndpointServiceClient.parse_model_path)
+
+ common_billing_account_path = staticmethod(
+ EndpointServiceClient.common_billing_account_path
+ )
+ parse_common_billing_account_path = staticmethod(
+ EndpointServiceClient.parse_common_billing_account_path
+ )
+
+ common_folder_path = staticmethod(EndpointServiceClient.common_folder_path)
+ parse_common_folder_path = staticmethod(
+ EndpointServiceClient.parse_common_folder_path
+ )
+
+ common_organization_path = staticmethod(
+ EndpointServiceClient.common_organization_path
+ )
+ parse_common_organization_path = staticmethod(
+ EndpointServiceClient.parse_common_organization_path
+ )
+
+ common_project_path = staticmethod(EndpointServiceClient.common_project_path)
+ parse_common_project_path = staticmethod(
+ EndpointServiceClient.parse_common_project_path
+ )
+
+ common_location_path = staticmethod(EndpointServiceClient.common_location_path)
+ parse_common_location_path = staticmethod(
+ EndpointServiceClient.parse_common_location_path
+ )
+
+ from_service_account_info = EndpointServiceClient.from_service_account_info
+ from_service_account_file = EndpointServiceClient.from_service_account_file
+ from_service_account_json = from_service_account_file
+
+ @property
+ def transport(self) -> EndpointServiceTransport:
+ """Return the transport used by the client instance.
+
+ Returns:
+ EndpointServiceTransport: The transport used by the client instance.
+ """
+ return self._client.transport
+
+ get_transport_class = functools.partial(
+ type(EndpointServiceClient).get_transport_class, type(EndpointServiceClient)
+ )
+
+ def __init__(
+ self,
+ *,
+ credentials: credentials.Credentials = None,
+ transport: Union[str, EndpointServiceTransport] = "grpc_asyncio",
+ client_options: ClientOptions = None,
+ client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
+ ) -> None:
+ """Instantiate the endpoint service client.
+
+ Args:
+ credentials (Optional[google.auth.credentials.Credentials]): The
+ authorization credentials to attach to requests. These
+ credentials identify the application to the service; if none
+ are specified, the client will attempt to ascertain the
+ credentials from the environment.
+ transport (Union[str, ~.EndpointServiceTransport]): The
+ transport to use. If set to None, a transport is chosen
+ automatically.
+ client_options (ClientOptions): Custom options for the client. It
+ won't take effect if a ``transport`` instance is provided.
+ (1) The ``api_endpoint`` property can be used to override the
+ default endpoint provided by the client. GOOGLE_API_USE_MTLS_ENDPOINT
+ environment variable can also be used to override the endpoint:
+ "always" (always use the default mTLS endpoint), "never" (always
+ use the default regular endpoint) and "auto" (auto switch to the
+ default mTLS endpoint if client certificate is present, this is
+ the default value). However, the ``api_endpoint`` property takes
+ precedence if provided.
+ (2) If GOOGLE_API_USE_CLIENT_CERTIFICATE environment variable
+ is "true", then the ``client_cert_source`` property can be used
+ to provide client certificate for mutual TLS transport. If
+ not provided, the default SSL client certificate will be used if
+ present. If GOOGLE_API_USE_CLIENT_CERTIFICATE is "false" or not
+ set, no client certificate will be used.
+
+ Raises:
+            google.auth.exceptions.MutualTLSChannelError: If mutual TLS transport
+ creation failed for any reason.
+ """
+
+ self._client = EndpointServiceClient(
+ credentials=credentials,
+ transport=transport,
+ client_options=client_options,
+ client_info=client_info,
+ )
+
+ async def create_endpoint(
+ self,
+ request: endpoint_service.CreateEndpointRequest = None,
+ *,
+ parent: str = None,
+ endpoint: gca_endpoint.Endpoint = None,
+ retry: retries.Retry = gapic_v1.method.DEFAULT,
+ timeout: float = None,
+ metadata: Sequence[Tuple[str, str]] = (),
+ ) -> operation_async.AsyncOperation:
+ r"""Creates an Endpoint.
+
+ Args:
+ request (:class:`google.cloud.aiplatform_v1.types.CreateEndpointRequest`):
+ The request object. Request message for
+ ``EndpointService.CreateEndpoint``.
+ parent (:class:`str`):
+ Required. The resource name of the Location to create
+ the Endpoint in. Format:
+ ``projects/{project}/locations/{location}``
+
+ This corresponds to the ``parent`` field
+ on the ``request`` instance; if ``request`` is provided, this
+ should not be set.
+ endpoint (:class:`google.cloud.aiplatform_v1.types.Endpoint`):
+ Required. The Endpoint to create.
+ This corresponds to the ``endpoint`` field
+ on the ``request`` instance; if ``request`` is provided, this
+ should not be set.
+
+ retry (google.api_core.retry.Retry): Designation of what errors, if any,
+ should be retried.
+ timeout (float): The timeout for this request.
+ metadata (Sequence[Tuple[str, str]]): Strings which should be
+ sent along with the request as metadata.
+
+ Returns:
+ google.api_core.operation_async.AsyncOperation:
+ An object representing a long-running operation.
+
+ The result type for the operation will be :class:`google.cloud.aiplatform_v1.types.Endpoint` Models are deployed into it, and afterwards Endpoint is called to obtain
+ predictions and explanations.
+
+ """
+ # Create or coerce a protobuf request object.
+ # Sanity check: If we got a request object, we should *not* have
+ # gotten any keyword arguments that map to the request.
+ has_flattened_params = any([parent, endpoint])
+ if request is not None and has_flattened_params:
+ raise ValueError(
+ "If the `request` argument is set, then none of "
+ "the individual field arguments should be set."
+ )
+
+ request = endpoint_service.CreateEndpointRequest(request)
+
+ # If we have keyword arguments corresponding to fields on the
+ # request, apply these.
+
+ if parent is not None:
+ request.parent = parent
+ if endpoint is not None:
+ request.endpoint = endpoint
+
+ # Wrap the RPC method; this adds retry and timeout information,
+ # and friendly error handling.
+ rpc = gapic_v1.method_async.wrap_method(
+ self._client._transport.create_endpoint,
+ default_timeout=None,
+ client_info=DEFAULT_CLIENT_INFO,
+ )
+
+ # Certain fields should be provided within the metadata header;
+ # add these here.
+ metadata = tuple(metadata) + (
+ gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)),
+ )
+
+ # Send the request.
+ response = await rpc(request, retry=retry, timeout=timeout, metadata=metadata,)
+
+ # Wrap the response in an operation future.
+ response = operation_async.from_gapic(
+ response,
+ self._client._transport.operations_client,
+ gca_endpoint.Endpoint,
+ metadata_type=endpoint_service.CreateEndpointOperationMetadata,
+ )
+
+ # Done; return the response.
+ return response
+
+ async def get_endpoint(
+ self,
+ request: endpoint_service.GetEndpointRequest = None,
+ *,
+ name: str = None,
+ retry: retries.Retry = gapic_v1.method.DEFAULT,
+ timeout: float = None,
+ metadata: Sequence[Tuple[str, str]] = (),
+ ) -> endpoint.Endpoint:
+ r"""Gets an Endpoint.
+
+ Args:
+ request (:class:`google.cloud.aiplatform_v1.types.GetEndpointRequest`):
+ The request object. Request message for
+ ``EndpointService.GetEndpoint``
+ name (:class:`str`):
+ Required. The name of the Endpoint resource. Format:
+ ``projects/{project}/locations/{location}/endpoints/{endpoint}``
+
+ This corresponds to the ``name`` field
+ on the ``request`` instance; if ``request`` is provided, this
+ should not be set.
+
+ retry (google.api_core.retry.Retry): Designation of what errors, if any,
+ should be retried.
+ timeout (float): The timeout for this request.
+ metadata (Sequence[Tuple[str, str]]): Strings which should be
+ sent along with the request as metadata.
+
+ Returns:
+ google.cloud.aiplatform_v1.types.Endpoint:
+ Models are deployed into it, and
+ afterwards Endpoint is called to obtain
+ predictions and explanations.
+
+ """
+ # Create or coerce a protobuf request object.
+ # Sanity check: If we got a request object, we should *not* have
+ # gotten any keyword arguments that map to the request.
+ has_flattened_params = any([name])
+ if request is not None and has_flattened_params:
+ raise ValueError(
+ "If the `request` argument is set, then none of "
+ "the individual field arguments should be set."
+ )
+
+ request = endpoint_service.GetEndpointRequest(request)
+
+ # If we have keyword arguments corresponding to fields on the
+ # request, apply these.
+
+ if name is not None:
+ request.name = name
+
+ # Wrap the RPC method; this adds retry and timeout information,
+ # and friendly error handling.
+ rpc = gapic_v1.method_async.wrap_method(
+ self._client._transport.get_endpoint,
+ default_timeout=None,
+ client_info=DEFAULT_CLIENT_INFO,
+ )
+
+ # Certain fields should be provided within the metadata header;
+ # add these here.
+ metadata = tuple(metadata) + (
+ gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)),
+ )
+
+ # Send the request.
+ response = await rpc(request, retry=retry, timeout=timeout, metadata=metadata,)
+
+ # Done; return the response.
+ return response
+
+ async def list_endpoints(
+ self,
+ request: endpoint_service.ListEndpointsRequest = None,
+ *,
+ parent: str = None,
+ retry: retries.Retry = gapic_v1.method.DEFAULT,
+ timeout: float = None,
+ metadata: Sequence[Tuple[str, str]] = (),
+ ) -> pagers.ListEndpointsAsyncPager:
+ r"""Lists Endpoints in a Location.
+
+ Args:
+ request (:class:`google.cloud.aiplatform_v1.types.ListEndpointsRequest`):
+ The request object. Request message for
+ ``EndpointService.ListEndpoints``.
+ parent (:class:`str`):
+ Required. The resource name of the Location from which
+ to list the Endpoints. Format:
+ ``projects/{project}/locations/{location}``
+
+ This corresponds to the ``parent`` field
+ on the ``request`` instance; if ``request`` is provided, this
+ should not be set.
+
+ retry (google.api_core.retry.Retry): Designation of what errors, if any,
+ should be retried.
+ timeout (float): The timeout for this request.
+ metadata (Sequence[Tuple[str, str]]): Strings which should be
+ sent along with the request as metadata.
+
+ Returns:
+ google.cloud.aiplatform_v1.services.endpoint_service.pagers.ListEndpointsAsyncPager:
+ Response message for
+ ``EndpointService.ListEndpoints``.
+
+ Iterating over this object will yield results and
+ resolve additional pages automatically.
+
+ """
+ # Create or coerce a protobuf request object.
+ # Sanity check: If we got a request object, we should *not* have
+ # gotten any keyword arguments that map to the request.
+ has_flattened_params = any([parent])
+ if request is not None and has_flattened_params:
+ raise ValueError(
+ "If the `request` argument is set, then none of "
+ "the individual field arguments should be set."
+ )
+
+ request = endpoint_service.ListEndpointsRequest(request)
+
+ # If we have keyword arguments corresponding to fields on the
+ # request, apply these.
+
+ if parent is not None:
+ request.parent = parent
+
+ # Wrap the RPC method; this adds retry and timeout information,
+ # and friendly error handling.
+ rpc = gapic_v1.method_async.wrap_method(
+ self._client._transport.list_endpoints,
+ default_timeout=None,
+ client_info=DEFAULT_CLIENT_INFO,
+ )
+
+ # Certain fields should be provided within the metadata header;
+ # add these here.
+ metadata = tuple(metadata) + (
+ gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)),
+ )
+
+ # Send the request.
+ response = await rpc(request, retry=retry, timeout=timeout, metadata=metadata,)
+
+ # This method is paged; wrap the response in a pager, which provides
+ # an `__aiter__` convenience method.
+ response = pagers.ListEndpointsAsyncPager(
+ method=rpc, request=request, response=response, metadata=metadata,
+ )
+
+ # Done; return the response.
+ return response
+
+ async def update_endpoint(
+ self,
+ request: endpoint_service.UpdateEndpointRequest = None,
+ *,
+ endpoint: gca_endpoint.Endpoint = None,
+ update_mask: field_mask.FieldMask = None,
+ retry: retries.Retry = gapic_v1.method.DEFAULT,
+ timeout: float = None,
+ metadata: Sequence[Tuple[str, str]] = (),
+ ) -> gca_endpoint.Endpoint:
+ r"""Updates an Endpoint.
+
+ Args:
+ request (:class:`google.cloud.aiplatform_v1.types.UpdateEndpointRequest`):
+ The request object. Request message for
+ ``EndpointService.UpdateEndpoint``.
+ endpoint (:class:`google.cloud.aiplatform_v1.types.Endpoint`):
+ Required. The Endpoint which replaces
+ the resource on the server.
+
+ This corresponds to the ``endpoint`` field
+ on the ``request`` instance; if ``request`` is provided, this
+ should not be set.
+ update_mask (:class:`google.protobuf.field_mask_pb2.FieldMask`):
+ Required. The update mask applies to the resource. See
+ `FieldMask `__.
+
+ This corresponds to the ``update_mask`` field
+ on the ``request`` instance; if ``request`` is provided, this
+ should not be set.
+
+ retry (google.api_core.retry.Retry): Designation of what errors, if any,
+ should be retried.
+ timeout (float): The timeout for this request.
+ metadata (Sequence[Tuple[str, str]]): Strings which should be
+ sent along with the request as metadata.
+
+ Returns:
+ google.cloud.aiplatform_v1.types.Endpoint:
+ Models are deployed into it, and
+ afterwards Endpoint is called to obtain
+ predictions and explanations.
+
+ """
+ # Create or coerce a protobuf request object.
+ # Sanity check: If we got a request object, we should *not* have
+ # gotten any keyword arguments that map to the request.
+ has_flattened_params = any([endpoint, update_mask])
+ if request is not None and has_flattened_params:
+ raise ValueError(
+ "If the `request` argument is set, then none of "
+ "the individual field arguments should be set."
+ )
+
+ request = endpoint_service.UpdateEndpointRequest(request)
+
+ # If we have keyword arguments corresponding to fields on the
+ # request, apply these.
+
+ if endpoint is not None:
+ request.endpoint = endpoint
+ if update_mask is not None:
+ request.update_mask = update_mask
+
+ # Wrap the RPC method; this adds retry and timeout information,
+ # and friendly error handling.
+ rpc = gapic_v1.method_async.wrap_method(
+ self._client._transport.update_endpoint,
+ default_timeout=None,
+ client_info=DEFAULT_CLIENT_INFO,
+ )
+
+ # Certain fields should be provided within the metadata header;
+ # add these here.
+ metadata = tuple(metadata) + (
+ gapic_v1.routing_header.to_grpc_metadata(
+ (("endpoint.name", request.endpoint.name),)
+ ),
+ )
+
+ # Send the request.
+ response = await rpc(request, retry=retry, timeout=timeout, metadata=metadata,)
+
+ # Done; return the response.
+ return response
+
+ async def delete_endpoint(
+ self,
+ request: endpoint_service.DeleteEndpointRequest = None,
+ *,
+ name: str = None,
+ retry: retries.Retry = gapic_v1.method.DEFAULT,
+ timeout: float = None,
+ metadata: Sequence[Tuple[str, str]] = (),
+ ) -> operation_async.AsyncOperation:
+ r"""Deletes an Endpoint.
+
+ Args:
+ request (:class:`google.cloud.aiplatform_v1.types.DeleteEndpointRequest`):
+ The request object. Request message for
+ ``EndpointService.DeleteEndpoint``.
+ name (:class:`str`):
+ Required. The name of the Endpoint resource to be
+ deleted. Format:
+ ``projects/{project}/locations/{location}/endpoints/{endpoint}``
+
+ This corresponds to the ``name`` field
+ on the ``request`` instance; if ``request`` is provided, this
+ should not be set.
+
+ retry (google.api_core.retry.Retry): Designation of what errors, if any,
+ should be retried.
+ timeout (float): The timeout for this request.
+ metadata (Sequence[Tuple[str, str]]): Strings which should be
+ sent along with the request as metadata.
+
+ Returns:
+ google.api_core.operation_async.AsyncOperation:
+ An object representing a long-running operation.
+
+ The result type for the operation will be :class:`google.protobuf.empty_pb2.Empty` A generic empty message that you can re-use to avoid defining duplicated
+ empty messages in your APIs. A typical example is to
+ use it as the request or the response type of an API
+ method. For instance:
+
+ service Foo {
+ rpc Bar(google.protobuf.Empty) returns
+ (google.protobuf.Empty);
+
+ }
+
+ The JSON representation for Empty is empty JSON
+ object {}.
+
+ """
+ # Create or coerce a protobuf request object.
+ # Sanity check: If we got a request object, we should *not* have
+ # gotten any keyword arguments that map to the request.
+ has_flattened_params = any([name])
+ if request is not None and has_flattened_params:
+ raise ValueError(
+ "If the `request` argument is set, then none of "
+ "the individual field arguments should be set."
+ )
+
+ request = endpoint_service.DeleteEndpointRequest(request)
+
+ # If we have keyword arguments corresponding to fields on the
+ # request, apply these.
+
+ if name is not None:
+ request.name = name
+
+ # Wrap the RPC method; this adds retry and timeout information,
+ # and friendly error handling.
+ rpc = gapic_v1.method_async.wrap_method(
+ self._client._transport.delete_endpoint,
+ default_timeout=None,
+ client_info=DEFAULT_CLIENT_INFO,
+ )
+
+ # Certain fields should be provided within the metadata header;
+ # add these here.
+ metadata = tuple(metadata) + (
+ gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)),
+ )
+
+ # Send the request.
+ response = await rpc(request, retry=retry, timeout=timeout, metadata=metadata,)
+
+ # Wrap the response in an operation future.
+ response = operation_async.from_gapic(
+ response,
+ self._client._transport.operations_client,
+ empty.Empty,
+ metadata_type=gca_operation.DeleteOperationMetadata,
+ )
+
+ # Done; return the response.
+ return response
+
+ async def deploy_model(
+ self,
+ request: endpoint_service.DeployModelRequest = None,
+ *,
+ endpoint: str = None,
+ deployed_model: gca_endpoint.DeployedModel = None,
+ traffic_split: Sequence[
+ endpoint_service.DeployModelRequest.TrafficSplitEntry
+ ] = None,
+ retry: retries.Retry = gapic_v1.method.DEFAULT,
+ timeout: float = None,
+ metadata: Sequence[Tuple[str, str]] = (),
+ ) -> operation_async.AsyncOperation:
+ r"""Deploys a Model into this Endpoint, creating a
+ DeployedModel within it.
+
+ Args:
+ request (:class:`google.cloud.aiplatform_v1.types.DeployModelRequest`):
+ The request object. Request message for
+ ``EndpointService.DeployModel``.
+ endpoint (:class:`str`):
+ Required. The name of the Endpoint resource into which
+ to deploy a Model. Format:
+ ``projects/{project}/locations/{location}/endpoints/{endpoint}``
+
+ This corresponds to the ``endpoint`` field
+ on the ``request`` instance; if ``request`` is provided, this
+ should not be set.
+ deployed_model (:class:`google.cloud.aiplatform_v1.types.DeployedModel`):
+ Required. The DeployedModel to be created within the
+ Endpoint. Note that
+ ``Endpoint.traffic_split``
+ must be updated for the DeployedModel to start receiving
+ traffic, either as part of this call, or via
+ ``EndpointService.UpdateEndpoint``.
+
+ This corresponds to the ``deployed_model`` field
+ on the ``request`` instance; if ``request`` is provided, this
+ should not be set.
+ traffic_split (:class:`Sequence[google.cloud.aiplatform_v1.types.DeployModelRequest.TrafficSplitEntry]`):
+ A map from a DeployedModel's ID to the percentage of
+ this Endpoint's traffic that should be forwarded to that
+ DeployedModel.
+
+ If this field is non-empty, then the Endpoint's
+ ``traffic_split``
+ will be overwritten with it. To refer to the ID of the
+ Model being deployed by this call, use "0"; the actual
+ ID of the new DeployedModel will be filled in its place
+ by this method. The traffic percentage values must
+ add up to 100.
+
+ If this field is empty, then the Endpoint's
+ ``traffic_split``
+ is not updated.
+
+ This corresponds to the ``traffic_split`` field
+ on the ``request`` instance; if ``request`` is provided, this
+ should not be set.
+
+ retry (google.api_core.retry.Retry): Designation of what errors, if any,
+ should be retried.
+ timeout (float): The timeout for this request.
+ metadata (Sequence[Tuple[str, str]]): Strings which should be
+ sent along with the request as metadata.
+
+ Returns:
+ google.api_core.operation_async.AsyncOperation:
+ An object representing a long-running operation.
+
+ The result type for the operation will be
+ :class:`google.cloud.aiplatform_v1.types.DeployModelResponse`
+ Response message for
+ ``EndpointService.DeployModel``.
+
+ """
+ # Create or coerce a protobuf request object.
+ # Sanity check: If we got a request object, we should *not* have
+ # gotten any keyword arguments that map to the request.
+ has_flattened_params = any([endpoint, deployed_model, traffic_split])
+ if request is not None and has_flattened_params:
+ raise ValueError(
+ "If the `request` argument is set, then none of "
+ "the individual field arguments should be set."
+ )
+
+ request = endpoint_service.DeployModelRequest(request)
+
+ # If we have keyword arguments corresponding to fields on the
+ # request, apply these.
+
+ if endpoint is not None:
+ request.endpoint = endpoint
+ if deployed_model is not None:
+ request.deployed_model = deployed_model
+
+ if traffic_split:
+ request.traffic_split.update(traffic_split)
+
+ # Wrap the RPC method; this adds retry and timeout information,
+ # and friendly error handling.
+ rpc = gapic_v1.method_async.wrap_method(
+ self._client._transport.deploy_model,
+ default_timeout=None,
+ client_info=DEFAULT_CLIENT_INFO,
+ )
+
+ # Certain fields should be provided within the metadata header;
+ # add these here.
+ metadata = tuple(metadata) + (
+ gapic_v1.routing_header.to_grpc_metadata((("endpoint", request.endpoint),)),
+ )
+
+ # Send the request.
+ response = await rpc(request, retry=retry, timeout=timeout, metadata=metadata,)
+
+ # Wrap the response in an operation future.
+ response = operation_async.from_gapic(
+ response,
+ self._client._transport.operations_client,
+ endpoint_service.DeployModelResponse,
+ metadata_type=endpoint_service.DeployModelOperationMetadata,
+ )
+
+ # Done; return the response.
+ return response
+
+ async def undeploy_model(
+ self,
+ request: endpoint_service.UndeployModelRequest = None,
+ *,
+ endpoint: str = None,
+ deployed_model_id: str = None,
+ traffic_split: Sequence[
+ endpoint_service.UndeployModelRequest.TrafficSplitEntry
+ ] = None,
+ retry: retries.Retry = gapic_v1.method.DEFAULT,
+ timeout: float = None,
+ metadata: Sequence[Tuple[str, str]] = (),
+ ) -> operation_async.AsyncOperation:
+ r"""Undeploys a Model from an Endpoint, removing a
+ DeployedModel from it, and freeing all resources it's
+ using.
+
+ Args:
+ request (:class:`google.cloud.aiplatform_v1.types.UndeployModelRequest`):
+ The request object. Request message for
+ ``EndpointService.UndeployModel``.
+ endpoint (:class:`str`):
+ Required. The name of the Endpoint resource from which
+ to undeploy a Model. Format:
+ ``projects/{project}/locations/{location}/endpoints/{endpoint}``
+
+ This corresponds to the ``endpoint`` field
+ on the ``request`` instance; if ``request`` is provided, this
+ should not be set.
+ deployed_model_id (:class:`str`):
+ Required. The ID of the DeployedModel
+ to be undeployed from the Endpoint.
+
+ This corresponds to the ``deployed_model_id`` field
+ on the ``request`` instance; if ``request`` is provided, this
+ should not be set.
+ traffic_split (:class:`Sequence[google.cloud.aiplatform_v1.types.UndeployModelRequest.TrafficSplitEntry]`):
+ If this field is provided, then the Endpoint's
+ ``traffic_split``
+ will be overwritten with it. If the last DeployedModel is
+ being undeployed from the Endpoint, the
+ ``Endpoint.traffic_split`` will always end up empty when
+ this call returns. A DeployedModel will be successfully
+ undeployed only if it doesn't have any traffic assigned
+ to it when this method executes, or if this field
+ unassigns any traffic to it.
+
+ This corresponds to the ``traffic_split`` field
+ on the ``request`` instance; if ``request`` is provided, this
+ should not be set.
+
+ retry (google.api_core.retry.Retry): Designation of what errors, if any,
+ should be retried.
+ timeout (float): The timeout for this request.
+ metadata (Sequence[Tuple[str, str]]): Strings which should be
+ sent along with the request as metadata.
+
+ Returns:
+ google.api_core.operation_async.AsyncOperation:
+ An object representing a long-running operation.
+
+ The result type for the operation will be
+ :class:`google.cloud.aiplatform_v1.types.UndeployModelResponse`
+ Response message for
+ ``EndpointService.UndeployModel``.
+
+ """
+ # Create or coerce a protobuf request object.
+ # Sanity check: If we got a request object, we should *not* have
+ # gotten any keyword arguments that map to the request.
+ has_flattened_params = any([endpoint, deployed_model_id, traffic_split])
+ if request is not None and has_flattened_params:
+ raise ValueError(
+ "If the `request` argument is set, then none of "
+ "the individual field arguments should be set."
+ )
+
+ request = endpoint_service.UndeployModelRequest(request)
+
+ # If we have keyword arguments corresponding to fields on the
+ # request, apply these.
+
+ if endpoint is not None:
+ request.endpoint = endpoint
+ if deployed_model_id is not None:
+ request.deployed_model_id = deployed_model_id
+
+ if traffic_split:
+ request.traffic_split.update(traffic_split)
+
+ # Wrap the RPC method; this adds retry and timeout information,
+ # and friendly error handling.
+ rpc = gapic_v1.method_async.wrap_method(
+ self._client._transport.undeploy_model,
+ default_timeout=None,
+ client_info=DEFAULT_CLIENT_INFO,
+ )
+
+ # Certain fields should be provided within the metadata header;
+ # add these here.
+ metadata = tuple(metadata) + (
+ gapic_v1.routing_header.to_grpc_metadata((("endpoint", request.endpoint),)),
+ )
+
+ # Send the request.
+ response = await rpc(request, retry=retry, timeout=timeout, metadata=metadata,)
+
+ # Wrap the response in an operation future.
+ response = operation_async.from_gapic(
+ response,
+ self._client._transport.operations_client,
+ endpoint_service.UndeployModelResponse,
+ metadata_type=endpoint_service.UndeployModelOperationMetadata,
+ )
+
+ # Done; return the response.
+ return response
+
+
+try:
+ DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo(
+ gapic_version=pkg_resources.get_distribution(
+ "google-cloud-aiplatform",
+ ).version,
+ )
+except pkg_resources.DistributionNotFound:
+ DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo()
+
+
+__all__ = ("EndpointServiceAsyncClient",)
diff --git a/google/cloud/aiplatform_v1/services/endpoint_service/client.py b/google/cloud/aiplatform_v1/services/endpoint_service/client.py
new file mode 100644
index 0000000000..1316effa58
--- /dev/null
+++ b/google/cloud/aiplatform_v1/services/endpoint_service/client.py
@@ -0,0 +1,1064 @@
+# -*- coding: utf-8 -*-
+
+# Copyright 2020 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+from collections import OrderedDict
+from distutils import util
+import os
+import re
+from typing import Callable, Dict, Optional, Sequence, Tuple, Type, Union
+import pkg_resources
+
+from google.api_core import client_options as client_options_lib # type: ignore
+from google.api_core import exceptions # type: ignore
+from google.api_core import gapic_v1 # type: ignore
+from google.api_core import retry as retries # type: ignore
+from google.auth import credentials # type: ignore
+from google.auth.transport import mtls # type: ignore
+from google.auth.transport.grpc import SslCredentials # type: ignore
+from google.auth.exceptions import MutualTLSChannelError # type: ignore
+from google.oauth2 import service_account # type: ignore
+
+from google.api_core import operation as ga_operation # type: ignore
+from google.api_core import operation_async # type: ignore
+from google.cloud.aiplatform_v1.services.endpoint_service import pagers
+from google.cloud.aiplatform_v1.types import encryption_spec
+from google.cloud.aiplatform_v1.types import endpoint
+from google.cloud.aiplatform_v1.types import endpoint as gca_endpoint
+from google.cloud.aiplatform_v1.types import endpoint_service
+from google.cloud.aiplatform_v1.types import operation as gca_operation
+from google.protobuf import empty_pb2 as empty # type: ignore
+from google.protobuf import field_mask_pb2 as field_mask # type: ignore
+from google.protobuf import timestamp_pb2 as timestamp # type: ignore
+
+from .transports.base import EndpointServiceTransport, DEFAULT_CLIENT_INFO
+from .transports.grpc import EndpointServiceGrpcTransport
+from .transports.grpc_asyncio import EndpointServiceGrpcAsyncIOTransport
+
+
+class EndpointServiceClientMeta(type):
+ """Metaclass for the EndpointService client.
+
+ This provides class-level methods for building and retrieving
+ support objects (e.g. transport) without polluting the client instance
+ objects.
+ """
+
+ _transport_registry = (
+ OrderedDict()
+ ) # type: Dict[str, Type[EndpointServiceTransport]]
+ _transport_registry["grpc"] = EndpointServiceGrpcTransport
+ _transport_registry["grpc_asyncio"] = EndpointServiceGrpcAsyncIOTransport
+
+ def get_transport_class(cls, label: str = None,) -> Type[EndpointServiceTransport]:
+ """Return an appropriate transport class.
+
+ Args:
+ label: The name of the desired transport. If none is
+ provided, then the first transport in the registry is used.
+
+ Returns:
+ The transport class to use.
+ """
+ # If a specific transport is requested, return that one.
+ if label:
+ return cls._transport_registry[label]
+
+ # No transport is requested; return the default (that is, the first one
+ # in the dictionary).
+ return next(iter(cls._transport_registry.values()))
+
+
+class EndpointServiceClient(metaclass=EndpointServiceClientMeta):
+ """"""
+
+ @staticmethod
+ def _get_default_mtls_endpoint(api_endpoint):
+ """Convert api endpoint to mTLS endpoint.
+ Convert "*.sandbox.googleapis.com" and "*.googleapis.com" to
+ "*.mtls.sandbox.googleapis.com" and "*.mtls.googleapis.com" respectively.
+ Args:
+ api_endpoint (Optional[str]): the api endpoint to convert.
+ Returns:
+ str: converted mTLS api endpoint.
+ """
+ if not api_endpoint:
+ return api_endpoint
+
+ mtls_endpoint_re = re.compile(
+ r"(?P[^.]+)(?P\.mtls)?(?P\.sandbox)?(?P\.googleapis\.com)?"
+ )
+
+ m = mtls_endpoint_re.match(api_endpoint)
+ name, mtls, sandbox, googledomain = m.groups()
+ if mtls or not googledomain:
+ return api_endpoint
+
+ if sandbox:
+ return api_endpoint.replace(
+ "sandbox.googleapis.com", "mtls.sandbox.googleapis.com"
+ )
+
+ return api_endpoint.replace(".googleapis.com", ".mtls.googleapis.com")
+
+ DEFAULT_ENDPOINT = "aiplatform.googleapis.com"
+ DEFAULT_MTLS_ENDPOINT = _get_default_mtls_endpoint.__func__( # type: ignore
+ DEFAULT_ENDPOINT
+ )
+
+ @classmethod
+ def from_service_account_info(cls, info: dict, *args, **kwargs):
+ """Creates an instance of this client using the provided credentials info.
+
+ Args:
+ info (dict): The service account private key info.
+ args: Additional arguments to pass to the constructor.
+ kwargs: Additional arguments to pass to the constructor.
+
+ Returns:
+ EndpointServiceClient: The constructed client.
+ """
+ credentials = service_account.Credentials.from_service_account_info(info)
+ kwargs["credentials"] = credentials
+ return cls(*args, **kwargs)
+
+ @classmethod
+ def from_service_account_file(cls, filename: str, *args, **kwargs):
+ """Creates an instance of this client using the provided credentials
+ file.
+
+ Args:
+ filename (str): The path to the service account private key json
+ file.
+ args: Additional arguments to pass to the constructor.
+ kwargs: Additional arguments to pass to the constructor.
+
+ Returns:
+ EndpointServiceClient: The constructed client.
+ """
+ credentials = service_account.Credentials.from_service_account_file(filename)
+ kwargs["credentials"] = credentials
+ return cls(*args, **kwargs)
+
+ from_service_account_json = from_service_account_file
+
+ @property
+ def transport(self) -> EndpointServiceTransport:
+ """Return the transport used by the client instance.
+
+ Returns:
+ EndpointServiceTransport: The transport used by the client instance.
+ """
+ return self._transport
+
+ @staticmethod
+ def endpoint_path(project: str, location: str, endpoint: str,) -> str:
+ """Return a fully-qualified endpoint string."""
+ return "projects/{project}/locations/{location}/endpoints/{endpoint}".format(
+ project=project, location=location, endpoint=endpoint,
+ )
+
+ @staticmethod
+ def parse_endpoint_path(path: str) -> Dict[str, str]:
+ """Parse a endpoint path into its component segments."""
+ m = re.match(
+ r"^projects/(?P.+?)/locations/(?P.+?)/endpoints/(?P.+?)$",
+ path,
+ )
+ return m.groupdict() if m else {}
+
+ @staticmethod
+ def model_path(project: str, location: str, model: str,) -> str:
+ """Return a fully-qualified model string."""
+ return "projects/{project}/locations/{location}/models/{model}".format(
+ project=project, location=location, model=model,
+ )
+
+ @staticmethod
+ def parse_model_path(path: str) -> Dict[str, str]:
+ """Parse a model path into its component segments."""
+ m = re.match(
+ r"^projects/(?P.+?)/locations/(?P.+?)/models/(?P.+?)$",
+ path,
+ )
+ return m.groupdict() if m else {}
+
+ @staticmethod
+ def common_billing_account_path(billing_account: str,) -> str:
+ """Return a fully-qualified billing_account string."""
+ return "billingAccounts/{billing_account}".format(
+ billing_account=billing_account,
+ )
+
+ @staticmethod
+ def parse_common_billing_account_path(path: str) -> Dict[str, str]:
+ """Parse a billing_account path into its component segments."""
+ m = re.match(r"^billingAccounts/(?P.+?)$", path)
+ return m.groupdict() if m else {}
+
+ @staticmethod
+ def common_folder_path(folder: str,) -> str:
+ """Return a fully-qualified folder string."""
+ return "folders/{folder}".format(folder=folder,)
+
+ @staticmethod
+ def parse_common_folder_path(path: str) -> Dict[str, str]:
+ """Parse a folder path into its component segments."""
+ m = re.match(r"^folders/(?P.+?)$", path)
+ return m.groupdict() if m else {}
+
+ @staticmethod
+ def common_organization_path(organization: str,) -> str:
+ """Return a fully-qualified organization string."""
+ return "organizations/{organization}".format(organization=organization,)
+
+ @staticmethod
+ def parse_common_organization_path(path: str) -> Dict[str, str]:
+ """Parse a organization path into its component segments."""
+ m = re.match(r"^organizations/(?P.+?)$", path)
+ return m.groupdict() if m else {}
+
+ @staticmethod
+ def common_project_path(project: str,) -> str:
+ """Return a fully-qualified project string."""
+ return "projects/{project}".format(project=project,)
+
+ @staticmethod
+ def parse_common_project_path(path: str) -> Dict[str, str]:
+ """Parse a project path into its component segments."""
+ m = re.match(r"^projects/(?P.+?)$", path)
+ return m.groupdict() if m else {}
+
+ @staticmethod
+ def common_location_path(project: str, location: str,) -> str:
+ """Return a fully-qualified location string."""
+ return "projects/{project}/locations/{location}".format(
+ project=project, location=location,
+ )
+
+ @staticmethod
+ def parse_common_location_path(path: str) -> Dict[str, str]:
+ """Parse a location path into its component segments."""
+ m = re.match(r"^projects/(?P.+?)/locations/(?P.+?)$", path)
+ return m.groupdict() if m else {}
+
+ def __init__(
+ self,
+ *,
+ credentials: Optional[credentials.Credentials] = None,
+ transport: Union[str, EndpointServiceTransport, None] = None,
+ client_options: Optional[client_options_lib.ClientOptions] = None,
+ client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
+ ) -> None:
+ """Instantiate the endpoint service client.
+
+ Args:
+ credentials (Optional[google.auth.credentials.Credentials]): The
+ authorization credentials to attach to requests. These
+ credentials identify the application to the service; if none
+ are specified, the client will attempt to ascertain the
+ credentials from the environment.
+ transport (Union[str, EndpointServiceTransport]): The
+ transport to use. If set to None, a transport is chosen
+ automatically.
+ client_options (google.api_core.client_options.ClientOptions): Custom options for the
+ client. It won't take effect if a ``transport`` instance is provided.
+ (1) The ``api_endpoint`` property can be used to override the
+ default endpoint provided by the client. GOOGLE_API_USE_MTLS_ENDPOINT
+ environment variable can also be used to override the endpoint:
+ "always" (always use the default mTLS endpoint), "never" (always
+ use the default regular endpoint) and "auto" (auto switch to the
+ default mTLS endpoint if client certificate is present, this is
+ the default value). However, the ``api_endpoint`` property takes
+ precedence if provided.
+ (2) If GOOGLE_API_USE_CLIENT_CERTIFICATE environment variable
+ is "true", then the ``client_cert_source`` property can be used
+ to provide client certificate for mutual TLS transport. If
+ not provided, the default SSL client certificate will be used if
+ present. If GOOGLE_API_USE_CLIENT_CERTIFICATE is "false" or not
+ set, no client certificate will be used.
+ client_info (google.api_core.gapic_v1.client_info.ClientInfo):
+ The client info used to send a user-agent string along with
+ API requests. If ``None``, then default info will be used.
+ Generally, you only need to set this if you're developing
+ your own client library.
+
+ Raises:
+ google.auth.exceptions.MutualTLSChannelError: If mutual TLS transport
+ creation failed for any reason.
+ """
+ if isinstance(client_options, dict):
+ client_options = client_options_lib.from_dict(client_options)
+ if client_options is None:
+ client_options = client_options_lib.ClientOptions()
+
+ # Create SSL credentials for mutual TLS if needed.
+ use_client_cert = bool(
+ util.strtobool(os.getenv("GOOGLE_API_USE_CLIENT_CERTIFICATE", "false"))
+ )
+
+ client_cert_source_func = None
+ is_mtls = False
+ if use_client_cert:
+ if client_options.client_cert_source:
+ is_mtls = True
+ client_cert_source_func = client_options.client_cert_source
+ else:
+ is_mtls = mtls.has_default_client_cert_source()
+ client_cert_source_func = (
+ mtls.default_client_cert_source() if is_mtls else None
+ )
+
+ # Figure out which api endpoint to use.
+ if client_options.api_endpoint is not None:
+ api_endpoint = client_options.api_endpoint
+ else:
+ use_mtls_env = os.getenv("GOOGLE_API_USE_MTLS_ENDPOINT", "auto")
+ if use_mtls_env == "never":
+ api_endpoint = self.DEFAULT_ENDPOINT
+ elif use_mtls_env == "always":
+ api_endpoint = self.DEFAULT_MTLS_ENDPOINT
+ elif use_mtls_env == "auto":
+ api_endpoint = (
+ self.DEFAULT_MTLS_ENDPOINT if is_mtls else self.DEFAULT_ENDPOINT
+ )
+ else:
+ raise MutualTLSChannelError(
+ "Unsupported GOOGLE_API_USE_MTLS_ENDPOINT value. Accepted values: never, auto, always"
+ )
+
+ # Save or instantiate the transport.
+ # Ordinarily, we provide the transport, but allowing a custom transport
+ # instance provides an extensibility point for unusual situations.
+ if isinstance(transport, EndpointServiceTransport):
+ # transport is an EndpointServiceTransport instance.
+ if credentials or client_options.credentials_file:
+ raise ValueError(
+ "When providing a transport instance, "
+ "provide its credentials directly."
+ )
+ if client_options.scopes:
+ raise ValueError(
+ "When providing a transport instance, "
+ "provide its scopes directly."
+ )
+ self._transport = transport
+ else:
+ Transport = type(self).get_transport_class(transport)
+ self._transport = Transport(
+ credentials=credentials,
+ credentials_file=client_options.credentials_file,
+ host=api_endpoint,
+ scopes=client_options.scopes,
+ client_cert_source_for_mtls=client_cert_source_func,
+ quota_project_id=client_options.quota_project_id,
+ client_info=client_info,
+ )
+
+ def create_endpoint(
+ self,
+ request: endpoint_service.CreateEndpointRequest = None,
+ *,
+ parent: str = None,
+ endpoint: gca_endpoint.Endpoint = None,
+ retry: retries.Retry = gapic_v1.method.DEFAULT,
+ timeout: float = None,
+ metadata: Sequence[Tuple[str, str]] = (),
+ ) -> ga_operation.Operation:
+ r"""Creates an Endpoint.
+
+ Args:
+ request (google.cloud.aiplatform_v1.types.CreateEndpointRequest):
+ The request object. Request message for
+ ``EndpointService.CreateEndpoint``.
+ parent (str):
+ Required. The resource name of the Location to create
+ the Endpoint in. Format:
+ ``projects/{project}/locations/{location}``
+
+ This corresponds to the ``parent`` field
+ on the ``request`` instance; if ``request`` is provided, this
+ should not be set.
+ endpoint (google.cloud.aiplatform_v1.types.Endpoint):
+ Required. The Endpoint to create.
+ This corresponds to the ``endpoint`` field
+ on the ``request`` instance; if ``request`` is provided, this
+ should not be set.
+
+ retry (google.api_core.retry.Retry): Designation of what errors, if any,
+ should be retried.
+ timeout (float): The timeout for this request.
+ metadata (Sequence[Tuple[str, str]]): Strings which should be
+ sent along with the request as metadata.
+
+ Returns:
+ google.api_core.operation.Operation:
+ An object representing a long-running operation.
+
+ The result type for the operation will be :class:`google.cloud.aiplatform_v1.types.Endpoint` Models are deployed into it, and afterwards Endpoint is called to obtain
+ predictions and explanations.
+
+ """
+ # Create or coerce a protobuf request object.
+ # Sanity check: If we got a request object, we should *not* have
+ # gotten any keyword arguments that map to the request.
+ has_flattened_params = any([parent, endpoint])
+ if request is not None and has_flattened_params:
+ raise ValueError(
+ "If the `request` argument is set, then none of "
+ "the individual field arguments should be set."
+ )
+
+ # Minor optimization to avoid making a copy if the user passes
+ # in an endpoint_service.CreateEndpointRequest.
+ # There's no risk of modifying the input as we've already verified
+ # there are no flattened fields.
+ if not isinstance(request, endpoint_service.CreateEndpointRequest):
+ request = endpoint_service.CreateEndpointRequest(request)
+
+ # If we have keyword arguments corresponding to fields on the
+ # request, apply these.
+
+ if parent is not None:
+ request.parent = parent
+ if endpoint is not None:
+ request.endpoint = endpoint
+
+ # Wrap the RPC method; this adds retry and timeout information,
+ # and friendly error handling.
+ rpc = self._transport._wrapped_methods[self._transport.create_endpoint]
+
+ # Certain fields should be provided within the metadata header;
+ # add these here.
+ metadata = tuple(metadata) + (
+ gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)),
+ )
+
+ # Send the request.
+ response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,)
+
+ # Wrap the response in an operation future.
+ response = ga_operation.from_gapic(
+ response,
+ self._transport.operations_client,
+ gca_endpoint.Endpoint,
+ metadata_type=endpoint_service.CreateEndpointOperationMetadata,
+ )
+
+ # Done; return the response.
+ return response
+
+ def get_endpoint(
+ self,
+ request: endpoint_service.GetEndpointRequest = None,
+ *,
+ name: str = None,
+ retry: retries.Retry = gapic_v1.method.DEFAULT,
+ timeout: float = None,
+ metadata: Sequence[Tuple[str, str]] = (),
+ ) -> endpoint.Endpoint:
+ r"""Gets an Endpoint.
+
+ Args:
+ request (google.cloud.aiplatform_v1.types.GetEndpointRequest):
+ The request object. Request message for
+ ``EndpointService.GetEndpoint``
+ name (str):
+ Required. The name of the Endpoint resource. Format:
+ ``projects/{project}/locations/{location}/endpoints/{endpoint}``
+
+ This corresponds to the ``name`` field
+ on the ``request`` instance; if ``request`` is provided, this
+ should not be set.
+
+ retry (google.api_core.retry.Retry): Designation of what errors, if any,
+ should be retried.
+ timeout (float): The timeout for this request.
+ metadata (Sequence[Tuple[str, str]]): Strings which should be
+ sent along with the request as metadata.
+
+ Returns:
+ google.cloud.aiplatform_v1.types.Endpoint:
+ Models are deployed into it, and
+ afterwards Endpoint is called to obtain
+ predictions and explanations.
+
+ """
+ # Create or coerce a protobuf request object.
+ # Sanity check: If we got a request object, we should *not* have
+ # gotten any keyword arguments that map to the request.
+ has_flattened_params = any([name])
+ if request is not None and has_flattened_params:
+ raise ValueError(
+ "If the `request` argument is set, then none of "
+ "the individual field arguments should be set."
+ )
+
+ # Minor optimization to avoid making a copy if the user passes
+ # in an endpoint_service.GetEndpointRequest.
+ # There's no risk of modifying the input as we've already verified
+ # there are no flattened fields.
+ if not isinstance(request, endpoint_service.GetEndpointRequest):
+ request = endpoint_service.GetEndpointRequest(request)
+
+ # If we have keyword arguments corresponding to fields on the
+ # request, apply these.
+
+ if name is not None:
+ request.name = name
+
+ # Wrap the RPC method; this adds retry and timeout information,
+ # and friendly error handling.
+ rpc = self._transport._wrapped_methods[self._transport.get_endpoint]
+
+ # Certain fields should be provided within the metadata header;
+ # add these here.
+ metadata = tuple(metadata) + (
+ gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)),
+ )
+
+ # Send the request.
+ response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,)
+
+ # Done; return the response.
+ return response
+
+ def list_endpoints(
+ self,
+ request: endpoint_service.ListEndpointsRequest = None,
+ *,
+ parent: str = None,
+ retry: retries.Retry = gapic_v1.method.DEFAULT,
+ timeout: float = None,
+ metadata: Sequence[Tuple[str, str]] = (),
+ ) -> pagers.ListEndpointsPager:
+ r"""Lists Endpoints in a Location.
+
+ Args:
+ request (google.cloud.aiplatform_v1.types.ListEndpointsRequest):
+ The request object. Request message for
+ ``EndpointService.ListEndpoints``.
+ parent (str):
+ Required. The resource name of the Location from which
+ to list the Endpoints. Format:
+ ``projects/{project}/locations/{location}``
+
+ This corresponds to the ``parent`` field
+ on the ``request`` instance; if ``request`` is provided, this
+ should not be set.
+
+ retry (google.api_core.retry.Retry): Designation of what errors, if any,
+ should be retried.
+ timeout (float): The timeout for this request.
+ metadata (Sequence[Tuple[str, str]]): Strings which should be
+ sent along with the request as metadata.
+
+ Returns:
+ google.cloud.aiplatform_v1.services.endpoint_service.pagers.ListEndpointsPager:
+ Response message for
+ ``EndpointService.ListEndpoints``.
+
+ Iterating over this object will yield results and
+ resolve additional pages automatically.
+
+ """
+ # Create or coerce a protobuf request object.
+ # Sanity check: If we got a request object, we should *not* have
+ # gotten any keyword arguments that map to the request.
+ has_flattened_params = any([parent])
+ if request is not None and has_flattened_params:
+ raise ValueError(
+ "If the `request` argument is set, then none of "
+ "the individual field arguments should be set."
+ )
+
+ # Minor optimization to avoid making a copy if the user passes
+ # in an endpoint_service.ListEndpointsRequest.
+ # There's no risk of modifying the input as we've already verified
+ # there are no flattened fields.
+ if not isinstance(request, endpoint_service.ListEndpointsRequest):
+ request = endpoint_service.ListEndpointsRequest(request)
+
+ # If we have keyword arguments corresponding to fields on the
+ # request, apply these.
+
+ if parent is not None:
+ request.parent = parent
+
+ # Wrap the RPC method; this adds retry and timeout information,
+ # and friendly error handling.
+ rpc = self._transport._wrapped_methods[self._transport.list_endpoints]
+
+ # Certain fields should be provided within the metadata header;
+ # add these here.
+ metadata = tuple(metadata) + (
+ gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)),
+ )
+
+ # Send the request.
+ response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,)
+
+ # This method is paged; wrap the response in a pager, which provides
+ # an `__iter__` convenience method.
+ response = pagers.ListEndpointsPager(
+ method=rpc, request=request, response=response, metadata=metadata,
+ )
+
+ # Done; return the response.
+ return response
+
+ def update_endpoint(
+ self,
+ request: endpoint_service.UpdateEndpointRequest = None,
+ *,
+ endpoint: gca_endpoint.Endpoint = None,
+ update_mask: field_mask.FieldMask = None,
+ retry: retries.Retry = gapic_v1.method.DEFAULT,
+ timeout: float = None,
+ metadata: Sequence[Tuple[str, str]] = (),
+ ) -> gca_endpoint.Endpoint:
+ r"""Updates an Endpoint.
+
+ Args:
+ request (google.cloud.aiplatform_v1.types.UpdateEndpointRequest):
+ The request object. Request message for
+ ``EndpointService.UpdateEndpoint``.
+ endpoint (google.cloud.aiplatform_v1.types.Endpoint):
+ Required. The Endpoint which replaces
+ the resource on the server.
+
+ This corresponds to the ``endpoint`` field
+ on the ``request`` instance; if ``request`` is provided, this
+ should not be set.
+ update_mask (google.protobuf.field_mask_pb2.FieldMask):
+ Required. The update mask applies to the resource. See
+ `FieldMask `__.
+
+ This corresponds to the ``update_mask`` field
+ on the ``request`` instance; if ``request`` is provided, this
+ should not be set.
+
+ retry (google.api_core.retry.Retry): Designation of what errors, if any,
+ should be retried.
+ timeout (float): The timeout for this request.
+ metadata (Sequence[Tuple[str, str]]): Strings which should be
+ sent along with the request as metadata.
+
+ Returns:
+ google.cloud.aiplatform_v1.types.Endpoint:
+ Models are deployed into it, and
+ afterwards Endpoint is called to obtain
+ predictions and explanations.
+
+ """
+ # Create or coerce a protobuf request object.
+ # Sanity check: If we got a request object, we should *not* have
+ # gotten any keyword arguments that map to the request.
+ has_flattened_params = any([endpoint, update_mask])
+ if request is not None and has_flattened_params:
+ raise ValueError(
+ "If the `request` argument is set, then none of "
+ "the individual field arguments should be set."
+ )
+
+ # Minor optimization to avoid making a copy if the user passes
+ # in an endpoint_service.UpdateEndpointRequest.
+ # There's no risk of modifying the input as we've already verified
+ # there are no flattened fields.
+ if not isinstance(request, endpoint_service.UpdateEndpointRequest):
+ request = endpoint_service.UpdateEndpointRequest(request)
+
+ # If we have keyword arguments corresponding to fields on the
+ # request, apply these.
+
+ if endpoint is not None:
+ request.endpoint = endpoint
+ if update_mask is not None:
+ request.update_mask = update_mask
+
+ # Wrap the RPC method; this adds retry and timeout information,
+ # and friendly error handling.
+ rpc = self._transport._wrapped_methods[self._transport.update_endpoint]
+
+ # Certain fields should be provided within the metadata header;
+ # add these here.
+ metadata = tuple(metadata) + (
+ gapic_v1.routing_header.to_grpc_metadata(
+ (("endpoint.name", request.endpoint.name),)
+ ),
+ )
+
+ # Send the request.
+ response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,)
+
+ # Done; return the response.
+ return response
+
+ def delete_endpoint(
+ self,
+ request: endpoint_service.DeleteEndpointRequest = None,
+ *,
+ name: str = None,
+ retry: retries.Retry = gapic_v1.method.DEFAULT,
+ timeout: float = None,
+ metadata: Sequence[Tuple[str, str]] = (),
+ ) -> ga_operation.Operation:
+ r"""Deletes an Endpoint.
+
+ Args:
+ request (google.cloud.aiplatform_v1.types.DeleteEndpointRequest):
+ The request object. Request message for
+ ``EndpointService.DeleteEndpoint``.
+ name (str):
+ Required. The name of the Endpoint resource to be
+ deleted. Format:
+ ``projects/{project}/locations/{location}/endpoints/{endpoint}``
+
+ This corresponds to the ``name`` field
+ on the ``request`` instance; if ``request`` is provided, this
+ should not be set.
+
+ retry (google.api_core.retry.Retry): Designation of what errors, if any,
+ should be retried.
+ timeout (float): The timeout for this request.
+ metadata (Sequence[Tuple[str, str]]): Strings which should be
+ sent along with the request as metadata.
+
+ Returns:
+ google.api_core.operation.Operation:
+ An object representing a long-running operation.
+
+ The result type for the operation will be :class:`google.protobuf.empty_pb2.Empty` A generic empty message that you can re-use to avoid defining duplicated
+ empty messages in your APIs. A typical example is to
+ use it as the request or the response type of an API
+ method. For instance:
+
+ service Foo {
+ rpc Bar(google.protobuf.Empty) returns
+ (google.protobuf.Empty);
+
+ }
+
+ The JSON representation for Empty is empty JSON
+ object {}.
+
+ """
+ # Create or coerce a protobuf request object.
+ # Sanity check: If we got a request object, we should *not* have
+ # gotten any keyword arguments that map to the request.
+ has_flattened_params = any([name])
+ if request is not None and has_flattened_params:
+ raise ValueError(
+ "If the `request` argument is set, then none of "
+ "the individual field arguments should be set."
+ )
+
+ # Minor optimization to avoid making a copy if the user passes
+ # in an endpoint_service.DeleteEndpointRequest.
+ # There's no risk of modifying the input as we've already verified
+ # there are no flattened fields.
+ if not isinstance(request, endpoint_service.DeleteEndpointRequest):
+ request = endpoint_service.DeleteEndpointRequest(request)
+
+ # If we have keyword arguments corresponding to fields on the
+ # request, apply these.
+
+ if name is not None:
+ request.name = name
+
+ # Wrap the RPC method; this adds retry and timeout information,
+ # and friendly error handling.
+ rpc = self._transport._wrapped_methods[self._transport.delete_endpoint]
+
+ # Certain fields should be provided within the metadata header;
+ # add these here.
+ metadata = tuple(metadata) + (
+ gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)),
+ )
+
+ # Send the request.
+ response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,)
+
+ # Wrap the response in an operation future.
+ response = ga_operation.from_gapic(
+ response,
+ self._transport.operations_client,
+ empty.Empty,
+ metadata_type=gca_operation.DeleteOperationMetadata,
+ )
+
+ # Done; return the response.
+ return response
+
+ def deploy_model(
+ self,
+ request: endpoint_service.DeployModelRequest = None,
+ *,
+ endpoint: str = None,
+ deployed_model: gca_endpoint.DeployedModel = None,
+ traffic_split: Sequence[
+ endpoint_service.DeployModelRequest.TrafficSplitEntry
+ ] = None,
+ retry: retries.Retry = gapic_v1.method.DEFAULT,
+ timeout: float = None,
+ metadata: Sequence[Tuple[str, str]] = (),
+ ) -> ga_operation.Operation:
+ r"""Deploys a Model into this Endpoint, creating a
+ DeployedModel within it.
+
+ Args:
+ request (google.cloud.aiplatform_v1.types.DeployModelRequest):
+ The request object. Request message for
+ ``EndpointService.DeployModel``.
+ endpoint (str):
+ Required. The name of the Endpoint resource into which
+ to deploy a Model. Format:
+ ``projects/{project}/locations/{location}/endpoints/{endpoint}``
+
+ This corresponds to the ``endpoint`` field
+ on the ``request`` instance; if ``request`` is provided, this
+ should not be set.
+ deployed_model (google.cloud.aiplatform_v1.types.DeployedModel):
+ Required. The DeployedModel to be created within the
+ Endpoint. Note that
+ ``Endpoint.traffic_split``
+ must be updated for the DeployedModel to start receiving
+ traffic, either as part of this call, or via
+ ``EndpointService.UpdateEndpoint``.
+
+ This corresponds to the ``deployed_model`` field
+ on the ``request`` instance; if ``request`` is provided, this
+ should not be set.
+ traffic_split (Sequence[google.cloud.aiplatform_v1.types.DeployModelRequest.TrafficSplitEntry]):
+ A map from a DeployedModel's ID to the percentage of
+ this Endpoint's traffic that should be forwarded to that
+ DeployedModel.
+
+ If this field is non-empty, then the Endpoint's
+ ``traffic_split``
+ will be overwritten with it. To refer to the ID of the
+ Model being deployed by this call, use "0"; the actual
+ ID of the new DeployedModel will be filled in its place
+ by this method. The traffic percentage values must
+ add up to 100.
+
+ If this field is empty, then the Endpoint's
+ ``traffic_split``
+ is not updated.
+
+ This corresponds to the ``traffic_split`` field
+ on the ``request`` instance; if ``request`` is provided, this
+ should not be set.
+
+ retry (google.api_core.retry.Retry): Designation of what errors, if any,
+ should be retried.
+ timeout (float): The timeout for this request.
+ metadata (Sequence[Tuple[str, str]]): Strings which should be
+ sent along with the request as metadata.
+
+ Returns:
+ google.api_core.operation.Operation:
+ An object representing a long-running operation.
+
+ The result type for the operation will be
+ :class:`google.cloud.aiplatform_v1.types.DeployModelResponse`
+ Response message for
+ ``EndpointService.DeployModel``.
+
+ """
+ # Create or coerce a protobuf request object.
+ # Sanity check: If we got a request object, we should *not* have
+ # gotten any keyword arguments that map to the request.
+ has_flattened_params = any([endpoint, deployed_model, traffic_split])
+ if request is not None and has_flattened_params:
+ raise ValueError(
+ "If the `request` argument is set, then none of "
+ "the individual field arguments should be set."
+ )
+
+ # Minor optimization to avoid making a copy if the user passes
+ # in an endpoint_service.DeployModelRequest.
+ # There's no risk of modifying the input as we've already verified
+ # there are no flattened fields.
+ if not isinstance(request, endpoint_service.DeployModelRequest):
+ request = endpoint_service.DeployModelRequest(request)
+
+ # If we have keyword arguments corresponding to fields on the
+ # request, apply these.
+
+ if endpoint is not None:
+ request.endpoint = endpoint
+ if deployed_model is not None:
+ request.deployed_model = deployed_model
+
+ if traffic_split:
+ request.traffic_split.update(traffic_split)
+
+ # Wrap the RPC method; this adds retry and timeout information,
+ # and friendly error handling.
+ rpc = self._transport._wrapped_methods[self._transport.deploy_model]
+
+ # Certain fields should be provided within the metadata header;
+ # add these here.
+ metadata = tuple(metadata) + (
+ gapic_v1.routing_header.to_grpc_metadata((("endpoint", request.endpoint),)),
+ )
+
+ # Send the request.
+ response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,)
+
+ # Wrap the response in an operation future.
+ response = ga_operation.from_gapic(
+ response,
+ self._transport.operations_client,
+ endpoint_service.DeployModelResponse,
+ metadata_type=endpoint_service.DeployModelOperationMetadata,
+ )
+
+ # Done; return the response.
+ return response
+
+ def undeploy_model(
+ self,
+ request: endpoint_service.UndeployModelRequest = None,
+ *,
+ endpoint: str = None,
+ deployed_model_id: str = None,
+ traffic_split: Sequence[
+ endpoint_service.UndeployModelRequest.TrafficSplitEntry
+ ] = None,
+ retry: retries.Retry = gapic_v1.method.DEFAULT,
+ timeout: float = None,
+ metadata: Sequence[Tuple[str, str]] = (),
+ ) -> ga_operation.Operation:
+ r"""Undeploys a Model from an Endpoint, removing a
+ DeployedModel from it, and freeing all resources it's
+ using.
+
+ Args:
+ request (google.cloud.aiplatform_v1.types.UndeployModelRequest):
+ The request object. Request message for
+ ``EndpointService.UndeployModel``.
+ endpoint (str):
+ Required. The name of the Endpoint resource from which
+ to undeploy a Model. Format:
+ ``projects/{project}/locations/{location}/endpoints/{endpoint}``
+
+ This corresponds to the ``endpoint`` field
+ on the ``request`` instance; if ``request`` is provided, this
+ should not be set.
+ deployed_model_id (str):
+ Required. The ID of the DeployedModel
+ to be undeployed from the Endpoint.
+
+ This corresponds to the ``deployed_model_id`` field
+ on the ``request`` instance; if ``request`` is provided, this
+ should not be set.
+ traffic_split (Sequence[google.cloud.aiplatform_v1.types.UndeployModelRequest.TrafficSplitEntry]):
+ If this field is provided, then the Endpoint's
+ ``traffic_split``
+ will be overwritten with it. If the last DeployedModel is
+ being undeployed from the Endpoint, the
+ ``Endpoint.traffic_split`` will always end up empty when
+ this call returns. A DeployedModel will be successfully
+ undeployed only if it doesn't have any traffic assigned
+ to it when this method executes, or if this field
+ unassigns any traffic to it.
+
+ This corresponds to the ``traffic_split`` field
+ on the ``request`` instance; if ``request`` is provided, this
+ should not be set.
+
+ retry (google.api_core.retry.Retry): Designation of what errors, if any,
+ should be retried.
+ timeout (float): The timeout for this request.
+ metadata (Sequence[Tuple[str, str]]): Strings which should be
+ sent along with the request as metadata.
+
+ Returns:
+ google.api_core.operation.Operation:
+ An object representing a long-running operation.
+
+ The result type for the operation will be
+ :class:`google.cloud.aiplatform_v1.types.UndeployModelResponse`
+ Response message for
+ ``EndpointService.UndeployModel``.
+
+ """
+ # Create or coerce a protobuf request object.
+ # Sanity check: If we got a request object, we should *not* have
+ # gotten any keyword arguments that map to the request.
+ has_flattened_params = any([endpoint, deployed_model_id, traffic_split])
+ if request is not None and has_flattened_params:
+ raise ValueError(
+ "If the `request` argument is set, then none of "
+ "the individual field arguments should be set."
+ )
+
+ # Minor optimization to avoid making a copy if the user passes
+ # in an endpoint_service.UndeployModelRequest.
+ # There's no risk of modifying the input as we've already verified
+ # there are no flattened fields.
+ if not isinstance(request, endpoint_service.UndeployModelRequest):
+ request = endpoint_service.UndeployModelRequest(request)
+
+ # If we have keyword arguments corresponding to fields on the
+ # request, apply these.
+
+ if endpoint is not None:
+ request.endpoint = endpoint
+ if deployed_model_id is not None:
+ request.deployed_model_id = deployed_model_id
+
+ if traffic_split:
+ request.traffic_split.update(traffic_split)
+
+ # Wrap the RPC method; this adds retry and timeout information,
+ # and friendly error handling.
+ rpc = self._transport._wrapped_methods[self._transport.undeploy_model]
+
+ # Certain fields should be provided within the metadata header;
+ # add these here.
+ metadata = tuple(metadata) + (
+ gapic_v1.routing_header.to_grpc_metadata((("endpoint", request.endpoint),)),
+ )
+
+ # Send the request.
+ response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,)
+
+ # Wrap the response in an operation future.
+ response = ga_operation.from_gapic(
+ response,
+ self._transport.operations_client,
+ endpoint_service.UndeployModelResponse,
+ metadata_type=endpoint_service.UndeployModelOperationMetadata,
+ )
+
+ # Done; return the response.
+ return response
+
+
+try:
+ DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo(
+ gapic_version=pkg_resources.get_distribution(
+ "google-cloud-aiplatform",
+ ).version,
+ )
+except pkg_resources.DistributionNotFound:
+ DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo()
+
+
+__all__ = ("EndpointServiceClient",)
diff --git a/google/cloud/aiplatform_v1/services/endpoint_service/pagers.py b/google/cloud/aiplatform_v1/services/endpoint_service/pagers.py
new file mode 100644
index 0000000000..01ebccdec3
--- /dev/null
+++ b/google/cloud/aiplatform_v1/services/endpoint_service/pagers.py
@@ -0,0 +1,149 @@
+# -*- coding: utf-8 -*-
+
+# Copyright 2020 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+from typing import Any, AsyncIterable, Awaitable, Callable, Iterable, Sequence, Tuple
+
+from google.cloud.aiplatform_v1.types import endpoint
+from google.cloud.aiplatform_v1.types import endpoint_service
+
+
+class ListEndpointsPager:
+ """A pager for iterating through ``list_endpoints`` requests.
+
+ This class thinly wraps an initial
+ :class:`google.cloud.aiplatform_v1.types.ListEndpointsResponse` object, and
+ provides an ``__iter__`` method to iterate through its
+ ``endpoints`` field.
+
+ If there are more pages, the ``__iter__`` method will make additional
+ ``ListEndpoints`` requests and continue to iterate
+ through the ``endpoints`` field on the
+ corresponding responses.
+
+ All the usual :class:`google.cloud.aiplatform_v1.types.ListEndpointsResponse`
+ attributes are available on the pager. If multiple requests are made, only
+ the most recent response is retained, and thus used for attribute lookup.
+ """
+
+ def __init__(
+ self,
+ method: Callable[..., endpoint_service.ListEndpointsResponse],
+ request: endpoint_service.ListEndpointsRequest,
+ response: endpoint_service.ListEndpointsResponse,
+ *,
+ metadata: Sequence[Tuple[str, str]] = ()
+ ):
+ """Instantiate the pager.
+
+ Args:
+ method (Callable): The method that was originally called, and
+ which instantiated this pager.
+ request (google.cloud.aiplatform_v1.types.ListEndpointsRequest):
+ The initial request object.
+ response (google.cloud.aiplatform_v1.types.ListEndpointsResponse):
+ The initial response object.
+ metadata (Sequence[Tuple[str, str]]): Strings which should be
+ sent along with the request as metadata.
+ """
+ self._method = method
+ self._request = endpoint_service.ListEndpointsRequest(request)
+ self._response = response
+ self._metadata = metadata
+
+ def __getattr__(self, name: str) -> Any:
+ return getattr(self._response, name)
+
+ @property
+ def pages(self) -> Iterable[endpoint_service.ListEndpointsResponse]:
+ yield self._response
+ while self._response.next_page_token:
+ self._request.page_token = self._response.next_page_token
+ self._response = self._method(self._request, metadata=self._metadata)
+ yield self._response
+
+ def __iter__(self) -> Iterable[endpoint.Endpoint]:
+ for page in self.pages:
+ yield from page.endpoints
+
+ def __repr__(self) -> str:
+ return "{0}<{1!r}>".format(self.__class__.__name__, self._response)
+
+
+class ListEndpointsAsyncPager:
+ """A pager for iterating through ``list_endpoints`` requests.
+
+ This class thinly wraps an initial
+ :class:`google.cloud.aiplatform_v1.types.ListEndpointsResponse` object, and
+ provides an ``__aiter__`` method to iterate through its
+ ``endpoints`` field.
+
+ If there are more pages, the ``__aiter__`` method will make additional
+ ``ListEndpoints`` requests and continue to iterate
+ through the ``endpoints`` field on the
+ corresponding responses.
+
+ All the usual :class:`google.cloud.aiplatform_v1.types.ListEndpointsResponse`
+ attributes are available on the pager. If multiple requests are made, only
+ the most recent response is retained, and thus used for attribute lookup.
+ """
+
+ def __init__(
+ self,
+ method: Callable[..., Awaitable[endpoint_service.ListEndpointsResponse]],
+ request: endpoint_service.ListEndpointsRequest,
+ response: endpoint_service.ListEndpointsResponse,
+ *,
+ metadata: Sequence[Tuple[str, str]] = ()
+ ):
+ """Instantiate the pager.
+
+ Args:
+ method (Callable): The method that was originally called, and
+ which instantiated this pager.
+ request (google.cloud.aiplatform_v1.types.ListEndpointsRequest):
+ The initial request object.
+ response (google.cloud.aiplatform_v1.types.ListEndpointsResponse):
+ The initial response object.
+ metadata (Sequence[Tuple[str, str]]): Strings which should be
+ sent along with the request as metadata.
+ """
+ self._method = method
+ self._request = endpoint_service.ListEndpointsRequest(request)
+ self._response = response
+ self._metadata = metadata
+
+ def __getattr__(self, name: str) -> Any:
+ return getattr(self._response, name)
+
+ @property
+ async def pages(self) -> AsyncIterable[endpoint_service.ListEndpointsResponse]:
+ yield self._response
+ while self._response.next_page_token:
+ self._request.page_token = self._response.next_page_token
+ self._response = await self._method(self._request, metadata=self._metadata)
+ yield self._response
+
+ def __aiter__(self) -> AsyncIterable[endpoint.Endpoint]:
+ async def async_generator():
+ async for page in self.pages:
+ for response in page.endpoints:
+ yield response
+
+ return async_generator()
+
+ def __repr__(self) -> str:
+ return "{0}<{1!r}>".format(self.__class__.__name__, self._response)
diff --git a/google/cloud/aiplatform_v1/services/endpoint_service/transports/__init__.py b/google/cloud/aiplatform_v1/services/endpoint_service/transports/__init__.py
new file mode 100644
index 0000000000..3d0695461d
--- /dev/null
+++ b/google/cloud/aiplatform_v1/services/endpoint_service/transports/__init__.py
@@ -0,0 +1,35 @@
+# -*- coding: utf-8 -*-
+
+# Copyright 2020 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+from collections import OrderedDict
+from typing import Dict, Type
+
+from .base import EndpointServiceTransport
+from .grpc import EndpointServiceGrpcTransport
+from .grpc_asyncio import EndpointServiceGrpcAsyncIOTransport
+
+
+# Compile a registry of transports.
+_transport_registry = OrderedDict() # type: Dict[str, Type[EndpointServiceTransport]]
+_transport_registry["grpc"] = EndpointServiceGrpcTransport
+_transport_registry["grpc_asyncio"] = EndpointServiceGrpcAsyncIOTransport
+
+__all__ = (
+ "EndpointServiceTransport",
+ "EndpointServiceGrpcTransport",
+ "EndpointServiceGrpcAsyncIOTransport",
+)
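+
+
+# NOTE (reviewer sketch): illustrative helper, not part of the generated
+# surface. It mirrors how EndpointServiceClientMeta.get_transport_class
+# resolves the ``transport`` argument against the registry above.
+def _example_resolve_transport(label: str = None):  # pragma: NO COVER
+    if label:
+        return _transport_registry[label]
+    # No label given: fall back to the first registered transport ("grpc").
+    return next(iter(_transport_registry.values()))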
diff --git a/google/cloud/aiplatform_v1/services/endpoint_service/transports/base.py b/google/cloud/aiplatform_v1/services/endpoint_service/transports/base.py
new file mode 100644
index 0000000000..728c38fec3
--- /dev/null
+++ b/google/cloud/aiplatform_v1/services/endpoint_service/transports/base.py
@@ -0,0 +1,208 @@
+# -*- coding: utf-8 -*-
+
+# Copyright 2020 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+import abc
+import typing
+import pkg_resources
+
+from google import auth # type: ignore
+from google.api_core import exceptions # type: ignore
+from google.api_core import gapic_v1 # type: ignore
+from google.api_core import retry as retries # type: ignore
+from google.api_core import operations_v1 # type: ignore
+from google.auth import credentials # type: ignore
+
+from google.cloud.aiplatform_v1.types import endpoint
+from google.cloud.aiplatform_v1.types import endpoint as gca_endpoint
+from google.cloud.aiplatform_v1.types import endpoint_service
+from google.longrunning import operations_pb2 as operations # type: ignore
+
+
+try:
+ DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo(
+ gapic_version=pkg_resources.get_distribution(
+ "google-cloud-aiplatform",
+ ).version,
+ )
+except pkg_resources.DistributionNotFound:
+ DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo()
+
+
+class EndpointServiceTransport(abc.ABC):
+ """Abstract transport class for EndpointService."""
+
+ AUTH_SCOPES = ("https://www.googleapis.com/auth/cloud-platform",)
+
+ def __init__(
+ self,
+ *,
+ host: str = "aiplatform.googleapis.com",
+ credentials: credentials.Credentials = None,
+ credentials_file: typing.Optional[str] = None,
+ scopes: typing.Optional[typing.Sequence[str]] = AUTH_SCOPES,
+ quota_project_id: typing.Optional[str] = None,
+ client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
+ **kwargs,
+ ) -> None:
+ """Instantiate the transport.
+
+ Args:
+ host (Optional[str]): The hostname to connect to.
+ credentials (Optional[google.auth.credentials.Credentials]): The
+ authorization credentials to attach to requests. These
+ credentials identify the application to the service; if none
+ are specified, the client will attempt to ascertain the
+ credentials from the environment.
+ credentials_file (Optional[str]): A file with credentials that can
+ be loaded with :func:`google.auth.load_credentials_from_file`.
+ This argument is mutually exclusive with credentials.
+ scopes (Optional[Sequence[str]]): A list of scopes.
+ quota_project_id (Optional[str]): An optional project to use for billing
+ and quota.
+ client_info (google.api_core.gapic_v1.client_info.ClientInfo):
+ The client info used to send a user-agent string along with
+ API requests. If ``None``, then default info will be used.
+ Generally, you only need to set this if you're developing
+ your own client library.
+ """
+ # Save the hostname. Default to port 443 (HTTPS) if none is specified.
+ if ":" not in host:
+ host += ":443"
+ self._host = host
+
+ # If no credentials are provided, then determine the appropriate
+ # defaults.
+ if credentials and credentials_file:
+ raise exceptions.DuplicateCredentialArgs(
+ "'credentials_file' and 'credentials' are mutually exclusive"
+ )
+
+ if credentials_file is not None:
+ credentials, _ = auth.load_credentials_from_file(
+ credentials_file, scopes=scopes, quota_project_id=quota_project_id
+ )
+
+ elif credentials is None:
+ credentials, _ = auth.default(
+ scopes=scopes, quota_project_id=quota_project_id
+ )
+
+ # Save the credentials.
+ self._credentials = credentials
+
+ # Lifted into its own function so it can be stubbed out during tests.
+ self._prep_wrapped_messages(client_info)
+
+ def _prep_wrapped_messages(self, client_info):
+ # Precompute the wrapped methods.
+ self._wrapped_methods = {
+ self.create_endpoint: gapic_v1.method.wrap_method(
+ self.create_endpoint, default_timeout=None, client_info=client_info,
+ ),
+ self.get_endpoint: gapic_v1.method.wrap_method(
+ self.get_endpoint, default_timeout=None, client_info=client_info,
+ ),
+ self.list_endpoints: gapic_v1.method.wrap_method(
+ self.list_endpoints, default_timeout=None, client_info=client_info,
+ ),
+ self.update_endpoint: gapic_v1.method.wrap_method(
+ self.update_endpoint, default_timeout=None, client_info=client_info,
+ ),
+ self.delete_endpoint: gapic_v1.method.wrap_method(
+ self.delete_endpoint, default_timeout=None, client_info=client_info,
+ ),
+ self.deploy_model: gapic_v1.method.wrap_method(
+ self.deploy_model, default_timeout=None, client_info=client_info,
+ ),
+ self.undeploy_model: gapic_v1.method.wrap_method(
+ self.undeploy_model, default_timeout=None, client_info=client_info,
+ ),
+ }
+
+ @property
+ def operations_client(self) -> operations_v1.OperationsClient:
+ """Return the client designed to process long-running operations."""
+ raise NotImplementedError()
+
+ @property
+ def create_endpoint(
+ self,
+ ) -> typing.Callable[
+ [endpoint_service.CreateEndpointRequest],
+ typing.Union[operations.Operation, typing.Awaitable[operations.Operation]],
+ ]:
+ raise NotImplementedError()
+
+ @property
+ def get_endpoint(
+ self,
+ ) -> typing.Callable[
+ [endpoint_service.GetEndpointRequest],
+ typing.Union[endpoint.Endpoint, typing.Awaitable[endpoint.Endpoint]],
+ ]:
+ raise NotImplementedError()
+
+ @property
+ def list_endpoints(
+ self,
+ ) -> typing.Callable[
+ [endpoint_service.ListEndpointsRequest],
+ typing.Union[
+ endpoint_service.ListEndpointsResponse,
+ typing.Awaitable[endpoint_service.ListEndpointsResponse],
+ ],
+ ]:
+ raise NotImplementedError()
+
+ @property
+ def update_endpoint(
+ self,
+ ) -> typing.Callable[
+ [endpoint_service.UpdateEndpointRequest],
+ typing.Union[gca_endpoint.Endpoint, typing.Awaitable[gca_endpoint.Endpoint]],
+ ]:
+ raise NotImplementedError()
+
+ @property
+ def delete_endpoint(
+ self,
+ ) -> typing.Callable[
+ [endpoint_service.DeleteEndpointRequest],
+ typing.Union[operations.Operation, typing.Awaitable[operations.Operation]],
+ ]:
+ raise NotImplementedError()
+
+ @property
+ def deploy_model(
+ self,
+ ) -> typing.Callable[
+ [endpoint_service.DeployModelRequest],
+ typing.Union[operations.Operation, typing.Awaitable[operations.Operation]],
+ ]:
+ raise NotImplementedError()
+
+ @property
+ def undeploy_model(
+ self,
+ ) -> typing.Callable[
+ [endpoint_service.UndeployModelRequest],
+ typing.Union[operations.Operation, typing.Awaitable[operations.Operation]],
+ ]:
+ raise NotImplementedError()
+
+
+__all__ = ("EndpointServiceTransport",)
diff --git a/google/cloud/aiplatform_v1/services/endpoint_service/transports/grpc.py b/google/cloud/aiplatform_v1/services/endpoint_service/transports/grpc.py
new file mode 100644
index 0000000000..f0b8b32de1
--- /dev/null
+++ b/google/cloud/aiplatform_v1/services/endpoint_service/transports/grpc.py
@@ -0,0 +1,456 @@
+# -*- coding: utf-8 -*-
+
+# Copyright 2020 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+import warnings
+from typing import Callable, Dict, Optional, Sequence, Tuple
+
+from google.api_core import grpc_helpers # type: ignore
+from google.api_core import operations_v1 # type: ignore
+from google.api_core import gapic_v1 # type: ignore
+from google import auth # type: ignore
+from google.auth import credentials # type: ignore
+from google.auth.transport.grpc import SslCredentials # type: ignore
+
+import grpc # type: ignore
+
+from google.cloud.aiplatform_v1.types import endpoint
+from google.cloud.aiplatform_v1.types import endpoint as gca_endpoint
+from google.cloud.aiplatform_v1.types import endpoint_service
+from google.longrunning import operations_pb2 as operations # type: ignore
+
+from .base import EndpointServiceTransport, DEFAULT_CLIENT_INFO
+
+
+class EndpointServiceGrpcTransport(EndpointServiceTransport):
+ """gRPC backend transport for EndpointService.
+
+ This class defines the same methods as the primary client, so the
+ primary client can load the underlying transport implementation
+ and call it.
+
+ It sends protocol buffers over the wire using gRPC (which is built on
+ top of HTTP/2); the ``grpcio`` package must be installed.
+ """
+
+ _stubs: Dict[str, Callable]
+
+ def __init__(
+ self,
+ *,
+ host: str = "aiplatform.googleapis.com",
+ credentials: credentials.Credentials = None,
+ credentials_file: str = None,
+ scopes: Sequence[str] = None,
+ channel: grpc.Channel = None,
+ api_mtls_endpoint: str = None,
+ client_cert_source: Callable[[], Tuple[bytes, bytes]] = None,
+ ssl_channel_credentials: grpc.ChannelCredentials = None,
+ client_cert_source_for_mtls: Callable[[], Tuple[bytes, bytes]] = None,
+ quota_project_id: Optional[str] = None,
+ client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
+ ) -> None:
+ """Instantiate the transport.
+
+ Args:
+ host (Optional[str]): The hostname to connect to.
+ credentials (Optional[google.auth.credentials.Credentials]): The
+ authorization credentials to attach to requests. These
+ credentials identify the application to the service; if none
+ are specified, the client will attempt to ascertain the
+ credentials from the environment.
+ This argument is ignored if ``channel`` is provided.
+ credentials_file (Optional[str]): A file with credentials that can
+ be loaded with :func:`google.auth.load_credentials_from_file`.
+ This argument is ignored if ``channel`` is provided.
+ scopes (Optional[Sequence[str]]): A list of scopes. This argument is
+ ignored if ``channel`` is provided.
+ channel (Optional[grpc.Channel]): A ``Channel`` instance through
+ which to make calls.
+ api_mtls_endpoint (Optional[str]): Deprecated. The mutual TLS endpoint.
+ If provided, it overrides the ``host`` argument and tries to create
+ a mutual TLS channel with client SSL credentials from
+ ``client_cert_source`` or application default SSL credentials.
+ client_cert_source (Optional[Callable[[], Tuple[bytes, bytes]]]):
+ Deprecated. A callback to provide client SSL certificate bytes and
+ private key bytes, both in PEM format. It is ignored if
+ ``api_mtls_endpoint`` is None.
+ ssl_channel_credentials (grpc.ChannelCredentials): SSL credentials
+ for grpc channel. It is ignored if ``channel`` is provided.
+ client_cert_source_for_mtls (Optional[Callable[[], Tuple[bytes, bytes]]]):
+ A callback to provide client certificate bytes and private key bytes,
+ both in PEM format. It is used to configure mutual TLS channel. It is
+ ignored if ``channel`` or ``ssl_channel_credentials`` is provided.
+ quota_project_id (Optional[str]): An optional project to use for billing
+ and quota.
+ client_info (google.api_core.gapic_v1.client_info.ClientInfo):
+ The client info used to send a user-agent string along with
+ API requests. If ``None``, then default info will be used.
+ Generally, you only need to set this if you're developing
+ your own client library.
+
+ Raises:
+ google.auth.exceptions.MutualTLSChannelError: If mutual TLS transport
+ creation failed for any reason.
+ google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials``
+ and ``credentials_file`` are passed.
+ """
+ self._ssl_channel_credentials = ssl_channel_credentials
+
+ if api_mtls_endpoint:
+ warnings.warn("api_mtls_endpoint is deprecated", DeprecationWarning)
+ if client_cert_source:
+ warnings.warn("client_cert_source is deprecated", DeprecationWarning)
+
+ if channel:
+ # Sanity check: Ensure that channel and credentials are not both
+ # provided.
+ credentials = False
+
+ # If a channel was explicitly provided, set it.
+ self._grpc_channel = channel
+ self._ssl_channel_credentials = None
+ elif api_mtls_endpoint:
+ host = (
+ api_mtls_endpoint
+ if ":" in api_mtls_endpoint
+ else api_mtls_endpoint + ":443"
+ )
+
+ if credentials is None:
+ credentials, _ = auth.default(
+ scopes=self.AUTH_SCOPES, quota_project_id=quota_project_id
+ )
+
+ # Create SSL credentials with client_cert_source or application
+ # default SSL credentials.
+ if client_cert_source:
+ cert, key = client_cert_source()
+ ssl_credentials = grpc.ssl_channel_credentials(
+ certificate_chain=cert, private_key=key
+ )
+ else:
+ ssl_credentials = SslCredentials().ssl_credentials
+
+ # create a new channel. The provided one is ignored.
+ self._grpc_channel = type(self).create_channel(
+ host,
+ credentials=credentials,
+ credentials_file=credentials_file,
+ ssl_credentials=ssl_credentials,
+ scopes=scopes or self.AUTH_SCOPES,
+ quota_project_id=quota_project_id,
+ options=[
+ ("grpc.max_send_message_length", -1),
+ ("grpc.max_receive_message_length", -1),
+ ],
+ )
+ self._ssl_channel_credentials = ssl_credentials
+ else:
+ host = host if ":" in host else host + ":443"
+
+ if credentials is None:
+ credentials, _ = auth.default(
+ scopes=self.AUTH_SCOPES, quota_project_id=quota_project_id
+ )
+
+ if client_cert_source_for_mtls and not ssl_channel_credentials:
+ cert, key = client_cert_source_for_mtls()
+ self._ssl_channel_credentials = grpc.ssl_channel_credentials(
+ certificate_chain=cert, private_key=key
+ )
+
+ # create a new channel. The provided one is ignored.
+ self._grpc_channel = type(self).create_channel(
+ host,
+ credentials=credentials,
+ credentials_file=credentials_file,
+ ssl_credentials=self._ssl_channel_credentials,
+ scopes=scopes or self.AUTH_SCOPES,
+ quota_project_id=quota_project_id,
+ options=[
+ ("grpc.max_send_message_length", -1),
+ ("grpc.max_receive_message_length", -1),
+ ],
+ )
+
+ self._stubs = {} # type: Dict[str, Callable]
+ self._operations_client = None
+
+ # Run the base constructor.
+ super().__init__(
+ host=host,
+ credentials=credentials,
+ credentials_file=credentials_file,
+ scopes=scopes or self.AUTH_SCOPES,
+ quota_project_id=quota_project_id,
+ client_info=client_info,
+ )
+
+ @classmethod
+ def create_channel(
+ cls,
+ host: str = "aiplatform.googleapis.com",
+ credentials: credentials.Credentials = None,
+ credentials_file: str = None,
+ scopes: Optional[Sequence[str]] = None,
+ quota_project_id: Optional[str] = None,
+ **kwargs,
+ ) -> grpc.Channel:
+ """Create and return a gRPC channel object.
+ Args:
+ host (Optional[str]): The host for the channel to use.
+ credentials (Optional[~.Credentials]): The
+ authorization credentials to attach to requests. These
+ credentials identify this application to the service. If
+ none are specified, the client will attempt to ascertain
+ the credentials from the environment.
+ credentials_file (Optional[str]): A file with credentials that can
+ be loaded with :func:`google.auth.load_credentials_from_file`.
+ This argument is mutually exclusive with credentials.
+ scopes (Optional[Sequence[str]]): An optional list of scopes needed for this
+ service. These are only used when credentials are not specified and
+ are passed to :func:`google.auth.default`.
+ quota_project_id (Optional[str]): An optional project to use for billing
+ and quota.
+ kwargs (Optional[dict]): Keyword arguments, which are passed to the
+ channel creation.
+ Returns:
+ grpc.Channel: A gRPC channel object.
+
+ Raises:
+ google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials``
+ and ``credentials_file`` are passed.
+ """
+ scopes = scopes or cls.AUTH_SCOPES
+ return grpc_helpers.create_channel(
+ host,
+ credentials=credentials,
+ credentials_file=credentials_file,
+ scopes=scopes,
+ quota_project_id=quota_project_id,
+ **kwargs,
+ )
+
+ @property
+ def grpc_channel(self) -> grpc.Channel:
+ """Return the channel designed to connect to this service.
+ """
+ return self._grpc_channel
+
+ @property
+ def operations_client(self) -> operations_v1.OperationsClient:
+ """Create the client designed to process long-running operations.
+
+ This property caches on the instance; repeated calls return the same
+ client.
+ """
+ # Sanity check: Only create a new client if we do not already have one.
+ if self._operations_client is None:
+ self._operations_client = operations_v1.OperationsClient(self.grpc_channel)
+
+ # Return the client from cache.
+ return self._operations_client
+
+ @property
+ def create_endpoint(
+ self,
+ ) -> Callable[[endpoint_service.CreateEndpointRequest], operations.Operation]:
+ r"""Return a callable for the create endpoint method over gRPC.
+
+ Creates an Endpoint.
+
+ Returns:
+ Callable[[~.CreateEndpointRequest],
+ ~.Operation]:
+ A function that, when called, will call the underlying RPC
+ on the server.
+ """
+ # Generate a "stub function" on-the-fly which will actually make
+ # the request.
+ # gRPC handles serialization and deserialization, so we just need
+ # to pass in the functions for each.
+ if "create_endpoint" not in self._stubs:
+ self._stubs["create_endpoint"] = self.grpc_channel.unary_unary(
+ "/google.cloud.aiplatform.v1.EndpointService/CreateEndpoint",
+ request_serializer=endpoint_service.CreateEndpointRequest.serialize,
+ response_deserializer=operations.Operation.FromString,
+ )
+ return self._stubs["create_endpoint"]
+
+ @property
+ def get_endpoint(
+ self,
+ ) -> Callable[[endpoint_service.GetEndpointRequest], endpoint.Endpoint]:
+ r"""Return a callable for the get endpoint method over gRPC.
+
+ Gets an Endpoint.
+
+ Returns:
+ Callable[[~.GetEndpointRequest],
+ ~.Endpoint]:
+ A function that, when called, will call the underlying RPC
+ on the server.
+ """
+ # Generate a "stub function" on-the-fly which will actually make
+ # the request.
+ # gRPC handles serialization and deserialization, so we just need
+ # to pass in the functions for each.
+ if "get_endpoint" not in self._stubs:
+ self._stubs["get_endpoint"] = self.grpc_channel.unary_unary(
+ "/google.cloud.aiplatform.v1.EndpointService/GetEndpoint",
+ request_serializer=endpoint_service.GetEndpointRequest.serialize,
+ response_deserializer=endpoint.Endpoint.deserialize,
+ )
+ return self._stubs["get_endpoint"]
+
+ @property
+ def list_endpoints(
+ self,
+ ) -> Callable[
+ [endpoint_service.ListEndpointsRequest], endpoint_service.ListEndpointsResponse
+ ]:
+ r"""Return a callable for the list endpoints method over gRPC.
+
+ Lists Endpoints in a Location.
+
+ Returns:
+ Callable[[~.ListEndpointsRequest],
+ ~.ListEndpointsResponse]:
+ A function that, when called, will call the underlying RPC
+ on the server.
+ """
+ # Generate a "stub function" on-the-fly which will actually make
+ # the request.
+ # gRPC handles serialization and deserialization, so we just need
+ # to pass in the functions for each.
+ if "list_endpoints" not in self._stubs:
+ self._stubs["list_endpoints"] = self.grpc_channel.unary_unary(
+ "/google.cloud.aiplatform.v1.EndpointService/ListEndpoints",
+ request_serializer=endpoint_service.ListEndpointsRequest.serialize,
+ response_deserializer=endpoint_service.ListEndpointsResponse.deserialize,
+ )
+ return self._stubs["list_endpoints"]
+
+ @property
+ def update_endpoint(
+ self,
+ ) -> Callable[[endpoint_service.UpdateEndpointRequest], gca_endpoint.Endpoint]:
+ r"""Return a callable for the update endpoint method over gRPC.
+
+ Updates an Endpoint.
+
+ Returns:
+ Callable[[~.UpdateEndpointRequest],
+ ~.Endpoint]:
+ A function that, when called, will call the underlying RPC
+ on the server.
+ """
+ # Generate a "stub function" on-the-fly which will actually make
+ # the request.
+ # gRPC handles serialization and deserialization, so we just need
+ # to pass in the functions for each.
+ if "update_endpoint" not in self._stubs:
+ self._stubs["update_endpoint"] = self.grpc_channel.unary_unary(
+ "/google.cloud.aiplatform.v1.EndpointService/UpdateEndpoint",
+ request_serializer=endpoint_service.UpdateEndpointRequest.serialize,
+ response_deserializer=gca_endpoint.Endpoint.deserialize,
+ )
+ return self._stubs["update_endpoint"]
+
+ @property
+ def delete_endpoint(
+ self,
+ ) -> Callable[[endpoint_service.DeleteEndpointRequest], operations.Operation]:
+ r"""Return a callable for the delete endpoint method over gRPC.
+
+ Deletes an Endpoint.
+
+ Returns:
+ Callable[[~.DeleteEndpointRequest],
+ ~.Operation]:
+ A function that, when called, will call the underlying RPC
+ on the server.
+ """
+ # Generate a "stub function" on-the-fly which will actually make
+ # the request.
+ # gRPC handles serialization and deserialization, so we just need
+ # to pass in the functions for each.
+ if "delete_endpoint" not in self._stubs:
+ self._stubs["delete_endpoint"] = self.grpc_channel.unary_unary(
+ "/google.cloud.aiplatform.v1.EndpointService/DeleteEndpoint",
+ request_serializer=endpoint_service.DeleteEndpointRequest.serialize,
+ response_deserializer=operations.Operation.FromString,
+ )
+ return self._stubs["delete_endpoint"]
+
+ @property
+ def deploy_model(
+ self,
+ ) -> Callable[[endpoint_service.DeployModelRequest], operations.Operation]:
+ r"""Return a callable for the deploy model method over gRPC.
+
+ Deploys a Model into this Endpoint, creating a
+ DeployedModel within it.
+
+ Returns:
+ Callable[[~.DeployModelRequest],
+ ~.Operation]:
+ A function that, when called, will call the underlying RPC
+ on the server.
+ """
+ # Generate a "stub function" on-the-fly which will actually make
+ # the request.
+ # gRPC handles serialization and deserialization, so we just need
+ # to pass in the functions for each.
+ if "deploy_model" not in self._stubs:
+ self._stubs["deploy_model"] = self.grpc_channel.unary_unary(
+ "/google.cloud.aiplatform.v1.EndpointService/DeployModel",
+ request_serializer=endpoint_service.DeployModelRequest.serialize,
+ response_deserializer=operations.Operation.FromString,
+ )
+ return self._stubs["deploy_model"]
+
+ @property
+ def undeploy_model(
+ self,
+ ) -> Callable[[endpoint_service.UndeployModelRequest], operations.Operation]:
+ r"""Return a callable for the undeploy model method over gRPC.
+
+ Undeploys a Model from an Endpoint, removing a
+ DeployedModel from it, and freeing all resources it's
+ using.
+
+ Returns:
+ Callable[[~.UndeployModelRequest],
+ ~.Operation]:
+ A function that, when called, will call the underlying RPC
+ on the server.
+ """
+ # Generate a "stub function" on-the-fly which will actually make
+ # the request.
+ # gRPC handles serialization and deserialization, so we just need
+ # to pass in the functions for each.
+ if "undeploy_model" not in self._stubs:
+ self._stubs["undeploy_model"] = self.grpc_channel.unary_unary(
+ "/google.cloud.aiplatform.v1.EndpointService/UndeployModel",
+ request_serializer=endpoint_service.UndeployModelRequest.serialize,
+ response_deserializer=operations.Operation.FromString,
+ )
+ return self._stubs["undeploy_model"]
+
+
+__all__ = ("EndpointServiceGrpcTransport",)
diff --git a/google/cloud/aiplatform_v1/services/endpoint_service/transports/grpc_asyncio.py b/google/cloud/aiplatform_v1/services/endpoint_service/transports/grpc_asyncio.py
new file mode 100644
index 0000000000..ef97ba490f
--- /dev/null
+++ b/google/cloud/aiplatform_v1/services/endpoint_service/transports/grpc_asyncio.py
@@ -0,0 +1,473 @@
+# -*- coding: utf-8 -*-
+
+# Copyright 2020 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+import warnings
+from typing import Awaitable, Callable, Dict, Optional, Sequence, Tuple
+
+from google.api_core import gapic_v1 # type: ignore
+from google.api_core import grpc_helpers_async # type: ignore
+from google.api_core import operations_v1 # type: ignore
+from google import auth # type: ignore
+from google.auth import credentials # type: ignore
+from google.auth.transport.grpc import SslCredentials # type: ignore
+
+import grpc # type: ignore
+from grpc.experimental import aio # type: ignore
+
+from google.cloud.aiplatform_v1.types import endpoint
+from google.cloud.aiplatform_v1.types import endpoint as gca_endpoint
+from google.cloud.aiplatform_v1.types import endpoint_service
+from google.longrunning import operations_pb2 as operations # type: ignore
+
+from .base import EndpointServiceTransport, DEFAULT_CLIENT_INFO
+from .grpc import EndpointServiceGrpcTransport
+
+
+class EndpointServiceGrpcAsyncIOTransport(EndpointServiceTransport):
+ """gRPC AsyncIO backend transport for EndpointService.
+
+ This class defines the same methods as the primary client, so the
+ primary client can load the underlying transport implementation
+ and call it.
+
+ It sends protocol buffers over the wire using gRPC (which is built on
+ top of HTTP/2); the ``grpcio`` package must be installed.
+ """
+
+ _grpc_channel: aio.Channel
+ _stubs: Dict[str, Callable] = {}
+
+ @classmethod
+ def create_channel(
+ cls,
+ host: str = "aiplatform.googleapis.com",
+ credentials: credentials.Credentials = None,
+ credentials_file: Optional[str] = None,
+ scopes: Optional[Sequence[str]] = None,
+ quota_project_id: Optional[str] = None,
+ **kwargs,
+ ) -> aio.Channel:
+ """Create and return a gRPC AsyncIO channel object.
+ Args:
+ host (Optional[str]): The host for the channel to use.
+ credentials (Optional[~.Credentials]): The
+ authorization credentials to attach to requests. These
+ credentials identify this application to the service. If
+ none are specified, the client will attempt to ascertain
+ the credentials from the environment.
+ credentials_file (Optional[str]): A file with credentials that can
+ be loaded with :func:`google.auth.load_credentials_from_file`.
+ This argument is ignored if ``channel`` is provided.
+ scopes (Optional[Sequence[str]]): An optional list of scopes needed for this
+ service. These are only used when credentials are not specified and
+ are passed to :func:`google.auth.default`.
+ quota_project_id (Optional[str]): An optional project to use for billing
+ and quota.
+ kwargs (Optional[dict]): Keyword arguments, which are passed to the
+ channel creation.
+ Returns:
+ aio.Channel: A gRPC AsyncIO channel object.
+ """
+ scopes = scopes or cls.AUTH_SCOPES
+ return grpc_helpers_async.create_channel(
+ host,
+ credentials=credentials,
+ credentials_file=credentials_file,
+ scopes=scopes,
+ quota_project_id=quota_project_id,
+ **kwargs,
+ )
+
+ def __init__(
+ self,
+ *,
+ host: str = "aiplatform.googleapis.com",
+ credentials: credentials.Credentials = None,
+ credentials_file: Optional[str] = None,
+ scopes: Optional[Sequence[str]] = None,
+ channel: aio.Channel = None,
+ api_mtls_endpoint: str = None,
+ client_cert_source: Callable[[], Tuple[bytes, bytes]] = None,
+ ssl_channel_credentials: grpc.ChannelCredentials = None,
+ client_cert_source_for_mtls: Callable[[], Tuple[bytes, bytes]] = None,
+ quota_project_id=None,
+ client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
+ ) -> None:
+ """Instantiate the transport.
+
+ Args:
+ host (Optional[str]): The hostname to connect to.
+ credentials (Optional[google.auth.credentials.Credentials]): The
+ authorization credentials to attach to requests. These
+ credentials identify the application to the service; if none
+ are specified, the client will attempt to ascertain the
+ credentials from the environment.
+ This argument is ignored if ``channel`` is provided.
+ credentials_file (Optional[str]): A file with credentials that can
+ be loaded with :func:`google.auth.load_credentials_from_file`.
+ This argument is ignored if ``channel`` is provided.
+ scopes (Optional[Sequence[str]]): An optional list of scopes needed for this
+ service. These are only used when credentials are not specified and
+ are passed to :func:`google.auth.default`.
+ channel (Optional[aio.Channel]): A ``Channel`` instance through
+ which to make calls.
+ api_mtls_endpoint (Optional[str]): Deprecated. The mutual TLS endpoint.
+ If provided, it overrides the ``host`` argument and tries to create
+ a mutual TLS channel with client SSL credentials from
+ ``client_cert_source`` or application default SSL credentials.
+ client_cert_source (Optional[Callable[[], Tuple[bytes, bytes]]]):
+ Deprecated. A callback to provide client SSL certificate bytes and
+ private key bytes, both in PEM format. It is ignored if
+ ``api_mtls_endpoint`` is None.
+ ssl_channel_credentials (grpc.ChannelCredentials): SSL credentials
+ for grpc channel. It is ignored if ``channel`` is provided.
+ client_cert_source_for_mtls (Optional[Callable[[], Tuple[bytes, bytes]]]):
+ A callback to provide client certificate bytes and private key bytes,
+ both in PEM format. It is used to configure mutual TLS channel. It is
+ ignored if ``channel`` or ``ssl_channel_credentials`` is provided.
+ quota_project_id (Optional[str]): An optional project to use for billing
+ and quota.
+ client_info (google.api_core.gapic_v1.client_info.ClientInfo):
+ The client info used to send a user-agent string along with
+ API requests. If ``None``, then default info will be used.
+ Generally, you only need to set this if you're developing
+ your own client library.
+
+ Raises:
+ google.auth.exceptions.MutualTLSChannelError: If mutual TLS transport
+ creation failed for any reason.
+ google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials``
+ and ``credentials_file`` are passed.
+ """
+ self._ssl_channel_credentials = ssl_channel_credentials
+
+ if api_mtls_endpoint:
+ warnings.warn("api_mtls_endpoint is deprecated", DeprecationWarning)
+ if client_cert_source:
+ warnings.warn("client_cert_source is deprecated", DeprecationWarning)
+
+ if channel:
+ # Sanity check: Ensure that channel and credentials are not both
+ # provided.
+ credentials = False
+
+ # If a channel was explicitly provided, set it.
+ self._grpc_channel = channel
+ self._ssl_channel_credentials = None
+ elif api_mtls_endpoint:
+ host = (
+ api_mtls_endpoint
+ if ":" in api_mtls_endpoint
+ else api_mtls_endpoint + ":443"
+ )
+
+ if credentials is None:
+ credentials, _ = auth.default(
+ scopes=self.AUTH_SCOPES, quota_project_id=quota_project_id
+ )
+
+ # Create SSL credentials with client_cert_source or application
+ # default SSL credentials.
+ if client_cert_source:
+ cert, key = client_cert_source()
+ ssl_credentials = grpc.ssl_channel_credentials(
+ certificate_chain=cert, private_key=key
+ )
+ else:
+ ssl_credentials = SslCredentials().ssl_credentials
+
+ # create a new channel. The provided one is ignored.
+ self._grpc_channel = type(self).create_channel(
+ host,
+ credentials=credentials,
+ credentials_file=credentials_file,
+ ssl_credentials=ssl_credentials,
+ scopes=scopes or self.AUTH_SCOPES,
+ quota_project_id=quota_project_id,
+ options=[
+ ("grpc.max_send_message_length", -1),
+ ("grpc.max_receive_message_length", -1),
+ ],
+ )
+ self._ssl_channel_credentials = ssl_credentials
+ else:
+ host = host if ":" in host else host + ":443"
+
+ if credentials is None:
+ credentials, _ = auth.default(
+ scopes=self.AUTH_SCOPES, quota_project_id=quota_project_id
+ )
+
+ if client_cert_source_for_mtls and not ssl_channel_credentials:
+ cert, key = client_cert_source_for_mtls()
+ self._ssl_channel_credentials = grpc.ssl_channel_credentials(
+ certificate_chain=cert, private_key=key
+ )
+
+ # create a new channel. The provided one is ignored.
+ self._grpc_channel = type(self).create_channel(
+ host,
+ credentials=credentials,
+ credentials_file=credentials_file,
+ ssl_credentials=self._ssl_channel_credentials,
+ scopes=scopes or self.AUTH_SCOPES,
+ quota_project_id=quota_project_id,
+ options=[
+ ("grpc.max_send_message_length", -1),
+ ("grpc.max_receive_message_length", -1),
+ ],
+ )
+
+ # Run the base constructor.
+ super().__init__(
+ host=host,
+ credentials=credentials,
+ credentials_file=credentials_file,
+ scopes=scopes or self.AUTH_SCOPES,
+ quota_project_id=quota_project_id,
+ client_info=client_info,
+ )
+
+ self._stubs = {}
+ self._operations_client = None
+
+ @property
+ def grpc_channel(self) -> aio.Channel:
+ """Create the channel designed to connect to this service.
+
+ This property caches on the instance; repeated calls return
+ the same channel.
+ """
+ # Return the channel from cache.
+ return self._grpc_channel
+
+ @property
+ def operations_client(self) -> operations_v1.OperationsAsyncClient:
+ """Create the client designed to process long-running operations.
+
+ This property caches on the instance; repeated calls return the same
+ client.
+ """
+ # Sanity check: Only create a new client if we do not already have one.
+ if self._operations_client is None:
+ self._operations_client = operations_v1.OperationsAsyncClient(
+ self.grpc_channel
+ )
+
+ # Return the client from cache.
+ return self._operations_client
+
+ @property
+ def create_endpoint(
+ self,
+ ) -> Callable[
+ [endpoint_service.CreateEndpointRequest], Awaitable[operations.Operation]
+ ]:
+ r"""Return a callable for the create endpoint method over gRPC.
+
+ Creates an Endpoint.
+
+ Returns:
+ Callable[[~.CreateEndpointRequest],
+ Awaitable[~.Operation]]:
+ A function that, when called, will call the underlying RPC
+ on the server.
+ """
+ # Generate a "stub function" on-the-fly which will actually make
+ # the request.
+ # gRPC handles serialization and deserialization, so we just need
+ # to pass in the functions for each.
+ if "create_endpoint" not in self._stubs:
+ self._stubs["create_endpoint"] = self.grpc_channel.unary_unary(
+ "/google.cloud.aiplatform.v1.EndpointService/CreateEndpoint",
+ request_serializer=endpoint_service.CreateEndpointRequest.serialize,
+ response_deserializer=operations.Operation.FromString,
+ )
+ return self._stubs["create_endpoint"]
+
+ @property
+ def get_endpoint(
+ self,
+ ) -> Callable[[endpoint_service.GetEndpointRequest], Awaitable[endpoint.Endpoint]]:
+ r"""Return a callable for the get endpoint method over gRPC.
+
+ Gets an Endpoint.
+
+ Returns:
+ Callable[[~.GetEndpointRequest],
+ Awaitable[~.Endpoint]]:
+ A function that, when called, will call the underlying RPC
+ on the server.
+ """
+ # Generate a "stub function" on-the-fly which will actually make
+ # the request.
+ # gRPC handles serialization and deserialization, so we just need
+ # to pass in the functions for each.
+ if "get_endpoint" not in self._stubs:
+ self._stubs["get_endpoint"] = self.grpc_channel.unary_unary(
+ "/google.cloud.aiplatform.v1.EndpointService/GetEndpoint",
+ request_serializer=endpoint_service.GetEndpointRequest.serialize,
+ response_deserializer=endpoint.Endpoint.deserialize,
+ )
+ return self._stubs["get_endpoint"]
+
+ @property
+ def list_endpoints(
+ self,
+ ) -> Callable[
+ [endpoint_service.ListEndpointsRequest],
+ Awaitable[endpoint_service.ListEndpointsResponse],
+ ]:
+ r"""Return a callable for the list endpoints method over gRPC.
+
+ Lists Endpoints in a Location.
+
+ Returns:
+ Callable[[~.ListEndpointsRequest],
+ Awaitable[~.ListEndpointsResponse]]:
+ A function that, when called, will call the underlying RPC
+ on the server.
+ """
+ # Generate a "stub function" on-the-fly which will actually make
+ # the request.
+ # gRPC handles serialization and deserialization, so we just need
+ # to pass in the functions for each.
+ if "list_endpoints" not in self._stubs:
+ self._stubs["list_endpoints"] = self.grpc_channel.unary_unary(
+ "/google.cloud.aiplatform.v1.EndpointService/ListEndpoints",
+ request_serializer=endpoint_service.ListEndpointsRequest.serialize,
+ response_deserializer=endpoint_service.ListEndpointsResponse.deserialize,
+ )
+ return self._stubs["list_endpoints"]
+
+ @property
+ def update_endpoint(
+ self,
+ ) -> Callable[
+ [endpoint_service.UpdateEndpointRequest], Awaitable[gca_endpoint.Endpoint]
+ ]:
+ r"""Return a callable for the update endpoint method over gRPC.
+
+ Updates an Endpoint.
+
+ Returns:
+ Callable[[~.UpdateEndpointRequest],
+ Awaitable[~.Endpoint]]:
+ A function that, when called, will call the underlying RPC
+ on the server.
+ """
+ # Generate a "stub function" on-the-fly which will actually make
+ # the request.
+ # gRPC handles serialization and deserialization, so we just need
+ # to pass in the functions for each.
+ if "update_endpoint" not in self._stubs:
+ self._stubs["update_endpoint"] = self.grpc_channel.unary_unary(
+ "/google.cloud.aiplatform.v1.EndpointService/UpdateEndpoint",
+ request_serializer=endpoint_service.UpdateEndpointRequest.serialize,
+ response_deserializer=gca_endpoint.Endpoint.deserialize,
+ )
+ return self._stubs["update_endpoint"]
+
+ @property
+ def delete_endpoint(
+ self,
+ ) -> Callable[
+ [endpoint_service.DeleteEndpointRequest], Awaitable[operations.Operation]
+ ]:
+ r"""Return a callable for the delete endpoint method over gRPC.
+
+ Deletes an Endpoint.
+
+ Returns:
+ Callable[[~.DeleteEndpointRequest],
+ Awaitable[~.Operation]]:
+ A function that, when called, will call the underlying RPC
+ on the server.
+ """
+ # Generate a "stub function" on-the-fly which will actually make
+ # the request.
+ # gRPC handles serialization and deserialization, so we just need
+ # to pass in the functions for each.
+ if "delete_endpoint" not in self._stubs:
+ self._stubs["delete_endpoint"] = self.grpc_channel.unary_unary(
+ "/google.cloud.aiplatform.v1.EndpointService/DeleteEndpoint",
+ request_serializer=endpoint_service.DeleteEndpointRequest.serialize,
+ response_deserializer=operations.Operation.FromString,
+ )
+ return self._stubs["delete_endpoint"]
+
+ @property
+ def deploy_model(
+ self,
+ ) -> Callable[
+ [endpoint_service.DeployModelRequest], Awaitable[operations.Operation]
+ ]:
+ r"""Return a callable for the deploy model method over gRPC.
+
+ Deploys a Model into this Endpoint, creating a
+ DeployedModel within it.
+
+ Returns:
+ Callable[[~.DeployModelRequest],
+ Awaitable[~.Operation]]:
+ A function that, when called, will call the underlying RPC
+ on the server.
+ """
+ # Generate a "stub function" on-the-fly which will actually make
+ # the request.
+ # gRPC handles serialization and deserialization, so we just need
+ # to pass in the functions for each.
+ if "deploy_model" not in self._stubs:
+ self._stubs["deploy_model"] = self.grpc_channel.unary_unary(
+ "/google.cloud.aiplatform.v1.EndpointService/DeployModel",
+ request_serializer=endpoint_service.DeployModelRequest.serialize,
+ response_deserializer=operations.Operation.FromString,
+ )
+ return self._stubs["deploy_model"]
+
+ @property
+ def undeploy_model(
+ self,
+ ) -> Callable[
+ [endpoint_service.UndeployModelRequest], Awaitable[operations.Operation]
+ ]:
+ r"""Return a callable for the undeploy model method over gRPC.
+
+ Undeploys a Model from an Endpoint, removing a
+ DeployedModel from it, and freeing all resources it's
+ using.
+
+ Returns:
+ Callable[[~.UndeployModelRequest],
+ Awaitable[~.Operation]]:
+ A function that, when called, will call the underlying RPC
+ on the server.
+ """
+ # Generate a "stub function" on-the-fly which will actually make
+ # the request.
+ # gRPC handles serialization and deserialization, so we just need
+ # to pass in the functions for each.
+ if "undeploy_model" not in self._stubs:
+ self._stubs["undeploy_model"] = self.grpc_channel.unary_unary(
+ "/google.cloud.aiplatform.v1.EndpointService/UndeployModel",
+ request_serializer=endpoint_service.UndeployModelRequest.serialize,
+ response_deserializer=operations.Operation.FromString,
+ )
+ return self._stubs["undeploy_model"]
+
+
+__all__ = ("EndpointServiceGrpcAsyncIOTransport",)
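
The same idea for the asyncio transport (again assuming a hypothetical local endpoint): an existing aio.Channel short-circuits credential handling, and the transport itself is constructed synchronously inside the coroutine.

    import asyncio

    from grpc.experimental import aio

    from google.cloud.aiplatform_v1.services.endpoint_service.transports import (
        EndpointServiceGrpcAsyncIOTransport,
    )

    async def main() -> None:
        # The channel is reused as-is; no credential lookup is performed.
        async with aio.insecure_channel("localhost:8080") as channel:
            transport = EndpointServiceGrpcAsyncIOTransport(channel=channel)
            assert transport.grpc_channel is channel

    asyncio.run(main())
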
diff --git a/google/cloud/aiplatform_v1/services/job_service/__init__.py b/google/cloud/aiplatform_v1/services/job_service/__init__.py
new file mode 100644
index 0000000000..5f157047f5
--- /dev/null
+++ b/google/cloud/aiplatform_v1/services/job_service/__init__.py
@@ -0,0 +1,24 @@
+# -*- coding: utf-8 -*-
+
+# Copyright 2020 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+from .client import JobServiceClient
+from .async_client import JobServiceAsyncClient
+
+__all__ = (
+ "JobServiceClient",
+ "JobServiceAsyncClient",
+)
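
Before the async client itself, a hedged end-to-end sketch of how it is meant to be called once this change lands (the project, region, image URI, and machine type below are placeholders, and Application Default Credentials are assumed to be configured):

    import asyncio

    from google.cloud import aiplatform_v1

    async def main() -> None:
        client = aiplatform_v1.JobServiceAsyncClient()
        # A single worker pool running a user-supplied training container.
        custom_job = aiplatform_v1.CustomJob(
            display_name="example-custom-job",
            job_spec=aiplatform_v1.CustomJobSpec(
                worker_pool_specs=[
                    aiplatform_v1.WorkerPoolSpec(
                        machine_spec=aiplatform_v1.MachineSpec(
                            machine_type="n1-standard-4",
                        ),
                        replica_count=1,
                        container_spec=aiplatform_v1.ContainerSpec(
                            image_uri="gcr.io/my-project/training:latest",
                        ),
                    )
                ],
            ),
        )
        response = await client.create_custom_job(
            parent="projects/my-project/locations/us-central1",
            custom_job=custom_job,
        )
        print(response.name)

    asyncio.run(main())
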
diff --git a/google/cloud/aiplatform_v1/services/job_service/async_client.py b/google/cloud/aiplatform_v1/services/job_service/async_client.py
new file mode 100644
index 0000000000..689cb131ea
--- /dev/null
+++ b/google/cloud/aiplatform_v1/services/job_service/async_client.py
@@ -0,0 +1,1874 @@
+# -*- coding: utf-8 -*-
+
+# Copyright 2020 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+from collections import OrderedDict
+import functools
+import re
+from typing import Dict, Sequence, Tuple, Type, Union
+import pkg_resources
+
+import google.api_core.client_options as ClientOptions # type: ignore
+from google.api_core import exceptions # type: ignore
+from google.api_core import gapic_v1 # type: ignore
+from google.api_core import retry as retries # type: ignore
+from google.auth import credentials # type: ignore
+from google.oauth2 import service_account # type: ignore
+
+from google.api_core import operation as ga_operation # type: ignore
+from google.api_core import operation_async # type: ignore
+from google.cloud.aiplatform_v1.services.job_service import pagers
+from google.cloud.aiplatform_v1.types import batch_prediction_job
+from google.cloud.aiplatform_v1.types import (
+ batch_prediction_job as gca_batch_prediction_job,
+)
+from google.cloud.aiplatform_v1.types import completion_stats
+from google.cloud.aiplatform_v1.types import custom_job
+from google.cloud.aiplatform_v1.types import custom_job as gca_custom_job
+from google.cloud.aiplatform_v1.types import data_labeling_job
+from google.cloud.aiplatform_v1.types import data_labeling_job as gca_data_labeling_job
+from google.cloud.aiplatform_v1.types import encryption_spec
+from google.cloud.aiplatform_v1.types import hyperparameter_tuning_job
+from google.cloud.aiplatform_v1.types import (
+ hyperparameter_tuning_job as gca_hyperparameter_tuning_job,
+)
+from google.cloud.aiplatform_v1.types import job_service
+from google.cloud.aiplatform_v1.types import job_state
+from google.cloud.aiplatform_v1.types import machine_resources
+from google.cloud.aiplatform_v1.types import manual_batch_tuning_parameters
+from google.cloud.aiplatform_v1.types import operation as gca_operation
+from google.cloud.aiplatform_v1.types import study
+from google.protobuf import empty_pb2 as empty # type: ignore
+from google.protobuf import struct_pb2 as struct # type: ignore
+from google.protobuf import timestamp_pb2 as timestamp # type: ignore
+from google.rpc import status_pb2 as status # type: ignore
+from google.type import money_pb2 as money # type: ignore
+
+from .transports.base import JobServiceTransport, DEFAULT_CLIENT_INFO
+from .transports.grpc_asyncio import JobServiceGrpcAsyncIOTransport
+from .client import JobServiceClient
+
+
+class JobServiceAsyncClient:
+ """A service for creating and managing AI Platform's jobs."""
+
+ _client: JobServiceClient
+
+ DEFAULT_ENDPOINT = JobServiceClient.DEFAULT_ENDPOINT
+ DEFAULT_MTLS_ENDPOINT = JobServiceClient.DEFAULT_MTLS_ENDPOINT
+
+ batch_prediction_job_path = staticmethod(JobServiceClient.batch_prediction_job_path)
+ parse_batch_prediction_job_path = staticmethod(
+ JobServiceClient.parse_batch_prediction_job_path
+ )
+ custom_job_path = staticmethod(JobServiceClient.custom_job_path)
+ parse_custom_job_path = staticmethod(JobServiceClient.parse_custom_job_path)
+ data_labeling_job_path = staticmethod(JobServiceClient.data_labeling_job_path)
+ parse_data_labeling_job_path = staticmethod(
+ JobServiceClient.parse_data_labeling_job_path
+ )
+ dataset_path = staticmethod(JobServiceClient.dataset_path)
+ parse_dataset_path = staticmethod(JobServiceClient.parse_dataset_path)
+ hyperparameter_tuning_job_path = staticmethod(
+ JobServiceClient.hyperparameter_tuning_job_path
+ )
+ parse_hyperparameter_tuning_job_path = staticmethod(
+ JobServiceClient.parse_hyperparameter_tuning_job_path
+ )
+ model_path = staticmethod(JobServiceClient.model_path)
+ parse_model_path = staticmethod(JobServiceClient.parse_model_path)
+ trial_path = staticmethod(JobServiceClient.trial_path)
+ parse_trial_path = staticmethod(JobServiceClient.parse_trial_path)
+
+ common_billing_account_path = staticmethod(
+ JobServiceClient.common_billing_account_path
+ )
+ parse_common_billing_account_path = staticmethod(
+ JobServiceClient.parse_common_billing_account_path
+ )
+
+ common_folder_path = staticmethod(JobServiceClient.common_folder_path)
+ parse_common_folder_path = staticmethod(JobServiceClient.parse_common_folder_path)
+
+ common_organization_path = staticmethod(JobServiceClient.common_organization_path)
+ parse_common_organization_path = staticmethod(
+ JobServiceClient.parse_common_organization_path
+ )
+
+ common_project_path = staticmethod(JobServiceClient.common_project_path)
+ parse_common_project_path = staticmethod(JobServiceClient.parse_common_project_path)
+
+ common_location_path = staticmethod(JobServiceClient.common_location_path)
+ parse_common_location_path = staticmethod(
+ JobServiceClient.parse_common_location_path
+ )
+
+ from_service_account_info = JobServiceClient.from_service_account_info
+ from_service_account_file = JobServiceClient.from_service_account_file
+ from_service_account_json = from_service_account_file
+
+ @property
+ def transport(self) -> JobServiceTransport:
+ """Return the transport used by the client instance.
+
+ Returns:
+ JobServiceTransport: The transport used by the client instance.
+ """
+ return self._client.transport
+
+ get_transport_class = functools.partial(
+ type(JobServiceClient).get_transport_class, type(JobServiceClient)
+ )
+
+ def __init__(
+ self,
+ *,
+ credentials: credentials.Credentials = None,
+ transport: Union[str, JobServiceTransport] = "grpc_asyncio",
+ client_options: ClientOptions = None,
+ client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
+ ) -> None:
+ """Instantiate the job service client.
+
+ Args:
+ credentials (Optional[google.auth.credentials.Credentials]): The
+ authorization credentials to attach to requests. These
+ credentials identify the application to the service; if none
+ are specified, the client will attempt to ascertain the
+ credentials from the environment.
+ transport (Union[str, ~.JobServiceTransport]): The
+ transport to use. If set to None, a transport is chosen
+ automatically.
+ client_options (ClientOptions): Custom options for the client. It
+ won't take effect if a ``transport`` instance is provided.
+ (1) The ``api_endpoint`` property can be used to override the
+ default endpoint provided by the client. The GOOGLE_API_USE_MTLS_ENDPOINT
+ environment variable can also be used to override the endpoint:
+ "always" (always use the default mTLS endpoint), "never" (always
+ use the default regular endpoint) and "auto" (auto switch to the
+ default mTLS endpoint if client certificate is present, this is
+ the default value). However, the ``api_endpoint`` property takes
+ precedence if provided.
+ (2) If the GOOGLE_API_USE_CLIENT_CERTIFICATE environment variable
+ is "true", then the ``client_cert_source`` property can be used
+ to provide client certificate for mutual TLS transport. If
+ not provided, the default SSL client certificate will be used if
+ present. If GOOGLE_API_USE_CLIENT_CERTIFICATE is "false" or not
+ set, no client certificate will be used.
+
+ Raises:
+ google.auth.exceptions.MutualTLSChannelError: If mutual TLS transport
+ creation failed for any reason.
+ """
+
+ self._client = JobServiceClient(
+ credentials=credentials,
+ transport=transport,
+ client_options=client_options,
+ client_info=client_info,
+ )
+
+ async def create_custom_job(
+ self,
+ request: job_service.CreateCustomJobRequest = None,
+ *,
+ parent: str = None,
+ custom_job: gca_custom_job.CustomJob = None,
+ retry: retries.Retry = gapic_v1.method.DEFAULT,
+ timeout: float = None,
+ metadata: Sequence[Tuple[str, str]] = (),
+ ) -> gca_custom_job.CustomJob:
+ r"""Creates a CustomJob. A created CustomJob right away
+ will be attempted to be run.
+
+ Args:
+ request (:class:`google.cloud.aiplatform_v1.types.CreateCustomJobRequest`):
+ The request object. Request message for
+ ``JobService.CreateCustomJob``.
+ parent (:class:`str`):
+ Required. The resource name of the Location to create
+ the CustomJob in. Format:
+ ``projects/{project}/locations/{location}``
+
+ This corresponds to the ``parent`` field
+ on the ``request`` instance; if ``request`` is provided, this
+ should not be set.
+ custom_job (:class:`google.cloud.aiplatform_v1.types.CustomJob`):
+ Required. The CustomJob to create.
+ This corresponds to the ``custom_job`` field
+ on the ``request`` instance; if ``request`` is provided, this
+ should not be set.
+
+ retry (google.api_core.retry.Retry): Designation of what errors, if any,
+ should be retried.
+ timeout (float): The timeout for this request.
+ metadata (Sequence[Tuple[str, str]]): Strings which should be
+ sent along with the request as metadata.
+
+ Returns:
+ google.cloud.aiplatform_v1.types.CustomJob:
+ Represents a job that runs custom
+ workloads such as a Docker container or
+ a Python package. A CustomJob can have
+ multiple worker pools and each worker
+ pool can have its own machine and input
+ spec. A CustomJob will be cleaned up
+ once the job enters terminal state
+ (failed or succeeded).
+
+ """
+ # Create or coerce a protobuf request object.
+ # Sanity check: If we got a request object, we should *not* have
+ # gotten any keyword arguments that map to the request.
+ has_flattened_params = any([parent, custom_job])
+ if request is not None and has_flattened_params:
+ raise ValueError(
+ "If the `request` argument is set, then none of "
+ "the individual field arguments should be set."
+ )
+
+ request = job_service.CreateCustomJobRequest(request)
+
+ # If we have keyword arguments corresponding to fields on the
+ # request, apply these.
+
+ if parent is not None:
+ request.parent = parent
+ if custom_job is not None:
+ request.custom_job = custom_job
+
+ # Wrap the RPC method; this adds retry and timeout information,
+ # and friendly error handling.
+ rpc = gapic_v1.method_async.wrap_method(
+ self._client._transport.create_custom_job,
+ default_timeout=None,
+ client_info=DEFAULT_CLIENT_INFO,
+ )
+
+ # Certain fields should be provided within the metadata header;
+ # add these here.
+ metadata = tuple(metadata) + (
+ gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)),
+ )
+
+ # Send the request.
+ response = await rpc(request, retry=retry, timeout=timeout, metadata=metadata,)
+
+ # Done; return the response.
+ return response
+
+ async def get_custom_job(
+ self,
+ request: job_service.GetCustomJobRequest = None,
+ *,
+ name: str = None,
+ retry: retries.Retry = gapic_v1.method.DEFAULT,
+ timeout: float = None,
+ metadata: Sequence[Tuple[str, str]] = (),
+ ) -> custom_job.CustomJob:
+ r"""Gets a CustomJob.
+
+ Args:
+ request (:class:`google.cloud.aiplatform_v1.types.GetCustomJobRequest`):
+ The request object. Request message for
+ ``JobService.GetCustomJob``.
+ name (:class:`str`):
+ Required. The name of the CustomJob resource. Format:
+ ``projects/{project}/locations/{location}/customJobs/{custom_job}``
+
+ This corresponds to the ``name`` field
+ on the ``request`` instance; if ``request`` is provided, this
+ should not be set.
+
+ retry (google.api_core.retry.Retry): Designation of what errors, if any,
+ should be retried.
+ timeout (float): The timeout for this request.
+ metadata (Sequence[Tuple[str, str]]): Strings which should be
+ sent along with the request as metadata.
+
+ Returns:
+ google.cloud.aiplatform_v1.types.CustomJob:
+ Represents a job that runs custom
+ workloads such as a Docker container or
+ a Python package. A CustomJob can have
+ multiple worker pools and each worker
+ pool can have its own machine and input
+ spec. A CustomJob will be cleaned up
+ once the job enters terminal state
+ (failed or succeeded).
+
+ """
+ # Create or coerce a protobuf request object.
+ # Sanity check: If we got a request object, we should *not* have
+ # gotten any keyword arguments that map to the request.
+ has_flattened_params = any([name])
+ if request is not None and has_flattened_params:
+ raise ValueError(
+ "If the `request` argument is set, then none of "
+ "the individual field arguments should be set."
+ )
+
+ request = job_service.GetCustomJobRequest(request)
+
+ # If we have keyword arguments corresponding to fields on the
+ # request, apply these.
+
+ if name is not None:
+ request.name = name
+
+ # Wrap the RPC method; this adds retry and timeout information,
+ # and friendly error handling.
+ rpc = gapic_v1.method_async.wrap_method(
+ self._client._transport.get_custom_job,
+ default_timeout=None,
+ client_info=DEFAULT_CLIENT_INFO,
+ )
+
+ # Certain fields should be provided within the metadata header;
+ # add these here.
+ metadata = tuple(metadata) + (
+ gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)),
+ )
+
+ # Send the request.
+ response = await rpc(request, retry=retry, timeout=timeout, metadata=metadata,)
+
+ # Done; return the response.
+ return response
+
+ async def list_custom_jobs(
+ self,
+ request: job_service.ListCustomJobsRequest = None,
+ *,
+ parent: str = None,
+ retry: retries.Retry = gapic_v1.method.DEFAULT,
+ timeout: float = None,
+ metadata: Sequence[Tuple[str, str]] = (),
+ ) -> pagers.ListCustomJobsAsyncPager:
+ r"""Lists CustomJobs in a Location.
+
+ Args:
+ request (:class:`google.cloud.aiplatform_v1.types.ListCustomJobsRequest`):
+ The request object. Request message for
+ ``JobService.ListCustomJobs``.
+ parent (:class:`str`):
+ Required. The resource name of the Location to list the
+ CustomJobs from. Format:
+ ``projects/{project}/locations/{location}``
+
+ This corresponds to the ``parent`` field
+ on the ``request`` instance; if ``request`` is provided, this
+ should not be set.
+
+ retry (google.api_core.retry.Retry): Designation of what errors, if any,
+ should be retried.
+ timeout (float): The timeout for this request.
+ metadata (Sequence[Tuple[str, str]]): Strings which should be
+ sent along with the request as metadata.
+
+ Returns:
+ google.cloud.aiplatform_v1.services.job_service.pagers.ListCustomJobsAsyncPager:
+ Response message for
+ ``JobService.ListCustomJobs``
+
+ Iterating over this object will yield results and
+ resolve additional pages automatically.
+
+ """
+ # Create or coerce a protobuf request object.
+ # Sanity check: If we got a request object, we should *not* have
+ # gotten any keyword arguments that map to the request.
+ has_flattened_params = any([parent])
+ if request is not None and has_flattened_params:
+ raise ValueError(
+ "If the `request` argument is set, then none of "
+ "the individual field arguments should be set."
+ )
+
+ request = job_service.ListCustomJobsRequest(request)
+
+ # If we have keyword arguments corresponding to fields on the
+ # request, apply these.
+
+ if parent is not None:
+ request.parent = parent
+
+ # Wrap the RPC method; this adds retry and timeout information,
+ # and friendly error handling.
+ rpc = gapic_v1.method_async.wrap_method(
+ self._client._transport.list_custom_jobs,
+ default_timeout=None,
+ client_info=DEFAULT_CLIENT_INFO,
+ )
+
+ # Certain fields should be provided within the metadata header;
+ # add these here.
+ metadata = tuple(metadata) + (
+ gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)),
+ )
+
+ # Send the request.
+ response = await rpc(request, retry=retry, timeout=timeout, metadata=metadata,)
+
+ # This method is paged; wrap the response in a pager, which provides
+ # an `__aiter__` convenience method.
+ response = pagers.ListCustomJobsAsyncPager(
+ method=rpc, request=request, response=response, metadata=metadata,
+ )
+
+ # Done; return the response.
+ return response
+
+ async def delete_custom_job(
+ self,
+ request: job_service.DeleteCustomJobRequest = None,
+ *,
+ name: str = None,
+ retry: retries.Retry = gapic_v1.method.DEFAULT,
+ timeout: float = None,
+ metadata: Sequence[Tuple[str, str]] = (),
+ ) -> operation_async.AsyncOperation:
+ r"""Deletes a CustomJob.
+
+ Args:
+ request (:class:`google.cloud.aiplatform_v1.types.DeleteCustomJobRequest`):
+ The request object. Request message for
+ ``JobService.DeleteCustomJob``.
+ name (:class:`str`):
+ Required. The name of the CustomJob resource to be
+ deleted. Format:
+ ``projects/{project}/locations/{location}/customJobs/{custom_job}``
+
+ This corresponds to the ``name`` field
+ on the ``request`` instance; if ``request`` is provided, this
+ should not be set.
+
+ retry (google.api_core.retry.Retry): Designation of what errors, if any,
+ should be retried.
+ timeout (float): The timeout for this request.
+ metadata (Sequence[Tuple[str, str]]): Strings which should be
+ sent along with the request as metadata.
+
+ Returns:
+ google.api_core.operation_async.AsyncOperation:
+ An object representing a long-running operation.
+
+ The result type for the operation will be :class:`google.protobuf.empty_pb2.Empty` A generic empty message that you can re-use to avoid defining duplicated
+ empty messages in your APIs. A typical example is to
+ use it as the request or the response type of an API
+ method. For instance:
+
+ service Foo {
+ rpc Bar(google.protobuf.Empty) returns
+ (google.protobuf.Empty);
+
+ }
+
+                   The JSON representation for Empty is an empty JSON
+ object {}.
+
+ """
+ # Create or coerce a protobuf request object.
+ # Sanity check: If we got a request object, we should *not* have
+ # gotten any keyword arguments that map to the request.
+ has_flattened_params = any([name])
+ if request is not None and has_flattened_params:
+ raise ValueError(
+ "If the `request` argument is set, then none of "
+ "the individual field arguments should be set."
+ )
+
+ request = job_service.DeleteCustomJobRequest(request)
+
+ # If we have keyword arguments corresponding to fields on the
+ # request, apply these.
+
+ if name is not None:
+ request.name = name
+
+ # Wrap the RPC method; this adds retry and timeout information,
+ # and friendly error handling.
+ rpc = gapic_v1.method_async.wrap_method(
+ self._client._transport.delete_custom_job,
+ default_timeout=None,
+ client_info=DEFAULT_CLIENT_INFO,
+ )
+
+ # Certain fields should be provided within the metadata header;
+ # add these here.
+ metadata = tuple(metadata) + (
+ gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)),
+ )
+
+ # Send the request.
+ response = await rpc(request, retry=retry, timeout=timeout, metadata=metadata,)
+
+ # Wrap the response in an operation future.
+ response = operation_async.from_gapic(
+ response,
+ self._client._transport.operations_client,
+ empty.Empty,
+ metadata_type=gca_operation.DeleteOperationMetadata,
+ )
+
+ # Done; return the response.
+ return response
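+        # Illustrative handling of the returned long-running operation
+        # (editor's sketch; the resource name below is a hypothetical
+        # placeholder):
+        #
+        #     operation = await client.delete_custom_job(
+        #         name="projects/my-project/locations/us-central1/customJobs/123",
+        #     )
+        #     await operation.result()  # resolves to google.protobuf.empty_pb2.Empty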
+
+ async def cancel_custom_job(
+ self,
+ request: job_service.CancelCustomJobRequest = None,
+ *,
+ name: str = None,
+ retry: retries.Retry = gapic_v1.method.DEFAULT,
+ timeout: float = None,
+ metadata: Sequence[Tuple[str, str]] = (),
+ ) -> None:
+ r"""Cancels a CustomJob. Starts asynchronous cancellation on the
+ CustomJob. The server makes a best effort to cancel the job, but
+ success is not guaranteed. Clients can use
+ ``JobService.GetCustomJob``
+ or other methods to check whether the cancellation succeeded or
+ whether the job completed despite cancellation. On successful
+ cancellation, the CustomJob is not deleted; instead it becomes a
+ job with a
+ ``CustomJob.error``
+ value with a ``google.rpc.Status.code`` of
+ 1, corresponding to ``Code.CANCELLED``, and
+ ``CustomJob.state`` is
+ set to ``CANCELLED``.
+
+ Args:
+ request (:class:`google.cloud.aiplatform_v1.types.CancelCustomJobRequest`):
+ The request object. Request message for
+ ``JobService.CancelCustomJob``.
+ name (:class:`str`):
+ Required. The name of the CustomJob to cancel. Format:
+ ``projects/{project}/locations/{location}/customJobs/{custom_job}``
+
+ This corresponds to the ``name`` field
+ on the ``request`` instance; if ``request`` is provided, this
+ should not be set.
+
+ retry (google.api_core.retry.Retry): Designation of what errors, if any,
+ should be retried.
+ timeout (float): The timeout for this request.
+ metadata (Sequence[Tuple[str, str]]): Strings which should be
+ sent along with the request as metadata.
+ """
+ # Create or coerce a protobuf request object.
+ # Sanity check: If we got a request object, we should *not* have
+ # gotten any keyword arguments that map to the request.
+ has_flattened_params = any([name])
+ if request is not None and has_flattened_params:
+ raise ValueError(
+ "If the `request` argument is set, then none of "
+ "the individual field arguments should be set."
+ )
+
+ request = job_service.CancelCustomJobRequest(request)
+
+ # If we have keyword arguments corresponding to fields on the
+ # request, apply these.
+
+ if name is not None:
+ request.name = name
+
+ # Wrap the RPC method; this adds retry and timeout information,
+ # and friendly error handling.
+ rpc = gapic_v1.method_async.wrap_method(
+ self._client._transport.cancel_custom_job,
+ default_timeout=None,
+ client_info=DEFAULT_CLIENT_INFO,
+ )
+
+ # Certain fields should be provided within the metadata header;
+ # add these here.
+ metadata = tuple(metadata) + (
+ gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)),
+ )
+
+ # Send the request.
+ await rpc(
+ request, retry=retry, timeout=timeout, metadata=metadata,
+ )
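+        # Illustrative cancel-then-poll flow (editor's sketch; assumes this
+        # client's get_custom_job method and a hypothetical job_name value):
+        #
+        #     await client.cancel_custom_job(name=job_name)
+        #     job = await client.get_custom_job(name=job_name)
+        #     if job.state == job_state.JobState.JOB_STATE_CANCELLED:
+        #         print("cancellation took effect")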
+
+ async def create_data_labeling_job(
+ self,
+ request: job_service.CreateDataLabelingJobRequest = None,
+ *,
+ parent: str = None,
+ data_labeling_job: gca_data_labeling_job.DataLabelingJob = None,
+ retry: retries.Retry = gapic_v1.method.DEFAULT,
+ timeout: float = None,
+ metadata: Sequence[Tuple[str, str]] = (),
+ ) -> gca_data_labeling_job.DataLabelingJob:
+ r"""Creates a DataLabelingJob.
+
+ Args:
+ request (:class:`google.cloud.aiplatform_v1.types.CreateDataLabelingJobRequest`):
+ The request object. Request message for
+ [DataLabelingJobService.CreateDataLabelingJob][].
+ parent (:class:`str`):
+ Required. The parent of the DataLabelingJob. Format:
+ ``projects/{project}/locations/{location}``
+
+ This corresponds to the ``parent`` field
+ on the ``request`` instance; if ``request`` is provided, this
+ should not be set.
+ data_labeling_job (:class:`google.cloud.aiplatform_v1.types.DataLabelingJob`):
+ Required. The DataLabelingJob to
+ create.
+
+ This corresponds to the ``data_labeling_job`` field
+ on the ``request`` instance; if ``request`` is provided, this
+ should not be set.
+
+ retry (google.api_core.retry.Retry): Designation of what errors, if any,
+ should be retried.
+ timeout (float): The timeout for this request.
+ metadata (Sequence[Tuple[str, str]]): Strings which should be
+ sent along with the request as metadata.
+
+ Returns:
+ google.cloud.aiplatform_v1.types.DataLabelingJob:
+ DataLabelingJob is used to trigger a
+ human labeling job on unlabeled data
+ from the following Dataset:
+
+ """
+ # Create or coerce a protobuf request object.
+ # Sanity check: If we got a request object, we should *not* have
+ # gotten any keyword arguments that map to the request.
+ has_flattened_params = any([parent, data_labeling_job])
+ if request is not None and has_flattened_params:
+ raise ValueError(
+ "If the `request` argument is set, then none of "
+ "the individual field arguments should be set."
+ )
+
+ request = job_service.CreateDataLabelingJobRequest(request)
+
+ # If we have keyword arguments corresponding to fields on the
+ # request, apply these.
+
+ if parent is not None:
+ request.parent = parent
+ if data_labeling_job is not None:
+ request.data_labeling_job = data_labeling_job
+
+ # Wrap the RPC method; this adds retry and timeout information,
+ # and friendly error handling.
+ rpc = gapic_v1.method_async.wrap_method(
+ self._client._transport.create_data_labeling_job,
+ default_timeout=None,
+ client_info=DEFAULT_CLIENT_INFO,
+ )
+
+ # Certain fields should be provided within the metadata header;
+ # add these here.
+ metadata = tuple(metadata) + (
+ gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)),
+ )
+
+ # Send the request.
+ response = await rpc(request, retry=retry, timeout=timeout, metadata=metadata,)
+
+ # Done; return the response.
+ return response
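+        # Illustrative calls (editor's sketch; the values are hypothetical).
+        # Pass either a request object or the flattened arguments, never both,
+        # since mixing them raises ValueError above:
+        #
+        #     job = await client.create_data_labeling_job(
+        #         request=job_service.CreateDataLabelingJobRequest(
+        #             parent="projects/my-project/locations/us-central1",
+        #             data_labeling_job=my_data_labeling_job,
+        #         ),
+        #     )
+        #     # or, equivalently:
+        #     job = await client.create_data_labeling_job(
+        #         parent="projects/my-project/locations/us-central1",
+        #         data_labeling_job=my_data_labeling_job,
+        #     )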
+
+ async def get_data_labeling_job(
+ self,
+ request: job_service.GetDataLabelingJobRequest = None,
+ *,
+ name: str = None,
+ retry: retries.Retry = gapic_v1.method.DEFAULT,
+ timeout: float = None,
+ metadata: Sequence[Tuple[str, str]] = (),
+ ) -> data_labeling_job.DataLabelingJob:
+ r"""Gets a DataLabelingJob.
+
+ Args:
+ request (:class:`google.cloud.aiplatform_v1.types.GetDataLabelingJobRequest`):
+ The request object. Request message for
+ [DataLabelingJobService.GetDataLabelingJob][].
+ name (:class:`str`):
+ Required. The name of the DataLabelingJob. Format:
+
+ ``projects/{project}/locations/{location}/dataLabelingJobs/{data_labeling_job}``
+
+ This corresponds to the ``name`` field
+ on the ``request`` instance; if ``request`` is provided, this
+ should not be set.
+
+ retry (google.api_core.retry.Retry): Designation of what errors, if any,
+ should be retried.
+ timeout (float): The timeout for this request.
+ metadata (Sequence[Tuple[str, str]]): Strings which should be
+ sent along with the request as metadata.
+
+ Returns:
+ google.cloud.aiplatform_v1.types.DataLabelingJob:
+ DataLabelingJob is used to trigger a
+ human labeling job on unlabeled data
+ from the following Dataset:
+
+ """
+ # Create or coerce a protobuf request object.
+ # Sanity check: If we got a request object, we should *not* have
+ # gotten any keyword arguments that map to the request.
+ has_flattened_params = any([name])
+ if request is not None and has_flattened_params:
+ raise ValueError(
+ "If the `request` argument is set, then none of "
+ "the individual field arguments should be set."
+ )
+
+ request = job_service.GetDataLabelingJobRequest(request)
+
+ # If we have keyword arguments corresponding to fields on the
+ # request, apply these.
+
+ if name is not None:
+ request.name = name
+
+ # Wrap the RPC method; this adds retry and timeout information,
+ # and friendly error handling.
+ rpc = gapic_v1.method_async.wrap_method(
+ self._client._transport.get_data_labeling_job,
+ default_timeout=None,
+ client_info=DEFAULT_CLIENT_INFO,
+ )
+
+ # Certain fields should be provided within the metadata header;
+ # add these here.
+ metadata = tuple(metadata) + (
+ gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)),
+ )
+
+ # Send the request.
+ response = await rpc(request, retry=retry, timeout=timeout, metadata=metadata,)
+
+ # Done; return the response.
+ return response
+
+ async def list_data_labeling_jobs(
+ self,
+ request: job_service.ListDataLabelingJobsRequest = None,
+ *,
+ parent: str = None,
+ retry: retries.Retry = gapic_v1.method.DEFAULT,
+ timeout: float = None,
+ metadata: Sequence[Tuple[str, str]] = (),
+ ) -> pagers.ListDataLabelingJobsAsyncPager:
+ r"""Lists DataLabelingJobs in a Location.
+
+ Args:
+ request (:class:`google.cloud.aiplatform_v1.types.ListDataLabelingJobsRequest`):
+ The request object. Request message for
+ [DataLabelingJobService.ListDataLabelingJobs][].
+ parent (:class:`str`):
+ Required. The parent of the DataLabelingJob. Format:
+ ``projects/{project}/locations/{location}``
+
+ This corresponds to the ``parent`` field
+ on the ``request`` instance; if ``request`` is provided, this
+ should not be set.
+
+ retry (google.api_core.retry.Retry): Designation of what errors, if any,
+ should be retried.
+ timeout (float): The timeout for this request.
+ metadata (Sequence[Tuple[str, str]]): Strings which should be
+ sent along with the request as metadata.
+
+ Returns:
+ google.cloud.aiplatform_v1.services.job_service.pagers.ListDataLabelingJobsAsyncPager:
+ Response message for
+ ``JobService.ListDataLabelingJobs``.
+
+ Iterating over this object will yield results and
+ resolve additional pages automatically.
+
+ """
+ # Create or coerce a protobuf request object.
+ # Sanity check: If we got a request object, we should *not* have
+ # gotten any keyword arguments that map to the request.
+ has_flattened_params = any([parent])
+ if request is not None and has_flattened_params:
+ raise ValueError(
+ "If the `request` argument is set, then none of "
+ "the individual field arguments should be set."
+ )
+
+ request = job_service.ListDataLabelingJobsRequest(request)
+
+ # If we have keyword arguments corresponding to fields on the
+ # request, apply these.
+
+ if parent is not None:
+ request.parent = parent
+
+ # Wrap the RPC method; this adds retry and timeout information,
+ # and friendly error handling.
+ rpc = gapic_v1.method_async.wrap_method(
+ self._client._transport.list_data_labeling_jobs,
+ default_timeout=None,
+ client_info=DEFAULT_CLIENT_INFO,
+ )
+
+ # Certain fields should be provided within the metadata header;
+ # add these here.
+ metadata = tuple(metadata) + (
+ gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)),
+ )
+
+ # Send the request.
+ response = await rpc(request, retry=retry, timeout=timeout, metadata=metadata,)
+
+ # This method is paged; wrap the response in a pager, which provides
+ # an `__aiter__` convenience method.
+ response = pagers.ListDataLabelingJobsAsyncPager(
+ method=rpc, request=request, response=response, metadata=metadata,
+ )
+
+ # Done; return the response.
+ return response
+
+ async def delete_data_labeling_job(
+ self,
+ request: job_service.DeleteDataLabelingJobRequest = None,
+ *,
+ name: str = None,
+ retry: retries.Retry = gapic_v1.method.DEFAULT,
+ timeout: float = None,
+ metadata: Sequence[Tuple[str, str]] = (),
+ ) -> operation_async.AsyncOperation:
+ r"""Deletes a DataLabelingJob.
+
+ Args:
+ request (:class:`google.cloud.aiplatform_v1.types.DeleteDataLabelingJobRequest`):
+ The request object. Request message for
+ ``JobService.DeleteDataLabelingJob``.
+ name (:class:`str`):
+ Required. The name of the DataLabelingJob to be deleted.
+ Format:
+
+ ``projects/{project}/locations/{location}/dataLabelingJobs/{data_labeling_job}``
+
+ This corresponds to the ``name`` field
+ on the ``request`` instance; if ``request`` is provided, this
+ should not be set.
+
+ retry (google.api_core.retry.Retry): Designation of what errors, if any,
+ should be retried.
+ timeout (float): The timeout for this request.
+ metadata (Sequence[Tuple[str, str]]): Strings which should be
+ sent along with the request as metadata.
+
+ Returns:
+ google.api_core.operation_async.AsyncOperation:
+ An object representing a long-running operation.
+
+                The result type for the operation will be :class:`google.protobuf.empty_pb2.Empty`. A generic empty message that you can re-use to avoid defining duplicated
+ empty messages in your APIs. A typical example is to
+ use it as the request or the response type of an API
+ method. For instance:
+
+ service Foo {
+ rpc Bar(google.protobuf.Empty) returns
+ (google.protobuf.Empty);
+
+ }
+
+                   The JSON representation for Empty is an empty JSON
+ object {}.
+
+ """
+ # Create or coerce a protobuf request object.
+ # Sanity check: If we got a request object, we should *not* have
+ # gotten any keyword arguments that map to the request.
+ has_flattened_params = any([name])
+ if request is not None and has_flattened_params:
+ raise ValueError(
+ "If the `request` argument is set, then none of "
+ "the individual field arguments should be set."
+ )
+
+ request = job_service.DeleteDataLabelingJobRequest(request)
+
+ # If we have keyword arguments corresponding to fields on the
+ # request, apply these.
+
+ if name is not None:
+ request.name = name
+
+ # Wrap the RPC method; this adds retry and timeout information,
+ # and friendly error handling.
+ rpc = gapic_v1.method_async.wrap_method(
+ self._client._transport.delete_data_labeling_job,
+ default_timeout=None,
+ client_info=DEFAULT_CLIENT_INFO,
+ )
+
+ # Certain fields should be provided within the metadata header;
+ # add these here.
+ metadata = tuple(metadata) + (
+ gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)),
+ )
+
+ # Send the request.
+ response = await rpc(request, retry=retry, timeout=timeout, metadata=metadata,)
+
+ # Wrap the response in an operation future.
+ response = operation_async.from_gapic(
+ response,
+ self._client._transport.operations_client,
+ empty.Empty,
+ metadata_type=gca_operation.DeleteOperationMetadata,
+ )
+
+ # Done; return the response.
+ return response
+
+ async def cancel_data_labeling_job(
+ self,
+ request: job_service.CancelDataLabelingJobRequest = None,
+ *,
+ name: str = None,
+ retry: retries.Retry = gapic_v1.method.DEFAULT,
+ timeout: float = None,
+ metadata: Sequence[Tuple[str, str]] = (),
+ ) -> None:
+ r"""Cancels a DataLabelingJob. Success of cancellation is
+ not guaranteed.
+
+ Args:
+ request (:class:`google.cloud.aiplatform_v1.types.CancelDataLabelingJobRequest`):
+ The request object. Request message for
+ [DataLabelingJobService.CancelDataLabelingJob][].
+ name (:class:`str`):
+ Required. The name of the DataLabelingJob. Format:
+
+ ``projects/{project}/locations/{location}/dataLabelingJobs/{data_labeling_job}``
+
+ This corresponds to the ``name`` field
+ on the ``request`` instance; if ``request`` is provided, this
+ should not be set.
+
+ retry (google.api_core.retry.Retry): Designation of what errors, if any,
+ should be retried.
+ timeout (float): The timeout for this request.
+ metadata (Sequence[Tuple[str, str]]): Strings which should be
+ sent along with the request as metadata.
+ """
+ # Create or coerce a protobuf request object.
+ # Sanity check: If we got a request object, we should *not* have
+ # gotten any keyword arguments that map to the request.
+ has_flattened_params = any([name])
+ if request is not None and has_flattened_params:
+ raise ValueError(
+ "If the `request` argument is set, then none of "
+ "the individual field arguments should be set."
+ )
+
+ request = job_service.CancelDataLabelingJobRequest(request)
+
+ # If we have keyword arguments corresponding to fields on the
+ # request, apply these.
+
+ if name is not None:
+ request.name = name
+
+ # Wrap the RPC method; this adds retry and timeout information,
+ # and friendly error handling.
+ rpc = gapic_v1.method_async.wrap_method(
+ self._client._transport.cancel_data_labeling_job,
+ default_timeout=None,
+ client_info=DEFAULT_CLIENT_INFO,
+ )
+
+ # Certain fields should be provided within the metadata header;
+ # add these here.
+ metadata = tuple(metadata) + (
+ gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)),
+ )
+
+ # Send the request.
+ await rpc(
+ request, retry=retry, timeout=timeout, metadata=metadata,
+ )
+
+ async def create_hyperparameter_tuning_job(
+ self,
+ request: job_service.CreateHyperparameterTuningJobRequest = None,
+ *,
+ parent: str = None,
+ hyperparameter_tuning_job: gca_hyperparameter_tuning_job.HyperparameterTuningJob = None,
+ retry: retries.Retry = gapic_v1.method.DEFAULT,
+ timeout: float = None,
+ metadata: Sequence[Tuple[str, str]] = (),
+ ) -> gca_hyperparameter_tuning_job.HyperparameterTuningJob:
+        r"""Creates a HyperparameterTuningJob.
+
+ Args:
+ request (:class:`google.cloud.aiplatform_v1.types.CreateHyperparameterTuningJobRequest`):
+ The request object. Request message for
+ ``JobService.CreateHyperparameterTuningJob``.
+ parent (:class:`str`):
+ Required. The resource name of the Location to create
+ the HyperparameterTuningJob in. Format:
+ ``projects/{project}/locations/{location}``
+
+ This corresponds to the ``parent`` field
+ on the ``request`` instance; if ``request`` is provided, this
+ should not be set.
+ hyperparameter_tuning_job (:class:`google.cloud.aiplatform_v1.types.HyperparameterTuningJob`):
+ Required. The HyperparameterTuningJob
+ to create.
+
+ This corresponds to the ``hyperparameter_tuning_job`` field
+ on the ``request`` instance; if ``request`` is provided, this
+ should not be set.
+
+ retry (google.api_core.retry.Retry): Designation of what errors, if any,
+ should be retried.
+ timeout (float): The timeout for this request.
+ metadata (Sequence[Tuple[str, str]]): Strings which should be
+ sent along with the request as metadata.
+
+ Returns:
+ google.cloud.aiplatform_v1.types.HyperparameterTuningJob:
+ Represents a HyperparameterTuningJob.
+ A HyperparameterTuningJob has a Study
+ specification and multiple CustomJobs
+ with identical CustomJob specification.
+
+ """
+ # Create or coerce a protobuf request object.
+ # Sanity check: If we got a request object, we should *not* have
+ # gotten any keyword arguments that map to the request.
+ has_flattened_params = any([parent, hyperparameter_tuning_job])
+ if request is not None and has_flattened_params:
+ raise ValueError(
+ "If the `request` argument is set, then none of "
+ "the individual field arguments should be set."
+ )
+
+ request = job_service.CreateHyperparameterTuningJobRequest(request)
+
+ # If we have keyword arguments corresponding to fields on the
+ # request, apply these.
+
+ if parent is not None:
+ request.parent = parent
+ if hyperparameter_tuning_job is not None:
+ request.hyperparameter_tuning_job = hyperparameter_tuning_job
+
+ # Wrap the RPC method; this adds retry and timeout information,
+ # and friendly error handling.
+ rpc = gapic_v1.method_async.wrap_method(
+ self._client._transport.create_hyperparameter_tuning_job,
+ default_timeout=None,
+ client_info=DEFAULT_CLIENT_INFO,
+ )
+
+ # Certain fields should be provided within the metadata header;
+ # add these here.
+ metadata = tuple(metadata) + (
+ gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)),
+ )
+
+ # Send the request.
+ response = await rpc(request, retry=retry, timeout=timeout, metadata=metadata,)
+
+ # Done; return the response.
+ return response
+
+ async def get_hyperparameter_tuning_job(
+ self,
+ request: job_service.GetHyperparameterTuningJobRequest = None,
+ *,
+ name: str = None,
+ retry: retries.Retry = gapic_v1.method.DEFAULT,
+ timeout: float = None,
+ metadata: Sequence[Tuple[str, str]] = (),
+ ) -> hyperparameter_tuning_job.HyperparameterTuningJob:
+        r"""Gets a HyperparameterTuningJob.
+
+ Args:
+ request (:class:`google.cloud.aiplatform_v1.types.GetHyperparameterTuningJobRequest`):
+ The request object. Request message for
+ ``JobService.GetHyperparameterTuningJob``.
+ name (:class:`str`):
+ Required. The name of the HyperparameterTuningJob
+ resource. Format:
+
+ ``projects/{project}/locations/{location}/hyperparameterTuningJobs/{hyperparameter_tuning_job}``
+
+ This corresponds to the ``name`` field
+ on the ``request`` instance; if ``request`` is provided, this
+ should not be set.
+
+ retry (google.api_core.retry.Retry): Designation of what errors, if any,
+ should be retried.
+ timeout (float): The timeout for this request.
+ metadata (Sequence[Tuple[str, str]]): Strings which should be
+ sent along with the request as metadata.
+
+ Returns:
+ google.cloud.aiplatform_v1.types.HyperparameterTuningJob:
+ Represents a HyperparameterTuningJob.
+ A HyperparameterTuningJob has a Study
+ specification and multiple CustomJobs
+ with identical CustomJob specification.
+
+ """
+ # Create or coerce a protobuf request object.
+ # Sanity check: If we got a request object, we should *not* have
+ # gotten any keyword arguments that map to the request.
+ has_flattened_params = any([name])
+ if request is not None and has_flattened_params:
+ raise ValueError(
+ "If the `request` argument is set, then none of "
+ "the individual field arguments should be set."
+ )
+
+ request = job_service.GetHyperparameterTuningJobRequest(request)
+
+ # If we have keyword arguments corresponding to fields on the
+ # request, apply these.
+
+ if name is not None:
+ request.name = name
+
+ # Wrap the RPC method; this adds retry and timeout information,
+ # and friendly error handling.
+ rpc = gapic_v1.method_async.wrap_method(
+ self._client._transport.get_hyperparameter_tuning_job,
+ default_timeout=None,
+ client_info=DEFAULT_CLIENT_INFO,
+ )
+
+ # Certain fields should be provided within the metadata header;
+ # add these here.
+ metadata = tuple(metadata) + (
+ gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)),
+ )
+
+ # Send the request.
+ response = await rpc(request, retry=retry, timeout=timeout, metadata=metadata,)
+
+ # Done; return the response.
+ return response
+
+ async def list_hyperparameter_tuning_jobs(
+ self,
+ request: job_service.ListHyperparameterTuningJobsRequest = None,
+ *,
+ parent: str = None,
+ retry: retries.Retry = gapic_v1.method.DEFAULT,
+ timeout: float = None,
+ metadata: Sequence[Tuple[str, str]] = (),
+ ) -> pagers.ListHyperparameterTuningJobsAsyncPager:
+ r"""Lists HyperparameterTuningJobs in a Location.
+
+ Args:
+ request (:class:`google.cloud.aiplatform_v1.types.ListHyperparameterTuningJobsRequest`):
+ The request object. Request message for
+ ``JobService.ListHyperparameterTuningJobs``.
+ parent (:class:`str`):
+ Required. The resource name of the Location to list the
+ HyperparameterTuningJobs from. Format:
+ ``projects/{project}/locations/{location}``
+
+ This corresponds to the ``parent`` field
+ on the ``request`` instance; if ``request`` is provided, this
+ should not be set.
+
+ retry (google.api_core.retry.Retry): Designation of what errors, if any,
+ should be retried.
+ timeout (float): The timeout for this request.
+ metadata (Sequence[Tuple[str, str]]): Strings which should be
+ sent along with the request as metadata.
+
+ Returns:
+ google.cloud.aiplatform_v1.services.job_service.pagers.ListHyperparameterTuningJobsAsyncPager:
+ Response message for
+ ``JobService.ListHyperparameterTuningJobs``
+
+ Iterating over this object will yield results and
+ resolve additional pages automatically.
+
+ """
+ # Create or coerce a protobuf request object.
+ # Sanity check: If we got a request object, we should *not* have
+ # gotten any keyword arguments that map to the request.
+ has_flattened_params = any([parent])
+ if request is not None and has_flattened_params:
+ raise ValueError(
+ "If the `request` argument is set, then none of "
+ "the individual field arguments should be set."
+ )
+
+ request = job_service.ListHyperparameterTuningJobsRequest(request)
+
+ # If we have keyword arguments corresponding to fields on the
+ # request, apply these.
+
+ if parent is not None:
+ request.parent = parent
+
+ # Wrap the RPC method; this adds retry and timeout information,
+ # and friendly error handling.
+ rpc = gapic_v1.method_async.wrap_method(
+ self._client._transport.list_hyperparameter_tuning_jobs,
+ default_timeout=None,
+ client_info=DEFAULT_CLIENT_INFO,
+ )
+
+ # Certain fields should be provided within the metadata header;
+ # add these here.
+ metadata = tuple(metadata) + (
+ gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)),
+ )
+
+ # Send the request.
+ response = await rpc(request, retry=retry, timeout=timeout, metadata=metadata,)
+
+ # This method is paged; wrap the response in a pager, which provides
+ # an `__aiter__` convenience method.
+ response = pagers.ListHyperparameterTuningJobsAsyncPager(
+ method=rpc, request=request, response=response, metadata=metadata,
+ )
+
+ # Done; return the response.
+ return response
+
+ async def delete_hyperparameter_tuning_job(
+ self,
+ request: job_service.DeleteHyperparameterTuningJobRequest = None,
+ *,
+ name: str = None,
+ retry: retries.Retry = gapic_v1.method.DEFAULT,
+ timeout: float = None,
+ metadata: Sequence[Tuple[str, str]] = (),
+ ) -> operation_async.AsyncOperation:
+ r"""Deletes a HyperparameterTuningJob.
+
+ Args:
+ request (:class:`google.cloud.aiplatform_v1.types.DeleteHyperparameterTuningJobRequest`):
+ The request object. Request message for
+ ``JobService.DeleteHyperparameterTuningJob``.
+ name (:class:`str`):
+ Required. The name of the HyperparameterTuningJob
+ resource to be deleted. Format:
+
+ ``projects/{project}/locations/{location}/hyperparameterTuningJobs/{hyperparameter_tuning_job}``
+
+ This corresponds to the ``name`` field
+ on the ``request`` instance; if ``request`` is provided, this
+ should not be set.
+
+ retry (google.api_core.retry.Retry): Designation of what errors, if any,
+ should be retried.
+ timeout (float): The timeout for this request.
+ metadata (Sequence[Tuple[str, str]]): Strings which should be
+ sent along with the request as metadata.
+
+ Returns:
+ google.api_core.operation_async.AsyncOperation:
+ An object representing a long-running operation.
+
+                The result type for the operation will be :class:`google.protobuf.empty_pb2.Empty`. A generic empty message that you can re-use to avoid defining duplicated
+ empty messages in your APIs. A typical example is to
+ use it as the request or the response type of an API
+ method. For instance:
+
+ service Foo {
+ rpc Bar(google.protobuf.Empty) returns
+ (google.protobuf.Empty);
+
+ }
+
+                   The JSON representation for Empty is an empty JSON
+ object {}.
+
+ """
+ # Create or coerce a protobuf request object.
+ # Sanity check: If we got a request object, we should *not* have
+ # gotten any keyword arguments that map to the request.
+ has_flattened_params = any([name])
+ if request is not None and has_flattened_params:
+ raise ValueError(
+ "If the `request` argument is set, then none of "
+ "the individual field arguments should be set."
+ )
+
+ request = job_service.DeleteHyperparameterTuningJobRequest(request)
+
+ # If we have keyword arguments corresponding to fields on the
+ # request, apply these.
+
+ if name is not None:
+ request.name = name
+
+ # Wrap the RPC method; this adds retry and timeout information,
+ # and friendly error handling.
+ rpc = gapic_v1.method_async.wrap_method(
+ self._client._transport.delete_hyperparameter_tuning_job,
+ default_timeout=None,
+ client_info=DEFAULT_CLIENT_INFO,
+ )
+
+ # Certain fields should be provided within the metadata header;
+ # add these here.
+ metadata = tuple(metadata) + (
+ gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)),
+ )
+
+ # Send the request.
+ response = await rpc(request, retry=retry, timeout=timeout, metadata=metadata,)
+
+ # Wrap the response in an operation future.
+ response = operation_async.from_gapic(
+ response,
+ self._client._transport.operations_client,
+ empty.Empty,
+ metadata_type=gca_operation.DeleteOperationMetadata,
+ )
+
+ # Done; return the response.
+ return response
+
+ async def cancel_hyperparameter_tuning_job(
+ self,
+ request: job_service.CancelHyperparameterTuningJobRequest = None,
+ *,
+ name: str = None,
+ retry: retries.Retry = gapic_v1.method.DEFAULT,
+ timeout: float = None,
+ metadata: Sequence[Tuple[str, str]] = (),
+ ) -> None:
+ r"""Cancels a HyperparameterTuningJob. Starts asynchronous
+ cancellation on the HyperparameterTuningJob. The server makes a
+ best effort to cancel the job, but success is not guaranteed.
+ Clients can use
+ ``JobService.GetHyperparameterTuningJob``
+ or other methods to check whether the cancellation succeeded or
+ whether the job completed despite cancellation. On successful
+ cancellation, the HyperparameterTuningJob is not deleted;
+ instead it becomes a job with a
+ ``HyperparameterTuningJob.error``
+ value with a ``google.rpc.Status.code`` of
+ 1, corresponding to ``Code.CANCELLED``, and
+ ``HyperparameterTuningJob.state``
+ is set to ``CANCELLED``.
+
+ Args:
+ request (:class:`google.cloud.aiplatform_v1.types.CancelHyperparameterTuningJobRequest`):
+ The request object. Request message for
+ ``JobService.CancelHyperparameterTuningJob``.
+ name (:class:`str`):
+ Required. The name of the HyperparameterTuningJob to
+ cancel. Format:
+
+ ``projects/{project}/locations/{location}/hyperparameterTuningJobs/{hyperparameter_tuning_job}``
+
+ This corresponds to the ``name`` field
+ on the ``request`` instance; if ``request`` is provided, this
+ should not be set.
+
+ retry (google.api_core.retry.Retry): Designation of what errors, if any,
+ should be retried.
+ timeout (float): The timeout for this request.
+ metadata (Sequence[Tuple[str, str]]): Strings which should be
+ sent along with the request as metadata.
+ """
+ # Create or coerce a protobuf request object.
+ # Sanity check: If we got a request object, we should *not* have
+ # gotten any keyword arguments that map to the request.
+ has_flattened_params = any([name])
+ if request is not None and has_flattened_params:
+ raise ValueError(
+ "If the `request` argument is set, then none of "
+ "the individual field arguments should be set."
+ )
+
+ request = job_service.CancelHyperparameterTuningJobRequest(request)
+
+ # If we have keyword arguments corresponding to fields on the
+ # request, apply these.
+
+ if name is not None:
+ request.name = name
+
+ # Wrap the RPC method; this adds retry and timeout information,
+ # and friendly error handling.
+ rpc = gapic_v1.method_async.wrap_method(
+ self._client._transport.cancel_hyperparameter_tuning_job,
+ default_timeout=None,
+ client_info=DEFAULT_CLIENT_INFO,
+ )
+
+ # Certain fields should be provided within the metadata header;
+ # add these here.
+ metadata = tuple(metadata) + (
+ gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)),
+ )
+
+ # Send the request.
+ await rpc(
+ request, retry=retry, timeout=timeout, metadata=metadata,
+ )
+
+ async def create_batch_prediction_job(
+ self,
+ request: job_service.CreateBatchPredictionJobRequest = None,
+ *,
+ parent: str = None,
+ batch_prediction_job: gca_batch_prediction_job.BatchPredictionJob = None,
+ retry: retries.Retry = gapic_v1.method.DEFAULT,
+ timeout: float = None,
+ metadata: Sequence[Tuple[str, str]] = (),
+ ) -> gca_batch_prediction_job.BatchPredictionJob:
+        r"""Creates a BatchPredictionJob. The service attempts to
+        start the BatchPredictionJob as soon as it is created.
+
+ Args:
+ request (:class:`google.cloud.aiplatform_v1.types.CreateBatchPredictionJobRequest`):
+ The request object. Request message for
+ ``JobService.CreateBatchPredictionJob``.
+ parent (:class:`str`):
+ Required. The resource name of the Location to create
+ the BatchPredictionJob in. Format:
+ ``projects/{project}/locations/{location}``
+
+ This corresponds to the ``parent`` field
+ on the ``request`` instance; if ``request`` is provided, this
+ should not be set.
+ batch_prediction_job (:class:`google.cloud.aiplatform_v1.types.BatchPredictionJob`):
+ Required. The BatchPredictionJob to
+ create.
+
+ This corresponds to the ``batch_prediction_job`` field
+ on the ``request`` instance; if ``request`` is provided, this
+ should not be set.
+
+ retry (google.api_core.retry.Retry): Designation of what errors, if any,
+ should be retried.
+ timeout (float): The timeout for this request.
+ metadata (Sequence[Tuple[str, str]]): Strings which should be
+ sent along with the request as metadata.
+
+ Returns:
+ google.cloud.aiplatform_v1.types.BatchPredictionJob:
+ A job that uses a ``Model`` to produce predictions
+ on multiple [input
+ instances][google.cloud.aiplatform.v1.BatchPredictionJob.input_config].
+                   If predictions for a significant portion of the
+ instances fail, the job may finish without attempting
+ predictions for all remaining instances.
+
+ """
+ # Create or coerce a protobuf request object.
+ # Sanity check: If we got a request object, we should *not* have
+ # gotten any keyword arguments that map to the request.
+ has_flattened_params = any([parent, batch_prediction_job])
+ if request is not None and has_flattened_params:
+ raise ValueError(
+ "If the `request` argument is set, then none of "
+ "the individual field arguments should be set."
+ )
+
+ request = job_service.CreateBatchPredictionJobRequest(request)
+
+ # If we have keyword arguments corresponding to fields on the
+ # request, apply these.
+
+ if parent is not None:
+ request.parent = parent
+ if batch_prediction_job is not None:
+ request.batch_prediction_job = batch_prediction_job
+
+ # Wrap the RPC method; this adds retry and timeout information,
+ # and friendly error handling.
+ rpc = gapic_v1.method_async.wrap_method(
+ self._client._transport.create_batch_prediction_job,
+ default_timeout=None,
+ client_info=DEFAULT_CLIENT_INFO,
+ )
+
+ # Certain fields should be provided within the metadata header;
+ # add these here.
+ metadata = tuple(metadata) + (
+ gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)),
+ )
+
+ # Send the request.
+ response = await rpc(request, retry=retry, timeout=timeout, metadata=metadata,)
+
+ # Done; return the response.
+ return response
+
+ async def get_batch_prediction_job(
+ self,
+ request: job_service.GetBatchPredictionJobRequest = None,
+ *,
+ name: str = None,
+ retry: retries.Retry = gapic_v1.method.DEFAULT,
+ timeout: float = None,
+ metadata: Sequence[Tuple[str, str]] = (),
+ ) -> batch_prediction_job.BatchPredictionJob:
+        r"""Gets a BatchPredictionJob.
+
+ Args:
+ request (:class:`google.cloud.aiplatform_v1.types.GetBatchPredictionJobRequest`):
+ The request object. Request message for
+ ``JobService.GetBatchPredictionJob``.
+ name (:class:`str`):
+ Required. The name of the BatchPredictionJob resource.
+ Format:
+
+ ``projects/{project}/locations/{location}/batchPredictionJobs/{batch_prediction_job}``
+
+ This corresponds to the ``name`` field
+ on the ``request`` instance; if ``request`` is provided, this
+ should not be set.
+
+ retry (google.api_core.retry.Retry): Designation of what errors, if any,
+ should be retried.
+ timeout (float): The timeout for this request.
+ metadata (Sequence[Tuple[str, str]]): Strings which should be
+ sent along with the request as metadata.
+
+ Returns:
+ google.cloud.aiplatform_v1.types.BatchPredictionJob:
+ A job that uses a ``Model`` to produce predictions
+ on multiple [input
+ instances][google.cloud.aiplatform.v1.BatchPredictionJob.input_config].
+                   If predictions for a significant portion of the
+ instances fail, the job may finish without attempting
+ predictions for all remaining instances.
+
+ """
+ # Create or coerce a protobuf request object.
+ # Sanity check: If we got a request object, we should *not* have
+ # gotten any keyword arguments that map to the request.
+ has_flattened_params = any([name])
+ if request is not None and has_flattened_params:
+ raise ValueError(
+ "If the `request` argument is set, then none of "
+ "the individual field arguments should be set."
+ )
+
+ request = job_service.GetBatchPredictionJobRequest(request)
+
+ # If we have keyword arguments corresponding to fields on the
+ # request, apply these.
+
+ if name is not None:
+ request.name = name
+
+ # Wrap the RPC method; this adds retry and timeout information,
+ # and friendly error handling.
+ rpc = gapic_v1.method_async.wrap_method(
+ self._client._transport.get_batch_prediction_job,
+ default_timeout=None,
+ client_info=DEFAULT_CLIENT_INFO,
+ )
+
+ # Certain fields should be provided within the metadata header;
+ # add these here.
+ metadata = tuple(metadata) + (
+ gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)),
+ )
+
+ # Send the request.
+ response = await rpc(request, retry=retry, timeout=timeout, metadata=metadata,)
+
+ # Done; return the response.
+ return response
+
+ async def list_batch_prediction_jobs(
+ self,
+ request: job_service.ListBatchPredictionJobsRequest = None,
+ *,
+ parent: str = None,
+ retry: retries.Retry = gapic_v1.method.DEFAULT,
+ timeout: float = None,
+ metadata: Sequence[Tuple[str, str]] = (),
+ ) -> pagers.ListBatchPredictionJobsAsyncPager:
+ r"""Lists BatchPredictionJobs in a Location.
+
+ Args:
+ request (:class:`google.cloud.aiplatform_v1.types.ListBatchPredictionJobsRequest`):
+ The request object. Request message for
+ ``JobService.ListBatchPredictionJobs``.
+ parent (:class:`str`):
+ Required. The resource name of the Location to list the
+ BatchPredictionJobs from. Format:
+ ``projects/{project}/locations/{location}``
+
+ This corresponds to the ``parent`` field
+ on the ``request`` instance; if ``request`` is provided, this
+ should not be set.
+
+ retry (google.api_core.retry.Retry): Designation of what errors, if any,
+ should be retried.
+ timeout (float): The timeout for this request.
+ metadata (Sequence[Tuple[str, str]]): Strings which should be
+ sent along with the request as metadata.
+
+ Returns:
+ google.cloud.aiplatform_v1.services.job_service.pagers.ListBatchPredictionJobsAsyncPager:
+ Response message for
+ ``JobService.ListBatchPredictionJobs``
+
+ Iterating over this object will yield results and
+ resolve additional pages automatically.
+
+ """
+ # Create or coerce a protobuf request object.
+ # Sanity check: If we got a request object, we should *not* have
+ # gotten any keyword arguments that map to the request.
+ has_flattened_params = any([parent])
+ if request is not None and has_flattened_params:
+ raise ValueError(
+ "If the `request` argument is set, then none of "
+ "the individual field arguments should be set."
+ )
+
+ request = job_service.ListBatchPredictionJobsRequest(request)
+
+ # If we have keyword arguments corresponding to fields on the
+ # request, apply these.
+
+ if parent is not None:
+ request.parent = parent
+
+ # Wrap the RPC method; this adds retry and timeout information,
+ # and friendly error handling.
+ rpc = gapic_v1.method_async.wrap_method(
+ self._client._transport.list_batch_prediction_jobs,
+ default_timeout=None,
+ client_info=DEFAULT_CLIENT_INFO,
+ )
+
+ # Certain fields should be provided within the metadata header;
+ # add these here.
+ metadata = tuple(metadata) + (
+ gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)),
+ )
+
+ # Send the request.
+ response = await rpc(request, retry=retry, timeout=timeout, metadata=metadata,)
+
+ # This method is paged; wrap the response in a pager, which provides
+ # an `__aiter__` convenience method.
+ response = pagers.ListBatchPredictionJobsAsyncPager(
+ method=rpc, request=request, response=response, metadata=metadata,
+ )
+
+ # Done; return the response.
+ return response
+
+ async def delete_batch_prediction_job(
+ self,
+ request: job_service.DeleteBatchPredictionJobRequest = None,
+ *,
+ name: str = None,
+ retry: retries.Retry = gapic_v1.method.DEFAULT,
+ timeout: float = None,
+ metadata: Sequence[Tuple[str, str]] = (),
+ ) -> operation_async.AsyncOperation:
+ r"""Deletes a BatchPredictionJob. Can only be called on
+ jobs that already finished.
+
+ Args:
+ request (:class:`google.cloud.aiplatform_v1.types.DeleteBatchPredictionJobRequest`):
+ The request object. Request message for
+ ``JobService.DeleteBatchPredictionJob``.
+ name (:class:`str`):
+ Required. The name of the BatchPredictionJob resource to
+ be deleted. Format:
+
+ ``projects/{project}/locations/{location}/batchPredictionJobs/{batch_prediction_job}``
+
+ This corresponds to the ``name`` field
+ on the ``request`` instance; if ``request`` is provided, this
+ should not be set.
+
+ retry (google.api_core.retry.Retry): Designation of what errors, if any,
+ should be retried.
+ timeout (float): The timeout for this request.
+ metadata (Sequence[Tuple[str, str]]): Strings which should be
+ sent along with the request as metadata.
+
+ Returns:
+ google.api_core.operation_async.AsyncOperation:
+ An object representing a long-running operation.
+
+                The result type for the operation will be :class:`google.protobuf.empty_pb2.Empty`. A generic empty message that you can re-use to avoid defining duplicated
+ empty messages in your APIs. A typical example is to
+ use it as the request or the response type of an API
+ method. For instance:
+
+ service Foo {
+ rpc Bar(google.protobuf.Empty) returns
+ (google.protobuf.Empty);
+
+ }
+
+                   The JSON representation for Empty is an empty JSON
+ object {}.
+
+ """
+ # Create or coerce a protobuf request object.
+ # Sanity check: If we got a request object, we should *not* have
+ # gotten any keyword arguments that map to the request.
+ has_flattened_params = any([name])
+ if request is not None and has_flattened_params:
+ raise ValueError(
+ "If the `request` argument is set, then none of "
+ "the individual field arguments should be set."
+ )
+
+ request = job_service.DeleteBatchPredictionJobRequest(request)
+
+ # If we have keyword arguments corresponding to fields on the
+ # request, apply these.
+
+ if name is not None:
+ request.name = name
+
+ # Wrap the RPC method; this adds retry and timeout information,
+ # and friendly error handling.
+ rpc = gapic_v1.method_async.wrap_method(
+ self._client._transport.delete_batch_prediction_job,
+ default_timeout=None,
+ client_info=DEFAULT_CLIENT_INFO,
+ )
+
+ # Certain fields should be provided within the metadata header;
+ # add these here.
+ metadata = tuple(metadata) + (
+ gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)),
+ )
+
+ # Send the request.
+ response = await rpc(request, retry=retry, timeout=timeout, metadata=metadata,)
+
+ # Wrap the response in an operation future.
+ response = operation_async.from_gapic(
+ response,
+ self._client._transport.operations_client,
+ empty.Empty,
+ metadata_type=gca_operation.DeleteOperationMetadata,
+ )
+
+ # Done; return the response.
+ return response
+
+ async def cancel_batch_prediction_job(
+ self,
+ request: job_service.CancelBatchPredictionJobRequest = None,
+ *,
+ name: str = None,
+ retry: retries.Retry = gapic_v1.method.DEFAULT,
+ timeout: float = None,
+ metadata: Sequence[Tuple[str, str]] = (),
+ ) -> None:
+ r"""Cancels a BatchPredictionJob.
+
+ Starts asynchronous cancellation on the BatchPredictionJob. The
+ server makes the best effort to cancel the job, but success is
+ not guaranteed. Clients can use
+ ``JobService.GetBatchPredictionJob``
+ or other methods to check whether the cancellation succeeded or
+ whether the job completed despite cancellation. On a successful
+        cancellation, the BatchPredictionJob is not deleted; instead its
+ ``BatchPredictionJob.state``
+ is set to ``CANCELLED``. Any files already outputted by the job
+ are not deleted.
+
+ Args:
+ request (:class:`google.cloud.aiplatform_v1.types.CancelBatchPredictionJobRequest`):
+ The request object. Request message for
+ ``JobService.CancelBatchPredictionJob``.
+ name (:class:`str`):
+ Required. The name of the BatchPredictionJob to cancel.
+ Format:
+
+ ``projects/{project}/locations/{location}/batchPredictionJobs/{batch_prediction_job}``
+
+ This corresponds to the ``name`` field
+ on the ``request`` instance; if ``request`` is provided, this
+ should not be set.
+
+ retry (google.api_core.retry.Retry): Designation of what errors, if any,
+ should be retried.
+ timeout (float): The timeout for this request.
+ metadata (Sequence[Tuple[str, str]]): Strings which should be
+ sent along with the request as metadata.
+ """
+ # Create or coerce a protobuf request object.
+ # Sanity check: If we got a request object, we should *not* have
+ # gotten any keyword arguments that map to the request.
+ has_flattened_params = any([name])
+ if request is not None and has_flattened_params:
+ raise ValueError(
+ "If the `request` argument is set, then none of "
+ "the individual field arguments should be set."
+ )
+
+ request = job_service.CancelBatchPredictionJobRequest(request)
+
+ # If we have keyword arguments corresponding to fields on the
+ # request, apply these.
+
+ if name is not None:
+ request.name = name
+
+ # Wrap the RPC method; this adds retry and timeout information,
+ # and friendly error handling.
+ rpc = gapic_v1.method_async.wrap_method(
+ self._client._transport.cancel_batch_prediction_job,
+ default_timeout=None,
+ client_info=DEFAULT_CLIENT_INFO,
+ )
+
+ # Certain fields should be provided within the metadata header;
+ # add these here.
+ metadata = tuple(metadata) + (
+ gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)),
+ )
+
+ # Send the request.
+ await rpc(
+ request, retry=retry, timeout=timeout, metadata=metadata,
+ )
+
+
+try:
+ DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo(
+ gapic_version=pkg_resources.get_distribution(
+ "google-cloud-aiplatform",
+ ).version,
+ )
+except pkg_resources.DistributionNotFound:
+ DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo()
+
+
+__all__ = ("JobServiceAsyncClient",)
diff --git a/google/cloud/aiplatform_v1/services/job_service/client.py b/google/cloud/aiplatform_v1/services/job_service/client.py
new file mode 100644
index 0000000000..746ce91c4b
--- /dev/null
+++ b/google/cloud/aiplatform_v1/services/job_service/client.py
@@ -0,0 +1,2204 @@
+# -*- coding: utf-8 -*-
+
+# Copyright 2020 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+from collections import OrderedDict
+from distutils import util
+import os
+import re
+from typing import Callable, Dict, Optional, Sequence, Tuple, Type, Union
+import pkg_resources
+
+from google.api_core import client_options as client_options_lib # type: ignore
+from google.api_core import exceptions # type: ignore
+from google.api_core import gapic_v1 # type: ignore
+from google.api_core import retry as retries # type: ignore
+from google.auth import credentials # type: ignore
+from google.auth.transport import mtls # type: ignore
+from google.auth.transport.grpc import SslCredentials # type: ignore
+from google.auth.exceptions import MutualTLSChannelError # type: ignore
+from google.oauth2 import service_account # type: ignore
+
+from google.api_core import operation as ga_operation # type: ignore
+from google.api_core import operation_async # type: ignore
+from google.cloud.aiplatform_v1.services.job_service import pagers
+from google.cloud.aiplatform_v1.types import batch_prediction_job
+from google.cloud.aiplatform_v1.types import (
+ batch_prediction_job as gca_batch_prediction_job,
+)
+from google.cloud.aiplatform_v1.types import completion_stats
+from google.cloud.aiplatform_v1.types import custom_job
+from google.cloud.aiplatform_v1.types import custom_job as gca_custom_job
+from google.cloud.aiplatform_v1.types import data_labeling_job
+from google.cloud.aiplatform_v1.types import data_labeling_job as gca_data_labeling_job
+from google.cloud.aiplatform_v1.types import encryption_spec
+from google.cloud.aiplatform_v1.types import hyperparameter_tuning_job
+from google.cloud.aiplatform_v1.types import (
+ hyperparameter_tuning_job as gca_hyperparameter_tuning_job,
+)
+from google.cloud.aiplatform_v1.types import job_service
+from google.cloud.aiplatform_v1.types import job_state
+from google.cloud.aiplatform_v1.types import machine_resources
+from google.cloud.aiplatform_v1.types import manual_batch_tuning_parameters
+from google.cloud.aiplatform_v1.types import operation as gca_operation
+from google.cloud.aiplatform_v1.types import study
+from google.protobuf import empty_pb2 as empty # type: ignore
+from google.protobuf import struct_pb2 as struct # type: ignore
+from google.protobuf import timestamp_pb2 as timestamp # type: ignore
+from google.rpc import status_pb2 as status # type: ignore
+from google.type import money_pb2 as money # type: ignore
+
+from .transports.base import JobServiceTransport, DEFAULT_CLIENT_INFO
+from .transports.grpc import JobServiceGrpcTransport
+from .transports.grpc_asyncio import JobServiceGrpcAsyncIOTransport
+
+
+class JobServiceClientMeta(type):
+ """Metaclass for the JobService client.
+
+ This provides class-level methods for building and retrieving
+ support objects (e.g. transport) without polluting the client instance
+ objects.
+ """
+
+ _transport_registry = OrderedDict() # type: Dict[str, Type[JobServiceTransport]]
+ _transport_registry["grpc"] = JobServiceGrpcTransport
+ _transport_registry["grpc_asyncio"] = JobServiceGrpcAsyncIOTransport
+
+ def get_transport_class(cls, label: str = None,) -> Type[JobServiceTransport]:
+ """Return an appropriate transport class.
+
+ Args:
+ label: The name of the desired transport. If none is
+ provided, then the first transport in the registry is used.
+
+ Returns:
+ The transport class to use.
+ """
+ # If a specific transport is requested, return that one.
+ if label:
+ return cls._transport_registry[label]
+
+ # No transport is requested; return the default (that is, the first one
+ # in the dictionary).
+ return next(iter(cls._transport_registry.values()))
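+        # Illustrative transport selection (editor's sketch):
+        #
+        #     JobServiceClient.get_transport_class()               # grpc (first registered)
+        #     JobServiceClient.get_transport_class("grpc_asyncio") # explicit asyncio transport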
+
+
+class JobServiceClient(metaclass=JobServiceClientMeta):
+ """A service for creating and managing AI Platform's jobs."""
+
+ @staticmethod
+ def _get_default_mtls_endpoint(api_endpoint):
+ """Convert api endpoint to mTLS endpoint.
+ Convert "*.sandbox.googleapis.com" and "*.googleapis.com" to
+ "*.mtls.sandbox.googleapis.com" and "*.mtls.googleapis.com" respectively.
+ Args:
+ api_endpoint (Optional[str]): the api endpoint to convert.
+ Returns:
+ str: converted mTLS api endpoint.
+ """
+ if not api_endpoint:
+ return api_endpoint
+
+ mtls_endpoint_re = re.compile(
+            r"(?P<name>[^.]+)(?P<mtls>\.mtls)?(?P<sandbox>\.sandbox)?(?P<googledomain>\.googleapis\.com)?"
+ )
+
+ m = mtls_endpoint_re.match(api_endpoint)
+ name, mtls, sandbox, googledomain = m.groups()
+ if mtls or not googledomain:
+ return api_endpoint
+
+ if sandbox:
+ return api_endpoint.replace(
+ "sandbox.googleapis.com", "mtls.sandbox.googleapis.com"
+ )
+
+ return api_endpoint.replace(".googleapis.com", ".mtls.googleapis.com")
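+        # Illustrative conversions (editor's sketch):
+        #
+        #     _get_default_mtls_endpoint("aiplatform.googleapis.com")
+        #         -> "aiplatform.mtls.googleapis.com"
+        #     _get_default_mtls_endpoint("aiplatform.mtls.googleapis.com")
+        #         -> returned unchanged (already an mTLS endpoint)
+        #     _get_default_mtls_endpoint("example.com")
+        #         -> returned unchanged (not a googleapis.com host)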
+
+ DEFAULT_ENDPOINT = "aiplatform.googleapis.com"
+ DEFAULT_MTLS_ENDPOINT = _get_default_mtls_endpoint.__func__( # type: ignore
+ DEFAULT_ENDPOINT
+ )
+
+ @classmethod
+ def from_service_account_info(cls, info: dict, *args, **kwargs):
+ """Creates an instance of this client using the provided credentials info.
+
+ Args:
+ info (dict): The service account private key info.
+ args: Additional arguments to pass to the constructor.
+ kwargs: Additional arguments to pass to the constructor.
+
+ Returns:
+ JobServiceClient: The constructed client.
+ """
+ credentials = service_account.Credentials.from_service_account_info(info)
+ kwargs["credentials"] = credentials
+ return cls(*args, **kwargs)
+
+ @classmethod
+ def from_service_account_file(cls, filename: str, *args, **kwargs):
+ """Creates an instance of this client using the provided credentials
+ file.
+
+ Args:
+ filename (str): The path to the service account private key json
+ file.
+ args: Additional arguments to pass to the constructor.
+ kwargs: Additional arguments to pass to the constructor.
+
+ Returns:
+ JobServiceClient: The constructed client.
+ """
+ credentials = service_account.Credentials.from_service_account_file(filename)
+ kwargs["credentials"] = credentials
+ return cls(*args, **kwargs)
+
+ from_service_account_json = from_service_account_file
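+    # Illustrative usage (editor's sketch; the key file path is a
+    # hypothetical placeholder):
+    #
+    #     client = JobServiceClient.from_service_account_file("sa-key.json")
+    #     # from_service_account_json is an alias for the same constructor:
+    #     client = JobServiceClient.from_service_account_json("sa-key.json")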
+
+ @property
+ def transport(self) -> JobServiceTransport:
+ """Return the transport used by the client instance.
+
+ Returns:
+ JobServiceTransport: The transport used by the client instance.
+ """
+ return self._transport
+
+ @staticmethod
+ def batch_prediction_job_path(
+ project: str, location: str, batch_prediction_job: str,
+ ) -> str:
+ """Return a fully-qualified batch_prediction_job string."""
+ return "projects/{project}/locations/{location}/batchPredictionJobs/{batch_prediction_job}".format(
+ project=project,
+ location=location,
+ batch_prediction_job=batch_prediction_job,
+ )
+
+ @staticmethod
+ def parse_batch_prediction_job_path(path: str) -> Dict[str, str]:
+ """Parse a batch_prediction_job path into its component segments."""
+ m = re.match(
+            r"^projects/(?P<project>.+?)/locations/(?P<location>.+?)/batchPredictionJobs/(?P<batch_prediction_job>.+?)$",
+ path,
+ )
+ return m.groupdict() if m else {}
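+        # Illustrative round trip through the path helpers (editor's sketch;
+        # the identifiers are hypothetical):
+        #
+        #     path = JobServiceClient.batch_prediction_job_path(
+        #         "my-project", "us-central1", "456",
+        #     )
+        #     # -> "projects/my-project/locations/us-central1/batchPredictionJobs/456"
+        #     JobServiceClient.parse_batch_prediction_job_path(path)
+        #     # -> {"project": "my-project", "location": "us-central1",
+        #     #     "batch_prediction_job": "456"}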
+
+ @staticmethod
+ def custom_job_path(project: str, location: str, custom_job: str,) -> str:
+ """Return a fully-qualified custom_job string."""
+ return "projects/{project}/locations/{location}/customJobs/{custom_job}".format(
+ project=project, location=location, custom_job=custom_job,
+ )
+
+ @staticmethod
+ def parse_custom_job_path(path: str) -> Dict[str, str]:
+ """Parse a custom_job path into its component segments."""
+ m = re.match(
+            r"^projects/(?P<project>.+?)/locations/(?P<location>.+?)/customJobs/(?P<custom_job>.+?)$",
+ path,
+ )
+ return m.groupdict() if m else {}
+
+ @staticmethod
+ def data_labeling_job_path(
+ project: str, location: str, data_labeling_job: str,
+ ) -> str:
+ """Return a fully-qualified data_labeling_job string."""
+ return "projects/{project}/locations/{location}/dataLabelingJobs/{data_labeling_job}".format(
+ project=project, location=location, data_labeling_job=data_labeling_job,
+ )
+
+ @staticmethod
+ def parse_data_labeling_job_path(path: str) -> Dict[str, str]:
+ """Parse a data_labeling_job path into its component segments."""
+ m = re.match(
+            r"^projects/(?P<project>.+?)/locations/(?P<location>.+?)/dataLabelingJobs/(?P<data_labeling_job>.+?)$",
+ path,
+ )
+ return m.groupdict() if m else {}
+
+ @staticmethod
+ def dataset_path(project: str, location: str, dataset: str,) -> str:
+ """Return a fully-qualified dataset string."""
+ return "projects/{project}/locations/{location}/datasets/{dataset}".format(
+ project=project, location=location, dataset=dataset,
+ )
+
+ @staticmethod
+ def parse_dataset_path(path: str) -> Dict[str, str]:
+ """Parse a dataset path into its component segments."""
+ m = re.match(
+            r"^projects/(?P<project>.+?)/locations/(?P<location>.+?)/datasets/(?P<dataset>.+?)$",
+ path,
+ )
+ return m.groupdict() if m else {}
+
+ @staticmethod
+ def hyperparameter_tuning_job_path(
+ project: str, location: str, hyperparameter_tuning_job: str,
+ ) -> str:
+ """Return a fully-qualified hyperparameter_tuning_job string."""
+ return "projects/{project}/locations/{location}/hyperparameterTuningJobs/{hyperparameter_tuning_job}".format(
+ project=project,
+ location=location,
+ hyperparameter_tuning_job=hyperparameter_tuning_job,
+ )
+
+ @staticmethod
+ def parse_hyperparameter_tuning_job_path(path: str) -> Dict[str, str]:
+ """Parse a hyperparameter_tuning_job path into its component segments."""
+ m = re.match(
+            r"^projects/(?P<project>.+?)/locations/(?P<location>.+?)/hyperparameterTuningJobs/(?P<hyperparameter_tuning_job>.+?)$",
+ path,
+ )
+ return m.groupdict() if m else {}
+
+ @staticmethod
+ def model_path(project: str, location: str, model: str,) -> str:
+ """Return a fully-qualified model string."""
+ return "projects/{project}/locations/{location}/models/{model}".format(
+ project=project, location=location, model=model,
+ )
+
+ @staticmethod
+ def parse_model_path(path: str) -> Dict[str, str]:
+ """Parse a model path into its component segments."""
+ m = re.match(
+            r"^projects/(?P<project>.+?)/locations/(?P<location>.+?)/models/(?P<model>.+?)$",
+ path,
+ )
+ return m.groupdict() if m else {}
+
+ @staticmethod
+ def trial_path(project: str, location: str, study: str, trial: str,) -> str:
+ """Return a fully-qualified trial string."""
+ return "projects/{project}/locations/{location}/studies/{study}/trials/{trial}".format(
+ project=project, location=location, study=study, trial=trial,
+ )
+
+ @staticmethod
+ def parse_trial_path(path: str) -> Dict[str, str]:
+ """Parse a trial path into its component segments."""
+ m = re.match(
+            r"^projects/(?P<project>.+?)/locations/(?P<location>.+?)/studies/(?P<study>.+?)/trials/(?P<trial>.+?)$",
+ path,
+ )
+ return m.groupdict() if m else {}
+
+ @staticmethod
+ def common_billing_account_path(billing_account: str,) -> str:
+ """Return a fully-qualified billing_account string."""
+ return "billingAccounts/{billing_account}".format(
+ billing_account=billing_account,
+ )
+
+ @staticmethod
+ def parse_common_billing_account_path(path: str) -> Dict[str, str]:
+ """Parse a billing_account path into its component segments."""
+        m = re.match(r"^billingAccounts/(?P<billing_account>.+?)$", path)
+ return m.groupdict() if m else {}
+
+ @staticmethod
+ def common_folder_path(folder: str,) -> str:
+ """Return a fully-qualified folder string."""
+ return "folders/{folder}".format(folder=folder,)
+
+ @staticmethod
+ def parse_common_folder_path(path: str) -> Dict[str, str]:
+ """Parse a folder path into its component segments."""
+        m = re.match(r"^folders/(?P<folder>.+?)$", path)
+ return m.groupdict() if m else {}
+
+ @staticmethod
+ def common_organization_path(organization: str,) -> str:
+ """Return a fully-qualified organization string."""
+ return "organizations/{organization}".format(organization=organization,)
+
+ @staticmethod
+ def parse_common_organization_path(path: str) -> Dict[str, str]:
+        """Parse an organization path into its component segments."""
+        m = re.match(r"^organizations/(?P<organization>.+?)$", path)
+ return m.groupdict() if m else {}
+
+ @staticmethod
+ def common_project_path(project: str,) -> str:
+ """Return a fully-qualified project string."""
+ return "projects/{project}".format(project=project,)
+
+ @staticmethod
+ def parse_common_project_path(path: str) -> Dict[str, str]:
+ """Parse a project path into its component segments."""
+        m = re.match(r"^projects/(?P<project>.+?)$", path)
+ return m.groupdict() if m else {}
+
+ @staticmethod
+ def common_location_path(project: str, location: str,) -> str:
+ """Return a fully-qualified location string."""
+ return "projects/{project}/locations/{location}".format(
+ project=project, location=location,
+ )
+
+ @staticmethod
+ def parse_common_location_path(path: str) -> Dict[str, str]:
+ """Parse a location path into its component segments."""
+        m = re.match(r"^projects/(?P<project>.+?)/locations/(?P<location>.+?)$", path)
+ return m.groupdict() if m else {}
+
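The `common_*` helpers build the parent paths shared across services; `common_location_path` in particular produces the `parent` string that the create/list methods below expect. Sketch with placeholder IDs:

```python
from google.cloud import aiplatform_v1

# Placeholder project and region.
parent = aiplatform_v1.JobServiceClient.common_location_path(
    "my-project", "us-central1"
)
# parent == "projects/my-project/locations/us-central1"
```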
+ def __init__(
+ self,
+ *,
+ credentials: Optional[credentials.Credentials] = None,
+ transport: Union[str, JobServiceTransport, None] = None,
+ client_options: Optional[client_options_lib.ClientOptions] = None,
+ client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
+ ) -> None:
+ """Instantiate the job service client.
+
+ Args:
+ credentials (Optional[google.auth.credentials.Credentials]): The
+ authorization credentials to attach to requests. These
+ credentials identify the application to the service; if none
+ are specified, the client will attempt to ascertain the
+ credentials from the environment.
+ transport (Union[str, JobServiceTransport]): The
+ transport to use. If set to None, a transport is chosen
+ automatically.
+ client_options (google.api_core.client_options.ClientOptions): Custom options for the
+ client. It won't take effect if a ``transport`` instance is provided.
+ (1) The ``api_endpoint`` property can be used to override the
+ default endpoint provided by the client. GOOGLE_API_USE_MTLS_ENDPOINT
+ environment variable can also be used to override the endpoint:
+ "always" (always use the default mTLS endpoint), "never" (always
+ use the default regular endpoint) and "auto" (auto switch to the
+ default mTLS endpoint if client certificate is present, this is
+ the default value). However, the ``api_endpoint`` property takes
+ precedence if provided.
+ (2) If GOOGLE_API_USE_CLIENT_CERTIFICATE environment variable
+ is "true", then the ``client_cert_source`` property can be used
+ to provide client certificate for mutual TLS transport. If
+ not provided, the default SSL client certificate will be used if
+ present. If GOOGLE_API_USE_CLIENT_CERTIFICATE is "false" or not
+ set, no client certificate will be used.
+ client_info (google.api_core.gapic_v1.client_info.ClientInfo):
+ The client info used to send a user-agent string along with
+ API requests. If ``None``, then default info will be used.
+ Generally, you only need to set this if you're developing
+ your own client library.
+
+ Raises:
+ google.auth.exceptions.MutualTLSChannelError: If mutual TLS transport
+ creation failed for any reason.
+ """
+ if isinstance(client_options, dict):
+ client_options = client_options_lib.from_dict(client_options)
+ if client_options is None:
+ client_options = client_options_lib.ClientOptions()
+
+ # Create SSL credentials for mutual TLS if needed.
+ use_client_cert = bool(
+ util.strtobool(os.getenv("GOOGLE_API_USE_CLIENT_CERTIFICATE", "false"))
+ )
+
+ client_cert_source_func = None
+ is_mtls = False
+ if use_client_cert:
+ if client_options.client_cert_source:
+ is_mtls = True
+ client_cert_source_func = client_options.client_cert_source
+ else:
+ is_mtls = mtls.has_default_client_cert_source()
+ client_cert_source_func = (
+ mtls.default_client_cert_source() if is_mtls else None
+ )
+
+ # Figure out which api endpoint to use.
+ if client_options.api_endpoint is not None:
+ api_endpoint = client_options.api_endpoint
+ else:
+ use_mtls_env = os.getenv("GOOGLE_API_USE_MTLS_ENDPOINT", "auto")
+ if use_mtls_env == "never":
+ api_endpoint = self.DEFAULT_ENDPOINT
+ elif use_mtls_env == "always":
+ api_endpoint = self.DEFAULT_MTLS_ENDPOINT
+ elif use_mtls_env == "auto":
+ api_endpoint = (
+ self.DEFAULT_MTLS_ENDPOINT if is_mtls else self.DEFAULT_ENDPOINT
+ )
+ else:
+ raise MutualTLSChannelError(
+ "Unsupported GOOGLE_API_USE_MTLS_ENDPOINT value. Accepted values: never, auto, always"
+ )
+
+ # Save or instantiate the transport.
+ # Ordinarily, we provide the transport, but allowing a custom transport
+ # instance provides an extensibility point for unusual situations.
+ if isinstance(transport, JobServiceTransport):
+ # transport is a JobServiceTransport instance.
+ if credentials or client_options.credentials_file:
+ raise ValueError(
+ "When providing a transport instance, "
+ "provide its credentials directly."
+ )
+ if client_options.scopes:
+ raise ValueError(
+ "When providing a transport instance, "
+ "provide its scopes directly."
+ )
+ self._transport = transport
+ else:
+ Transport = type(self).get_transport_class(transport)
+ self._transport = Transport(
+ credentials=credentials,
+ credentials_file=client_options.credentials_file,
+ host=api_endpoint,
+ scopes=client_options.scopes,
+ client_cert_source_for_mtls=client_cert_source_func,
+ quota_project_id=client_options.quota_project_id,
+ client_info=client_info,
+ )
+
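A hedged sketch of overriding the API endpoint through `ClientOptions` as described in the constructor docstring; the regional endpoint string is illustrative, and mTLS behaviour still follows the GOOGLE_API_USE_* environment variables:

```python
from google.api_core import client_options as client_options_lib
from google.cloud import aiplatform_v1

# Illustrative regional endpoint; an explicit api_endpoint takes
# precedence over the GOOGLE_API_USE_MTLS_ENDPOINT setting.
options = client_options_lib.ClientOptions(
    api_endpoint="us-central1-aiplatform.googleapis.com"
)
client = aiplatform_v1.JobServiceClient(client_options=options)
```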
+ def create_custom_job(
+ self,
+ request: job_service.CreateCustomJobRequest = None,
+ *,
+ parent: str = None,
+ custom_job: gca_custom_job.CustomJob = None,
+ retry: retries.Retry = gapic_v1.method.DEFAULT,
+ timeout: float = None,
+ metadata: Sequence[Tuple[str, str]] = (),
+ ) -> gca_custom_job.CustomJob:
+        r"""Creates a CustomJob. The service attempts to run a
+        newly created CustomJob right away.
+
+ Args:
+ request (google.cloud.aiplatform_v1.types.CreateCustomJobRequest):
+ The request object. Request message for
+ ``JobService.CreateCustomJob``.
+ parent (str):
+ Required. The resource name of the Location to create
+ the CustomJob in. Format:
+ ``projects/{project}/locations/{location}``
+
+ This corresponds to the ``parent`` field
+ on the ``request`` instance; if ``request`` is provided, this
+ should not be set.
+ custom_job (google.cloud.aiplatform_v1.types.CustomJob):
+ Required. The CustomJob to create.
+ This corresponds to the ``custom_job`` field
+ on the ``request`` instance; if ``request`` is provided, this
+ should not be set.
+
+ retry (google.api_core.retry.Retry): Designation of what errors, if any,
+ should be retried.
+ timeout (float): The timeout for this request.
+ metadata (Sequence[Tuple[str, str]]): Strings which should be
+ sent along with the request as metadata.
+
+ Returns:
+ google.cloud.aiplatform_v1.types.CustomJob:
+ Represents a job that runs custom
+ workloads such as a Docker container or
+ a Python package. A CustomJob can have
+ multiple worker pools and each worker
+ pool can have its own machine and input
+ spec. A CustomJob will be cleaned up
+ once the job enters terminal state
+ (failed or succeeded).
+
+ """
+ # Create or coerce a protobuf request object.
+ # Sanity check: If we got a request object, we should *not* have
+ # gotten any keyword arguments that map to the request.
+ has_flattened_params = any([parent, custom_job])
+ if request is not None and has_flattened_params:
+ raise ValueError(
+ "If the `request` argument is set, then none of "
+ "the individual field arguments should be set."
+ )
+
+ # Minor optimization to avoid making a copy if the user passes
+ # in a job_service.CreateCustomJobRequest.
+ # There's no risk of modifying the input as we've already verified
+ # there are no flattened fields.
+ if not isinstance(request, job_service.CreateCustomJobRequest):
+ request = job_service.CreateCustomJobRequest(request)
+
+ # If we have keyword arguments corresponding to fields on the
+ # request, apply these.
+
+ if parent is not None:
+ request.parent = parent
+ if custom_job is not None:
+ request.custom_job = custom_job
+
+ # Wrap the RPC method; this adds retry and timeout information,
+ # and friendly error handling.
+ rpc = self._transport._wrapped_methods[self._transport.create_custom_job]
+
+ # Certain fields should be provided within the metadata header;
+ # add these here.
+ metadata = tuple(metadata) + (
+ gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)),
+ )
+
+ # Send the request.
+ response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,)
+
+ # Done; return the response.
+ return response
+
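`create_custom_job` accepts either the flattened `parent`/`custom_job` arguments or a single request object, but never both. A minimal sketch (the IDs are placeholders and the CustomJob is deliberately incomplete; a real request also needs `job_spec`):

```python
from google.cloud import aiplatform_v1

client = aiplatform_v1.JobServiceClient()
parent = "projects/my-project/locations/us-central1"  # placeholder
custom_job = aiplatform_v1.CustomJob(display_name="example-job")  # job_spec omitted

# Flattened arguments...
response = client.create_custom_job(parent=parent, custom_job=custom_job)

# ...or, equivalently, a single request object (mixing both raises ValueError).
request = aiplatform_v1.CreateCustomJobRequest(parent=parent, custom_job=custom_job)
response = client.create_custom_job(request=request)
```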
+ def get_custom_job(
+ self,
+ request: job_service.GetCustomJobRequest = None,
+ *,
+ name: str = None,
+ retry: retries.Retry = gapic_v1.method.DEFAULT,
+ timeout: float = None,
+ metadata: Sequence[Tuple[str, str]] = (),
+ ) -> custom_job.CustomJob:
+ r"""Gets a CustomJob.
+
+ Args:
+ request (google.cloud.aiplatform_v1.types.GetCustomJobRequest):
+ The request object. Request message for
+ ``JobService.GetCustomJob``.
+ name (str):
+ Required. The name of the CustomJob resource. Format:
+ ``projects/{project}/locations/{location}/customJobs/{custom_job}``
+
+ This corresponds to the ``name`` field
+ on the ``request`` instance; if ``request`` is provided, this
+ should not be set.
+
+ retry (google.api_core.retry.Retry): Designation of what errors, if any,
+ should be retried.
+ timeout (float): The timeout for this request.
+ metadata (Sequence[Tuple[str, str]]): Strings which should be
+ sent along with the request as metadata.
+
+ Returns:
+ google.cloud.aiplatform_v1.types.CustomJob:
+ Represents a job that runs custom
+ workloads such as a Docker container or
+ a Python package. A CustomJob can have
+ multiple worker pools and each worker
+ pool can have its own machine and input
+ spec. A CustomJob will be cleaned up
+ once the job enters terminal state
+ (failed or succeeded).
+
+ """
+ # Create or coerce a protobuf request object.
+ # Sanity check: If we got a request object, we should *not* have
+ # gotten any keyword arguments that map to the request.
+ has_flattened_params = any([name])
+ if request is not None and has_flattened_params:
+ raise ValueError(
+ "If the `request` argument is set, then none of "
+ "the individual field arguments should be set."
+ )
+
+ # Minor optimization to avoid making a copy if the user passes
+ # in a job_service.GetCustomJobRequest.
+ # There's no risk of modifying the input as we've already verified
+ # there are no flattened fields.
+ if not isinstance(request, job_service.GetCustomJobRequest):
+ request = job_service.GetCustomJobRequest(request)
+
+ # If we have keyword arguments corresponding to fields on the
+ # request, apply these.
+
+ if name is not None:
+ request.name = name
+
+ # Wrap the RPC method; this adds retry and timeout information,
+ # and friendly error handling.
+ rpc = self._transport._wrapped_methods[self._transport.get_custom_job]
+
+ # Certain fields should be provided within the metadata header;
+ # add these here.
+ metadata = tuple(metadata) + (
+ gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)),
+ )
+
+ # Send the request.
+ response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,)
+
+ # Done; return the response.
+ return response
+
+ def list_custom_jobs(
+ self,
+ request: job_service.ListCustomJobsRequest = None,
+ *,
+ parent: str = None,
+ retry: retries.Retry = gapic_v1.method.DEFAULT,
+ timeout: float = None,
+ metadata: Sequence[Tuple[str, str]] = (),
+ ) -> pagers.ListCustomJobsPager:
+ r"""Lists CustomJobs in a Location.
+
+ Args:
+ request (google.cloud.aiplatform_v1.types.ListCustomJobsRequest):
+ The request object. Request message for
+ ``JobService.ListCustomJobs``.
+ parent (str):
+ Required. The resource name of the Location to list the
+ CustomJobs from. Format:
+ ``projects/{project}/locations/{location}``
+
+ This corresponds to the ``parent`` field
+ on the ``request`` instance; if ``request`` is provided, this
+ should not be set.
+
+ retry (google.api_core.retry.Retry): Designation of what errors, if any,
+ should be retried.
+ timeout (float): The timeout for this request.
+ metadata (Sequence[Tuple[str, str]]): Strings which should be
+ sent along with the request as metadata.
+
+ Returns:
+ google.cloud.aiplatform_v1.services.job_service.pagers.ListCustomJobsPager:
+ Response message for
+ ``JobService.ListCustomJobs``
+
+ Iterating over this object will yield results and
+ resolve additional pages automatically.
+
+ """
+ # Create or coerce a protobuf request object.
+ # Sanity check: If we got a request object, we should *not* have
+ # gotten any keyword arguments that map to the request.
+ has_flattened_params = any([parent])
+ if request is not None and has_flattened_params:
+ raise ValueError(
+ "If the `request` argument is set, then none of "
+ "the individual field arguments should be set."
+ )
+
+ # Minor optimization to avoid making a copy if the user passes
+ # in a job_service.ListCustomJobsRequest.
+ # There's no risk of modifying the input as we've already verified
+ # there are no flattened fields.
+ if not isinstance(request, job_service.ListCustomJobsRequest):
+ request = job_service.ListCustomJobsRequest(request)
+
+ # If we have keyword arguments corresponding to fields on the
+ # request, apply these.
+
+ if parent is not None:
+ request.parent = parent
+
+ # Wrap the RPC method; this adds retry and timeout information,
+ # and friendly error handling.
+ rpc = self._transport._wrapped_methods[self._transport.list_custom_jobs]
+
+ # Certain fields should be provided within the metadata header;
+ # add these here.
+ metadata = tuple(metadata) + (
+ gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)),
+ )
+
+ # Send the request.
+ response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,)
+
+ # This method is paged; wrap the response in a pager, which provides
+ # an `__iter__` convenience method.
+ response = pagers.ListCustomJobsPager(
+ method=rpc, request=request, response=response, metadata=metadata,
+ )
+
+ # Done; return the response.
+ return response
+
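Because the result is wrapped in `ListCustomJobsPager`, iteration pulls additional pages transparently. A sketch with a placeholder parent:

```python
from google.cloud import aiplatform_v1

client = aiplatform_v1.JobServiceClient()
parent = "projects/my-project/locations/us-central1"  # placeholder

# Item-by-item iteration; extra pages are fetched lazily.
for job in client.list_custom_jobs(parent=parent):
    print(job.name)

# Page-by-page iteration is also available on the pager.
for page in client.list_custom_jobs(parent=parent).pages:
    print(len(page.custom_jobs))
```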
+ def delete_custom_job(
+ self,
+ request: job_service.DeleteCustomJobRequest = None,
+ *,
+ name: str = None,
+ retry: retries.Retry = gapic_v1.method.DEFAULT,
+ timeout: float = None,
+ metadata: Sequence[Tuple[str, str]] = (),
+ ) -> ga_operation.Operation:
+ r"""Deletes a CustomJob.
+
+ Args:
+ request (google.cloud.aiplatform_v1.types.DeleteCustomJobRequest):
+ The request object. Request message for
+ ``JobService.DeleteCustomJob``.
+ name (str):
+ Required. The name of the CustomJob resource to be
+ deleted. Format:
+ ``projects/{project}/locations/{location}/customJobs/{custom_job}``
+
+ This corresponds to the ``name`` field
+ on the ``request`` instance; if ``request`` is provided, this
+ should not be set.
+
+ retry (google.api_core.retry.Retry): Designation of what errors, if any,
+ should be retried.
+ timeout (float): The timeout for this request.
+ metadata (Sequence[Tuple[str, str]]): Strings which should be
+ sent along with the request as metadata.
+
+ Returns:
+ google.api_core.operation.Operation:
+ An object representing a long-running operation.
+
+ The result type for the operation will be :class:`google.protobuf.empty_pb2.Empty` A generic empty message that you can re-use to avoid defining duplicated
+ empty messages in your APIs. A typical example is to
+ use it as the request or the response type of an API
+ method. For instance:
+
+ service Foo {
+ rpc Bar(google.protobuf.Empty) returns
+ (google.protobuf.Empty);
+
+ }
+
+ The JSON representation for Empty is empty JSON
+ object {}.
+
+ """
+ # Create or coerce a protobuf request object.
+ # Sanity check: If we got a request object, we should *not* have
+ # gotten any keyword arguments that map to the request.
+ has_flattened_params = any([name])
+ if request is not None and has_flattened_params:
+ raise ValueError(
+ "If the `request` argument is set, then none of "
+ "the individual field arguments should be set."
+ )
+
+ # Minor optimization to avoid making a copy if the user passes
+ # in a job_service.DeleteCustomJobRequest.
+ # There's no risk of modifying the input as we've already verified
+ # there are no flattened fields.
+ if not isinstance(request, job_service.DeleteCustomJobRequest):
+ request = job_service.DeleteCustomJobRequest(request)
+
+ # If we have keyword arguments corresponding to fields on the
+ # request, apply these.
+
+ if name is not None:
+ request.name = name
+
+ # Wrap the RPC method; this adds retry and timeout information,
+ # and friendly error handling.
+ rpc = self._transport._wrapped_methods[self._transport.delete_custom_job]
+
+ # Certain fields should be provided within the metadata header;
+ # add these here.
+ metadata = tuple(metadata) + (
+ gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)),
+ )
+
+ # Send the request.
+ response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,)
+
+ # Wrap the response in an operation future.
+ response = ga_operation.from_gapic(
+ response,
+ self._transport.operations_client,
+ empty.Empty,
+ metadata_type=gca_operation.DeleteOperationMetadata,
+ )
+
+ # Done; return the response.
+ return response
+
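`delete_custom_job` returns a long-running operation future, so blocking on completion is a one-liner. Sketch (the resource name is a placeholder):

```python
from google.cloud import aiplatform_v1

client = aiplatform_v1.JobServiceClient()

operation = client.delete_custom_job(
    name="projects/my-project/locations/us-central1/customJobs/1234"
)
# Blocks until the deletion finishes; the result type is Empty.
operation.result()
```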
+ def cancel_custom_job(
+ self,
+ request: job_service.CancelCustomJobRequest = None,
+ *,
+ name: str = None,
+ retry: retries.Retry = gapic_v1.method.DEFAULT,
+ timeout: float = None,
+ metadata: Sequence[Tuple[str, str]] = (),
+ ) -> None:
+ r"""Cancels a CustomJob. Starts asynchronous cancellation on the
+ CustomJob. The server makes a best effort to cancel the job, but
+ success is not guaranteed. Clients can use
+ ``JobService.GetCustomJob``
+ or other methods to check whether the cancellation succeeded or
+ whether the job completed despite cancellation. On successful
+ cancellation, the CustomJob is not deleted; instead it becomes a
+ job with a
+ ``CustomJob.error``
+ value with a ``google.rpc.Status.code`` of
+ 1, corresponding to ``Code.CANCELLED``, and
+ ``CustomJob.state`` is
+ set to ``CANCELLED``.
+
+ Args:
+ request (google.cloud.aiplatform_v1.types.CancelCustomJobRequest):
+ The request object. Request message for
+ ``JobService.CancelCustomJob``.
+ name (str):
+ Required. The name of the CustomJob to cancel. Format:
+ ``projects/{project}/locations/{location}/customJobs/{custom_job}``
+
+ This corresponds to the ``name`` field
+ on the ``request`` instance; if ``request`` is provided, this
+ should not be set.
+
+ retry (google.api_core.retry.Retry): Designation of what errors, if any,
+ should be retried.
+ timeout (float): The timeout for this request.
+ metadata (Sequence[Tuple[str, str]]): Strings which should be
+ sent along with the request as metadata.
+ """
+ # Create or coerce a protobuf request object.
+ # Sanity check: If we got a request object, we should *not* have
+ # gotten any keyword arguments that map to the request.
+ has_flattened_params = any([name])
+ if request is not None and has_flattened_params:
+ raise ValueError(
+ "If the `request` argument is set, then none of "
+ "the individual field arguments should be set."
+ )
+
+ # Minor optimization to avoid making a copy if the user passes
+ # in a job_service.CancelCustomJobRequest.
+ # There's no risk of modifying the input as we've already verified
+ # there are no flattened fields.
+ if not isinstance(request, job_service.CancelCustomJobRequest):
+ request = job_service.CancelCustomJobRequest(request)
+
+ # If we have keyword arguments corresponding to fields on the
+ # request, apply these.
+
+ if name is not None:
+ request.name = name
+
+ # Wrap the RPC method; this adds retry and timeout information,
+ # and friendly error handling.
+ rpc = self._transport._wrapped_methods[self._transport.cancel_custom_job]
+
+ # Certain fields should be provided within the metadata header;
+ # add these here.
+ metadata = tuple(metadata) + (
+ gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)),
+ )
+
+ # Send the request.
+ rpc(
+ request, retry=retry, timeout=timeout, metadata=metadata,
+ )
+
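Since cancellation is asynchronous and best-effort, a caller typically cancels and then polls the job to confirm the state change, for example:

```python
from google.cloud import aiplatform_v1

client = aiplatform_v1.JobServiceClient()
name = "projects/my-project/locations/us-central1/customJobs/1234"  # placeholder

client.cancel_custom_job(name=name)

# Re-fetch the job and check its state; it reaches CANCELLED only if
# the cancellation landed before the job finished on its own.
job = client.get_custom_job(name=name)
print(job.state)
```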
+ def create_data_labeling_job(
+ self,
+ request: job_service.CreateDataLabelingJobRequest = None,
+ *,
+ parent: str = None,
+ data_labeling_job: gca_data_labeling_job.DataLabelingJob = None,
+ retry: retries.Retry = gapic_v1.method.DEFAULT,
+ timeout: float = None,
+ metadata: Sequence[Tuple[str, str]] = (),
+ ) -> gca_data_labeling_job.DataLabelingJob:
+ r"""Creates a DataLabelingJob.
+
+ Args:
+ request (google.cloud.aiplatform_v1.types.CreateDataLabelingJobRequest):
+ The request object. Request message for
+ [DataLabelingJobService.CreateDataLabelingJob][].
+ parent (str):
+ Required. The parent of the DataLabelingJob. Format:
+ ``projects/{project}/locations/{location}``
+
+ This corresponds to the ``parent`` field
+ on the ``request`` instance; if ``request`` is provided, this
+ should not be set.
+ data_labeling_job (google.cloud.aiplatform_v1.types.DataLabelingJob):
+ Required. The DataLabelingJob to
+ create.
+
+ This corresponds to the ``data_labeling_job`` field
+ on the ``request`` instance; if ``request`` is provided, this
+ should not be set.
+
+ retry (google.api_core.retry.Retry): Designation of what errors, if any,
+ should be retried.
+ timeout (float): The timeout for this request.
+ metadata (Sequence[Tuple[str, str]]): Strings which should be
+ sent along with the request as metadata.
+
+ Returns:
+ google.cloud.aiplatform_v1.types.DataLabelingJob:
+ DataLabelingJob is used to trigger a
+ human labeling job on unlabeled data
+ from the following Dataset:
+
+ """
+ # Create or coerce a protobuf request object.
+ # Sanity check: If we got a request object, we should *not* have
+ # gotten any keyword arguments that map to the request.
+ has_flattened_params = any([parent, data_labeling_job])
+ if request is not None and has_flattened_params:
+ raise ValueError(
+ "If the `request` argument is set, then none of "
+ "the individual field arguments should be set."
+ )
+
+ # Minor optimization to avoid making a copy if the user passes
+ # in a job_service.CreateDataLabelingJobRequest.
+ # There's no risk of modifying the input as we've already verified
+ # there are no flattened fields.
+ if not isinstance(request, job_service.CreateDataLabelingJobRequest):
+ request = job_service.CreateDataLabelingJobRequest(request)
+
+ # If we have keyword arguments corresponding to fields on the
+ # request, apply these.
+
+ if parent is not None:
+ request.parent = parent
+ if data_labeling_job is not None:
+ request.data_labeling_job = data_labeling_job
+
+ # Wrap the RPC method; this adds retry and timeout information,
+ # and friendly error handling.
+ rpc = self._transport._wrapped_methods[self._transport.create_data_labeling_job]
+
+ # Certain fields should be provided within the metadata header;
+ # add these here.
+ metadata = tuple(metadata) + (
+ gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)),
+ )
+
+ # Send the request.
+ response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,)
+
+ # Done; return the response.
+ return response
+
+ def get_data_labeling_job(
+ self,
+ request: job_service.GetDataLabelingJobRequest = None,
+ *,
+ name: str = None,
+ retry: retries.Retry = gapic_v1.method.DEFAULT,
+ timeout: float = None,
+ metadata: Sequence[Tuple[str, str]] = (),
+ ) -> data_labeling_job.DataLabelingJob:
+ r"""Gets a DataLabelingJob.
+
+ Args:
+ request (google.cloud.aiplatform_v1.types.GetDataLabelingJobRequest):
+ The request object. Request message for
+ [DataLabelingJobService.GetDataLabelingJob][].
+ name (str):
+ Required. The name of the DataLabelingJob. Format:
+
+ ``projects/{project}/locations/{location}/dataLabelingJobs/{data_labeling_job}``
+
+ This corresponds to the ``name`` field
+ on the ``request`` instance; if ``request`` is provided, this
+ should not be set.
+
+ retry (google.api_core.retry.Retry): Designation of what errors, if any,
+ should be retried.
+ timeout (float): The timeout for this request.
+ metadata (Sequence[Tuple[str, str]]): Strings which should be
+ sent along with the request as metadata.
+
+ Returns:
+ google.cloud.aiplatform_v1.types.DataLabelingJob:
+ DataLabelingJob is used to trigger a
+ human labeling job on unlabeled data
+ from the following Dataset:
+
+ """
+ # Create or coerce a protobuf request object.
+ # Sanity check: If we got a request object, we should *not* have
+ # gotten any keyword arguments that map to the request.
+ has_flattened_params = any([name])
+ if request is not None and has_flattened_params:
+ raise ValueError(
+ "If the `request` argument is set, then none of "
+ "the individual field arguments should be set."
+ )
+
+ # Minor optimization to avoid making a copy if the user passes
+ # in a job_service.GetDataLabelingJobRequest.
+ # There's no risk of modifying the input as we've already verified
+ # there are no flattened fields.
+ if not isinstance(request, job_service.GetDataLabelingJobRequest):
+ request = job_service.GetDataLabelingJobRequest(request)
+
+ # If we have keyword arguments corresponding to fields on the
+ # request, apply these.
+
+ if name is not None:
+ request.name = name
+
+ # Wrap the RPC method; this adds retry and timeout information,
+ # and friendly error handling.
+ rpc = self._transport._wrapped_methods[self._transport.get_data_labeling_job]
+
+ # Certain fields should be provided within the metadata header;
+ # add these here.
+ metadata = tuple(metadata) + (
+ gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)),
+ )
+
+ # Send the request.
+ response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,)
+
+ # Done; return the response.
+ return response
+
+ def list_data_labeling_jobs(
+ self,
+ request: job_service.ListDataLabelingJobsRequest = None,
+ *,
+ parent: str = None,
+ retry: retries.Retry = gapic_v1.method.DEFAULT,
+ timeout: float = None,
+ metadata: Sequence[Tuple[str, str]] = (),
+ ) -> pagers.ListDataLabelingJobsPager:
+ r"""Lists DataLabelingJobs in a Location.
+
+ Args:
+ request (google.cloud.aiplatform_v1.types.ListDataLabelingJobsRequest):
+ The request object. Request message for
+ [DataLabelingJobService.ListDataLabelingJobs][].
+ parent (str):
+ Required. The parent of the DataLabelingJob. Format:
+ ``projects/{project}/locations/{location}``
+
+ This corresponds to the ``parent`` field
+ on the ``request`` instance; if ``request`` is provided, this
+ should not be set.
+
+ retry (google.api_core.retry.Retry): Designation of what errors, if any,
+ should be retried.
+ timeout (float): The timeout for this request.
+ metadata (Sequence[Tuple[str, str]]): Strings which should be
+ sent along with the request as metadata.
+
+ Returns:
+ google.cloud.aiplatform_v1.services.job_service.pagers.ListDataLabelingJobsPager:
+ Response message for
+ ``JobService.ListDataLabelingJobs``.
+
+ Iterating over this object will yield results and
+ resolve additional pages automatically.
+
+ """
+ # Create or coerce a protobuf request object.
+ # Sanity check: If we got a request object, we should *not* have
+ # gotten any keyword arguments that map to the request.
+ has_flattened_params = any([parent])
+ if request is not None and has_flattened_params:
+ raise ValueError(
+ "If the `request` argument is set, then none of "
+ "the individual field arguments should be set."
+ )
+
+ # Minor optimization to avoid making a copy if the user passes
+ # in a job_service.ListDataLabelingJobsRequest.
+ # There's no risk of modifying the input as we've already verified
+ # there are no flattened fields.
+ if not isinstance(request, job_service.ListDataLabelingJobsRequest):
+ request = job_service.ListDataLabelingJobsRequest(request)
+
+ # If we have keyword arguments corresponding to fields on the
+ # request, apply these.
+
+ if parent is not None:
+ request.parent = parent
+
+ # Wrap the RPC method; this adds retry and timeout information,
+ # and friendly error handling.
+ rpc = self._transport._wrapped_methods[self._transport.list_data_labeling_jobs]
+
+ # Certain fields should be provided within the metadata header;
+ # add these here.
+ metadata = tuple(metadata) + (
+ gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)),
+ )
+
+ # Send the request.
+ response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,)
+
+ # This method is paged; wrap the response in a pager, which provides
+ # an `__iter__` convenience method.
+ response = pagers.ListDataLabelingJobsPager(
+ method=rpc, request=request, response=response, metadata=metadata,
+ )
+
+ # Done; return the response.
+ return response
+
+ def delete_data_labeling_job(
+ self,
+ request: job_service.DeleteDataLabelingJobRequest = None,
+ *,
+ name: str = None,
+ retry: retries.Retry = gapic_v1.method.DEFAULT,
+ timeout: float = None,
+ metadata: Sequence[Tuple[str, str]] = (),
+ ) -> ga_operation.Operation:
+ r"""Deletes a DataLabelingJob.
+
+ Args:
+ request (google.cloud.aiplatform_v1.types.DeleteDataLabelingJobRequest):
+ The request object. Request message for
+ ``JobService.DeleteDataLabelingJob``.
+ name (str):
+ Required. The name of the DataLabelingJob to be deleted.
+ Format:
+
+ ``projects/{project}/locations/{location}/dataLabelingJobs/{data_labeling_job}``
+
+ This corresponds to the ``name`` field
+ on the ``request`` instance; if ``request`` is provided, this
+ should not be set.
+
+ retry (google.api_core.retry.Retry): Designation of what errors, if any,
+ should be retried.
+ timeout (float): The timeout for this request.
+ metadata (Sequence[Tuple[str, str]]): Strings which should be
+ sent along with the request as metadata.
+
+ Returns:
+ google.api_core.operation.Operation:
+ An object representing a long-running operation.
+
+ The result type for the operation will be :class:`google.protobuf.empty_pb2.Empty` A generic empty message that you can re-use to avoid defining duplicated
+ empty messages in your APIs. A typical example is to
+ use it as the request or the response type of an API
+ method. For instance:
+
+ service Foo {
+ rpc Bar(google.protobuf.Empty) returns
+ (google.protobuf.Empty);
+
+ }
+
+ The JSON representation for Empty is empty JSON
+ object {}.
+
+ """
+ # Create or coerce a protobuf request object.
+ # Sanity check: If we got a request object, we should *not* have
+ # gotten any keyword arguments that map to the request.
+ has_flattened_params = any([name])
+ if request is not None and has_flattened_params:
+ raise ValueError(
+ "If the `request` argument is set, then none of "
+ "the individual field arguments should be set."
+ )
+
+ # Minor optimization to avoid making a copy if the user passes
+ # in a job_service.DeleteDataLabelingJobRequest.
+ # There's no risk of modifying the input as we've already verified
+ # there are no flattened fields.
+ if not isinstance(request, job_service.DeleteDataLabelingJobRequest):
+ request = job_service.DeleteDataLabelingJobRequest(request)
+
+ # If we have keyword arguments corresponding to fields on the
+ # request, apply these.
+
+ if name is not None:
+ request.name = name
+
+ # Wrap the RPC method; this adds retry and timeout information,
+ # and friendly error handling.
+ rpc = self._transport._wrapped_methods[self._transport.delete_data_labeling_job]
+
+ # Certain fields should be provided within the metadata header;
+ # add these here.
+ metadata = tuple(metadata) + (
+ gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)),
+ )
+
+ # Send the request.
+ response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,)
+
+ # Wrap the response in an operation future.
+ response = ga_operation.from_gapic(
+ response,
+ self._transport.operations_client,
+ empty.Empty,
+ metadata_type=gca_operation.DeleteOperationMetadata,
+ )
+
+ # Done; return the response.
+ return response
+
+ def cancel_data_labeling_job(
+ self,
+ request: job_service.CancelDataLabelingJobRequest = None,
+ *,
+ name: str = None,
+ retry: retries.Retry = gapic_v1.method.DEFAULT,
+ timeout: float = None,
+ metadata: Sequence[Tuple[str, str]] = (),
+ ) -> None:
+ r"""Cancels a DataLabelingJob. Success of cancellation is
+ not guaranteed.
+
+ Args:
+ request (google.cloud.aiplatform_v1.types.CancelDataLabelingJobRequest):
+ The request object. Request message for
+ [DataLabelingJobService.CancelDataLabelingJob][].
+ name (str):
+ Required. The name of the DataLabelingJob. Format:
+
+ ``projects/{project}/locations/{location}/dataLabelingJobs/{data_labeling_job}``
+
+ This corresponds to the ``name`` field
+ on the ``request`` instance; if ``request`` is provided, this
+ should not be set.
+
+ retry (google.api_core.retry.Retry): Designation of what errors, if any,
+ should be retried.
+ timeout (float): The timeout for this request.
+ metadata (Sequence[Tuple[str, str]]): Strings which should be
+ sent along with the request as metadata.
+ """
+ # Create or coerce a protobuf request object.
+ # Sanity check: If we got a request object, we should *not* have
+ # gotten any keyword arguments that map to the request.
+ has_flattened_params = any([name])
+ if request is not None and has_flattened_params:
+ raise ValueError(
+ "If the `request` argument is set, then none of "
+ "the individual field arguments should be set."
+ )
+
+ # Minor optimization to avoid making a copy if the user passes
+ # in a job_service.CancelDataLabelingJobRequest.
+ # There's no risk of modifying the input as we've already verified
+ # there are no flattened fields.
+ if not isinstance(request, job_service.CancelDataLabelingJobRequest):
+ request = job_service.CancelDataLabelingJobRequest(request)
+
+ # If we have keyword arguments corresponding to fields on the
+ # request, apply these.
+
+ if name is not None:
+ request.name = name
+
+ # Wrap the RPC method; this adds retry and timeout information,
+ # and friendly error handling.
+ rpc = self._transport._wrapped_methods[self._transport.cancel_data_labeling_job]
+
+ # Certain fields should be provided within the metadata header;
+ # add these here.
+ metadata = tuple(metadata) + (
+ gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)),
+ )
+
+ # Send the request.
+ rpc(
+ request, retry=retry, timeout=timeout, metadata=metadata,
+ )
+
+ def create_hyperparameter_tuning_job(
+ self,
+ request: job_service.CreateHyperparameterTuningJobRequest = None,
+ *,
+ parent: str = None,
+ hyperparameter_tuning_job: gca_hyperparameter_tuning_job.HyperparameterTuningJob = None,
+ retry: retries.Retry = gapic_v1.method.DEFAULT,
+ timeout: float = None,
+ metadata: Sequence[Tuple[str, str]] = (),
+ ) -> gca_hyperparameter_tuning_job.HyperparameterTuningJob:
+ r"""Creates a HyperparameterTuningJob
+
+ Args:
+ request (google.cloud.aiplatform_v1.types.CreateHyperparameterTuningJobRequest):
+ The request object. Request message for
+ ``JobService.CreateHyperparameterTuningJob``.
+ parent (str):
+ Required. The resource name of the Location to create
+ the HyperparameterTuningJob in. Format:
+ ``projects/{project}/locations/{location}``
+
+ This corresponds to the ``parent`` field
+ on the ``request`` instance; if ``request`` is provided, this
+ should not be set.
+ hyperparameter_tuning_job (google.cloud.aiplatform_v1.types.HyperparameterTuningJob):
+ Required. The HyperparameterTuningJob
+ to create.
+
+ This corresponds to the ``hyperparameter_tuning_job`` field
+ on the ``request`` instance; if ``request`` is provided, this
+ should not be set.
+
+ retry (google.api_core.retry.Retry): Designation of what errors, if any,
+ should be retried.
+ timeout (float): The timeout for this request.
+ metadata (Sequence[Tuple[str, str]]): Strings which should be
+ sent along with the request as metadata.
+
+ Returns:
+ google.cloud.aiplatform_v1.types.HyperparameterTuningJob:
+ Represents a HyperparameterTuningJob.
+ A HyperparameterTuningJob has a Study
+ specification and multiple CustomJobs
+ with identical CustomJob specification.
+
+ """
+ # Create or coerce a protobuf request object.
+ # Sanity check: If we got a request object, we should *not* have
+ # gotten any keyword arguments that map to the request.
+ has_flattened_params = any([parent, hyperparameter_tuning_job])
+ if request is not None and has_flattened_params:
+ raise ValueError(
+ "If the `request` argument is set, then none of "
+ "the individual field arguments should be set."
+ )
+
+ # Minor optimization to avoid making a copy if the user passes
+ # in a job_service.CreateHyperparameterTuningJobRequest.
+ # There's no risk of modifying the input as we've already verified
+ # there are no flattened fields.
+ if not isinstance(request, job_service.CreateHyperparameterTuningJobRequest):
+ request = job_service.CreateHyperparameterTuningJobRequest(request)
+
+ # If we have keyword arguments corresponding to fields on the
+ # request, apply these.
+
+ if parent is not None:
+ request.parent = parent
+ if hyperparameter_tuning_job is not None:
+ request.hyperparameter_tuning_job = hyperparameter_tuning_job
+
+ # Wrap the RPC method; this adds retry and timeout information,
+ # and friendly error handling.
+ rpc = self._transport._wrapped_methods[
+ self._transport.create_hyperparameter_tuning_job
+ ]
+
+ # Certain fields should be provided within the metadata header;
+ # add these here.
+ metadata = tuple(metadata) + (
+ gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)),
+ )
+
+ # Send the request.
+ response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,)
+
+ # Done; return the response.
+ return response
+
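A hedged sketch of calling `create_hyperparameter_tuning_job`; the `study_spec` and `trial_job_spec` fields are intentionally omitted here, so a real request would need them, and all IDs are placeholders:

```python
from google.cloud import aiplatform_v1

client = aiplatform_v1.JobServiceClient()

hp_job = aiplatform_v1.HyperparameterTuningJob(
    display_name="example-hp-tuning",
    max_trial_count=4,
    parallel_trial_count=2,
    # study_spec and trial_job_spec omitted for brevity.
)
response = client.create_hyperparameter_tuning_job(
    parent="projects/my-project/locations/us-central1",
    hyperparameter_tuning_job=hp_job,
)
```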
+ def get_hyperparameter_tuning_job(
+ self,
+ request: job_service.GetHyperparameterTuningJobRequest = None,
+ *,
+ name: str = None,
+ retry: retries.Retry = gapic_v1.method.DEFAULT,
+ timeout: float = None,
+ metadata: Sequence[Tuple[str, str]] = (),
+ ) -> hyperparameter_tuning_job.HyperparameterTuningJob:
+ r"""Gets a HyperparameterTuningJob
+
+ Args:
+ request (google.cloud.aiplatform_v1.types.GetHyperparameterTuningJobRequest):
+ The request object. Request message for
+ ``JobService.GetHyperparameterTuningJob``.
+ name (str):
+ Required. The name of the HyperparameterTuningJob
+ resource. Format:
+
+ ``projects/{project}/locations/{location}/hyperparameterTuningJobs/{hyperparameter_tuning_job}``
+
+ This corresponds to the ``name`` field
+ on the ``request`` instance; if ``request`` is provided, this
+ should not be set.
+
+ retry (google.api_core.retry.Retry): Designation of what errors, if any,
+ should be retried.
+ timeout (float): The timeout for this request.
+ metadata (Sequence[Tuple[str, str]]): Strings which should be
+ sent along with the request as metadata.
+
+ Returns:
+ google.cloud.aiplatform_v1.types.HyperparameterTuningJob:
+ Represents a HyperparameterTuningJob.
+ A HyperparameterTuningJob has a Study
+ specification and multiple CustomJobs
+ with identical CustomJob specification.
+
+ """
+ # Create or coerce a protobuf request object.
+ # Sanity check: If we got a request object, we should *not* have
+ # gotten any keyword arguments that map to the request.
+ has_flattened_params = any([name])
+ if request is not None and has_flattened_params:
+ raise ValueError(
+ "If the `request` argument is set, then none of "
+ "the individual field arguments should be set."
+ )
+
+ # Minor optimization to avoid making a copy if the user passes
+ # in a job_service.GetHyperparameterTuningJobRequest.
+ # There's no risk of modifying the input as we've already verified
+ # there are no flattened fields.
+ if not isinstance(request, job_service.GetHyperparameterTuningJobRequest):
+ request = job_service.GetHyperparameterTuningJobRequest(request)
+
+ # If we have keyword arguments corresponding to fields on the
+ # request, apply these.
+
+ if name is not None:
+ request.name = name
+
+ # Wrap the RPC method; this adds retry and timeout information,
+ # and friendly error handling.
+ rpc = self._transport._wrapped_methods[
+ self._transport.get_hyperparameter_tuning_job
+ ]
+
+ # Certain fields should be provided within the metadata header;
+ # add these here.
+ metadata = tuple(metadata) + (
+ gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)),
+ )
+
+ # Send the request.
+ response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,)
+
+ # Done; return the response.
+ return response
+
+ def list_hyperparameter_tuning_jobs(
+ self,
+ request: job_service.ListHyperparameterTuningJobsRequest = None,
+ *,
+ parent: str = None,
+ retry: retries.Retry = gapic_v1.method.DEFAULT,
+ timeout: float = None,
+ metadata: Sequence[Tuple[str, str]] = (),
+ ) -> pagers.ListHyperparameterTuningJobsPager:
+ r"""Lists HyperparameterTuningJobs in a Location.
+
+ Args:
+ request (google.cloud.aiplatform_v1.types.ListHyperparameterTuningJobsRequest):
+ The request object. Request message for
+ ``JobService.ListHyperparameterTuningJobs``.
+ parent (str):
+ Required. The resource name of the Location to list the
+ HyperparameterTuningJobs from. Format:
+ ``projects/{project}/locations/{location}``
+
+ This corresponds to the ``parent`` field
+ on the ``request`` instance; if ``request`` is provided, this
+ should not be set.
+
+ retry (google.api_core.retry.Retry): Designation of what errors, if any,
+ should be retried.
+ timeout (float): The timeout for this request.
+ metadata (Sequence[Tuple[str, str]]): Strings which should be
+ sent along with the request as metadata.
+
+ Returns:
+ google.cloud.aiplatform_v1.services.job_service.pagers.ListHyperparameterTuningJobsPager:
+ Response message for
+ ``JobService.ListHyperparameterTuningJobs``
+
+ Iterating over this object will yield results and
+ resolve additional pages automatically.
+
+ """
+ # Create or coerce a protobuf request object.
+ # Sanity check: If we got a request object, we should *not* have
+ # gotten any keyword arguments that map to the request.
+ has_flattened_params = any([parent])
+ if request is not None and has_flattened_params:
+ raise ValueError(
+ "If the `request` argument is set, then none of "
+ "the individual field arguments should be set."
+ )
+
+ # Minor optimization to avoid making a copy if the user passes
+ # in a job_service.ListHyperparameterTuningJobsRequest.
+ # There's no risk of modifying the input as we've already verified
+ # there are no flattened fields.
+ if not isinstance(request, job_service.ListHyperparameterTuningJobsRequest):
+ request = job_service.ListHyperparameterTuningJobsRequest(request)
+
+ # If we have keyword arguments corresponding to fields on the
+ # request, apply these.
+
+ if parent is not None:
+ request.parent = parent
+
+ # Wrap the RPC method; this adds retry and timeout information,
+ # and friendly error handling.
+ rpc = self._transport._wrapped_methods[
+ self._transport.list_hyperparameter_tuning_jobs
+ ]
+
+ # Certain fields should be provided within the metadata header;
+ # add these here.
+ metadata = tuple(metadata) + (
+ gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)),
+ )
+
+ # Send the request.
+ response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,)
+
+ # This method is paged; wrap the response in a pager, which provides
+ # an `__iter__` convenience method.
+ response = pagers.ListHyperparameterTuningJobsPager(
+ method=rpc, request=request, response=response, metadata=metadata,
+ )
+
+ # Done; return the response.
+ return response
+
+ def delete_hyperparameter_tuning_job(
+ self,
+ request: job_service.DeleteHyperparameterTuningJobRequest = None,
+ *,
+ name: str = None,
+ retry: retries.Retry = gapic_v1.method.DEFAULT,
+ timeout: float = None,
+ metadata: Sequence[Tuple[str, str]] = (),
+ ) -> ga_operation.Operation:
+ r"""Deletes a HyperparameterTuningJob.
+
+ Args:
+ request (google.cloud.aiplatform_v1.types.DeleteHyperparameterTuningJobRequest):
+ The request object. Request message for
+ ``JobService.DeleteHyperparameterTuningJob``.
+ name (str):
+ Required. The name of the HyperparameterTuningJob
+ resource to be deleted. Format:
+
+ ``projects/{project}/locations/{location}/hyperparameterTuningJobs/{hyperparameter_tuning_job}``
+
+ This corresponds to the ``name`` field
+ on the ``request`` instance; if ``request`` is provided, this
+ should not be set.
+
+ retry (google.api_core.retry.Retry): Designation of what errors, if any,
+ should be retried.
+ timeout (float): The timeout for this request.
+ metadata (Sequence[Tuple[str, str]]): Strings which should be
+ sent along with the request as metadata.
+
+ Returns:
+ google.api_core.operation.Operation:
+ An object representing a long-running operation.
+
+ The result type for the operation will be :class:`google.protobuf.empty_pb2.Empty` A generic empty message that you can re-use to avoid defining duplicated
+ empty messages in your APIs. A typical example is to
+ use it as the request or the response type of an API
+ method. For instance:
+
+ service Foo {
+ rpc Bar(google.protobuf.Empty) returns
+ (google.protobuf.Empty);
+
+ }
+
+ The JSON representation for Empty is empty JSON
+ object {}.
+
+ """
+ # Create or coerce a protobuf request object.
+ # Sanity check: If we got a request object, we should *not* have
+ # gotten any keyword arguments that map to the request.
+ has_flattened_params = any([name])
+ if request is not None and has_flattened_params:
+ raise ValueError(
+ "If the `request` argument is set, then none of "
+ "the individual field arguments should be set."
+ )
+
+ # Minor optimization to avoid making a copy if the user passes
+ # in a job_service.DeleteHyperparameterTuningJobRequest.
+ # There's no risk of modifying the input as we've already verified
+ # there are no flattened fields.
+ if not isinstance(request, job_service.DeleteHyperparameterTuningJobRequest):
+ request = job_service.DeleteHyperparameterTuningJobRequest(request)
+
+ # If we have keyword arguments corresponding to fields on the
+ # request, apply these.
+
+ if name is not None:
+ request.name = name
+
+ # Wrap the RPC method; this adds retry and timeout information,
+ # and friendly error handling.
+ rpc = self._transport._wrapped_methods[
+ self._transport.delete_hyperparameter_tuning_job
+ ]
+
+ # Certain fields should be provided within the metadata header;
+ # add these here.
+ metadata = tuple(metadata) + (
+ gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)),
+ )
+
+ # Send the request.
+ response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,)
+
+ # Wrap the response in an operation future.
+ response = ga_operation.from_gapic(
+ response,
+ self._transport.operations_client,
+ empty.Empty,
+ metadata_type=gca_operation.DeleteOperationMetadata,
+ )
+
+ # Done; return the response.
+ return response
+
+ def cancel_hyperparameter_tuning_job(
+ self,
+ request: job_service.CancelHyperparameterTuningJobRequest = None,
+ *,
+ name: str = None,
+ retry: retries.Retry = gapic_v1.method.DEFAULT,
+ timeout: float = None,
+ metadata: Sequence[Tuple[str, str]] = (),
+ ) -> None:
+ r"""Cancels a HyperparameterTuningJob. Starts asynchronous
+ cancellation on the HyperparameterTuningJob. The server makes a
+ best effort to cancel the job, but success is not guaranteed.
+ Clients can use
+ ``JobService.GetHyperparameterTuningJob``
+ or other methods to check whether the cancellation succeeded or
+ whether the job completed despite cancellation. On successful
+ cancellation, the HyperparameterTuningJob is not deleted;
+ instead it becomes a job with a
+ ``HyperparameterTuningJob.error``
+ value with a ``google.rpc.Status.code`` of
+ 1, corresponding to ``Code.CANCELLED``, and
+ ``HyperparameterTuningJob.state``
+ is set to ``CANCELLED``.
+
+ Args:
+ request (google.cloud.aiplatform_v1.types.CancelHyperparameterTuningJobRequest):
+ The request object. Request message for
+ ``JobService.CancelHyperparameterTuningJob``.
+ name (str):
+ Required. The name of the HyperparameterTuningJob to
+ cancel. Format:
+
+ ``projects/{project}/locations/{location}/hyperparameterTuningJobs/{hyperparameter_tuning_job}``
+
+ This corresponds to the ``name`` field
+ on the ``request`` instance; if ``request`` is provided, this
+ should not be set.
+
+ retry (google.api_core.retry.Retry): Designation of what errors, if any,
+ should be retried.
+ timeout (float): The timeout for this request.
+ metadata (Sequence[Tuple[str, str]]): Strings which should be
+ sent along with the request as metadata.
+ """
+ # Create or coerce a protobuf request object.
+ # Sanity check: If we got a request object, we should *not* have
+ # gotten any keyword arguments that map to the request.
+ has_flattened_params = any([name])
+ if request is not None and has_flattened_params:
+ raise ValueError(
+ "If the `request` argument is set, then none of "
+ "the individual field arguments should be set."
+ )
+
+ # Minor optimization to avoid making a copy if the user passes
+ # in a job_service.CancelHyperparameterTuningJobRequest.
+ # There's no risk of modifying the input as we've already verified
+ # there are no flattened fields.
+ if not isinstance(request, job_service.CancelHyperparameterTuningJobRequest):
+ request = job_service.CancelHyperparameterTuningJobRequest(request)
+
+ # If we have keyword arguments corresponding to fields on the
+ # request, apply these.
+
+ if name is not None:
+ request.name = name
+
+ # Wrap the RPC method; this adds retry and timeout information,
+ # and friendly error handling.
+ rpc = self._transport._wrapped_methods[
+ self._transport.cancel_hyperparameter_tuning_job
+ ]
+
+ # Certain fields should be provided within the metadata header;
+ # add these here.
+ metadata = tuple(metadata) + (
+ gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)),
+ )
+
+ # Send the request.
+ rpc(
+ request, retry=retry, timeout=timeout, metadata=metadata,
+ )
+
+ def create_batch_prediction_job(
+ self,
+ request: job_service.CreateBatchPredictionJobRequest = None,
+ *,
+ parent: str = None,
+ batch_prediction_job: gca_batch_prediction_job.BatchPredictionJob = None,
+ retry: retries.Retry = gapic_v1.method.DEFAULT,
+ timeout: float = None,
+ metadata: Sequence[Tuple[str, str]] = (),
+ ) -> gca_batch_prediction_job.BatchPredictionJob:
+        r"""Creates a BatchPredictionJob. The service attempts to
+        start a newly created BatchPredictionJob right away.
+
+ Args:
+ request (google.cloud.aiplatform_v1.types.CreateBatchPredictionJobRequest):
+ The request object. Request message for
+ ``JobService.CreateBatchPredictionJob``.
+ parent (str):
+ Required. The resource name of the Location to create
+ the BatchPredictionJob in. Format:
+ ``projects/{project}/locations/{location}``
+
+ This corresponds to the ``parent`` field
+ on the ``request`` instance; if ``request`` is provided, this
+ should not be set.
+ batch_prediction_job (google.cloud.aiplatform_v1.types.BatchPredictionJob):
+ Required. The BatchPredictionJob to
+ create.
+
+ This corresponds to the ``batch_prediction_job`` field
+ on the ``request`` instance; if ``request`` is provided, this
+ should not be set.
+
+ retry (google.api_core.retry.Retry): Designation of what errors, if any,
+ should be retried.
+ timeout (float): The timeout for this request.
+ metadata (Sequence[Tuple[str, str]]): Strings which should be
+ sent along with the request as metadata.
+
+ Returns:
+ google.cloud.aiplatform_v1.types.BatchPredictionJob:
+ A job that uses a ``Model`` to produce predictions
+ on multiple [input
+ instances][google.cloud.aiplatform.v1.BatchPredictionJob.input_config].
+                   If predictions for a significant portion of the
+ instances fail, the job may finish without attempting
+ predictions for all remaining instances.
+
+ """
+ # Create or coerce a protobuf request object.
+ # Sanity check: If we got a request object, we should *not* have
+ # gotten any keyword arguments that map to the request.
+ has_flattened_params = any([parent, batch_prediction_job])
+ if request is not None and has_flattened_params:
+ raise ValueError(
+ "If the `request` argument is set, then none of "
+ "the individual field arguments should be set."
+ )
+
+ # Minor optimization to avoid making a copy if the user passes
+ # in a job_service.CreateBatchPredictionJobRequest.
+ # There's no risk of modifying the input as we've already verified
+ # there are no flattened fields.
+ if not isinstance(request, job_service.CreateBatchPredictionJobRequest):
+ request = job_service.CreateBatchPredictionJobRequest(request)
+
+ # If we have keyword arguments corresponding to fields on the
+ # request, apply these.
+
+ if parent is not None:
+ request.parent = parent
+ if batch_prediction_job is not None:
+ request.batch_prediction_job = batch_prediction_job
+
+ # Wrap the RPC method; this adds retry and timeout information,
+ # and friendly error handling.
+ rpc = self._transport._wrapped_methods[
+ self._transport.create_batch_prediction_job
+ ]
+
+ # Certain fields should be provided within the metadata header;
+ # add these here.
+ metadata = tuple(metadata) + (
+ gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)),
+ )
+
+ # Send the request.
+ response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,)
+
+ # Done; return the response.
+ return response
+
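A hedged sketch of `create_batch_prediction_job`; `input_config` and `output_config` are omitted for brevity (a real request needs them), and the model resource name is a placeholder:

```python
from google.cloud import aiplatform_v1

client = aiplatform_v1.JobServiceClient()

bp_job = aiplatform_v1.BatchPredictionJob(
    display_name="example-batch-predict",
    model="projects/my-project/locations/us-central1/models/5678",
    # input_config and output_config omitted for brevity.
)
response = client.create_batch_prediction_job(
    parent="projects/my-project/locations/us-central1",
    batch_prediction_job=bp_job,
)
```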
+ def get_batch_prediction_job(
+ self,
+ request: job_service.GetBatchPredictionJobRequest = None,
+ *,
+ name: str = None,
+ retry: retries.Retry = gapic_v1.method.DEFAULT,
+ timeout: float = None,
+ metadata: Sequence[Tuple[str, str]] = (),
+ ) -> batch_prediction_job.BatchPredictionJob:
+ r"""Gets a BatchPredictionJob
+
+ Args:
+ request (google.cloud.aiplatform_v1.types.GetBatchPredictionJobRequest):
+ The request object. Request message for
+ ``JobService.GetBatchPredictionJob``.
+ name (str):
+ Required. The name of the BatchPredictionJob resource.
+ Format:
+
+ ``projects/{project}/locations/{location}/batchPredictionJobs/{batch_prediction_job}``
+
+ This corresponds to the ``name`` field
+ on the ``request`` instance; if ``request`` is provided, this
+ should not be set.
+
+ retry (google.api_core.retry.Retry): Designation of what errors, if any,
+ should be retried.
+ timeout (float): The timeout for this request.
+ metadata (Sequence[Tuple[str, str]]): Strings which should be
+ sent along with the request as metadata.
+
+ Returns:
+ google.cloud.aiplatform_v1.types.BatchPredictionJob:
+ A job that uses a ``Model`` to produce predictions
+ on multiple [input
+ instances][google.cloud.aiplatform.v1.BatchPredictionJob.input_config].
+                    If predictions for a significant portion of the
+                    instances fail, the job may finish without attempting
+                    predictions for all remaining instances.
+
+ """
+ # Create or coerce a protobuf request object.
+ # Sanity check: If we got a request object, we should *not* have
+ # gotten any keyword arguments that map to the request.
+ has_flattened_params = any([name])
+ if request is not None and has_flattened_params:
+ raise ValueError(
+ "If the `request` argument is set, then none of "
+ "the individual field arguments should be set."
+ )
+
+ # Minor optimization to avoid making a copy if the user passes
+ # in a job_service.GetBatchPredictionJobRequest.
+ # There's no risk of modifying the input as we've already verified
+ # there are no flattened fields.
+ if not isinstance(request, job_service.GetBatchPredictionJobRequest):
+ request = job_service.GetBatchPredictionJobRequest(request)
+
+ # If we have keyword arguments corresponding to fields on the
+ # request, apply these.
+
+ if name is not None:
+ request.name = name
+
+ # Wrap the RPC method; this adds retry and timeout information,
+ # and friendly error handling.
+ rpc = self._transport._wrapped_methods[self._transport.get_batch_prediction_job]
+
+ # Certain fields should be provided within the metadata header;
+ # add these here.
+ metadata = tuple(metadata) + (
+ gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)),
+ )
+
+ # Send the request.
+ response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,)
+
+ # Done; return the response.
+ return response
+
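+    # Illustrative sketch (assumes `client` is a JobServiceClient and that the job
+    # resource below exists); shows the flattened ``name`` argument.
+    #
+    #     job = client.get_batch_prediction_job(
+    #         name="projects/my-project/locations/us-central1/batchPredictionJobs/123"
+    #     )
+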
+ def list_batch_prediction_jobs(
+ self,
+ request: job_service.ListBatchPredictionJobsRequest = None,
+ *,
+ parent: str = None,
+ retry: retries.Retry = gapic_v1.method.DEFAULT,
+ timeout: float = None,
+ metadata: Sequence[Tuple[str, str]] = (),
+ ) -> pagers.ListBatchPredictionJobsPager:
+ r"""Lists BatchPredictionJobs in a Location.
+
+ Args:
+ request (google.cloud.aiplatform_v1.types.ListBatchPredictionJobsRequest):
+ The request object. Request message for
+ ``JobService.ListBatchPredictionJobs``.
+ parent (str):
+ Required. The resource name of the Location to list the
+ BatchPredictionJobs from. Format:
+ ``projects/{project}/locations/{location}``
+
+ This corresponds to the ``parent`` field
+ on the ``request`` instance; if ``request`` is provided, this
+ should not be set.
+
+ retry (google.api_core.retry.Retry): Designation of what errors, if any,
+ should be retried.
+ timeout (float): The timeout for this request.
+ metadata (Sequence[Tuple[str, str]]): Strings which should be
+ sent along with the request as metadata.
+
+ Returns:
+ google.cloud.aiplatform_v1.services.job_service.pagers.ListBatchPredictionJobsPager:
+ Response message for
+ ``JobService.ListBatchPredictionJobs``
+
+ Iterating over this object will yield results and
+ resolve additional pages automatically.
+
+ """
+ # Create or coerce a protobuf request object.
+ # Sanity check: If we got a request object, we should *not* have
+ # gotten any keyword arguments that map to the request.
+ has_flattened_params = any([parent])
+ if request is not None and has_flattened_params:
+ raise ValueError(
+ "If the `request` argument is set, then none of "
+ "the individual field arguments should be set."
+ )
+
+ # Minor optimization to avoid making a copy if the user passes
+ # in a job_service.ListBatchPredictionJobsRequest.
+ # There's no risk of modifying the input as we've already verified
+ # there are no flattened fields.
+ if not isinstance(request, job_service.ListBatchPredictionJobsRequest):
+ request = job_service.ListBatchPredictionJobsRequest(request)
+
+ # If we have keyword arguments corresponding to fields on the
+ # request, apply these.
+
+ if parent is not None:
+ request.parent = parent
+
+ # Wrap the RPC method; this adds retry and timeout information,
+ # and friendly error handling.
+ rpc = self._transport._wrapped_methods[
+ self._transport.list_batch_prediction_jobs
+ ]
+
+ # Certain fields should be provided within the metadata header;
+ # add these here.
+ metadata = tuple(metadata) + (
+ gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)),
+ )
+
+ # Send the request.
+ response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,)
+
+ # This method is paged; wrap the response in a pager, which provides
+ # an `__iter__` convenience method.
+ response = pagers.ListBatchPredictionJobsPager(
+ method=rpc, request=request, response=response, metadata=metadata,
+ )
+
+ # Done; return the response.
+ return response
+
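+    # Illustrative sketch (assumes `client` is a JobServiceClient): the returned
+    # pager can be iterated directly and fetches further pages lazily.
+    #
+    #     for job in client.list_batch_prediction_jobs(
+    #         parent="projects/my-project/locations/us-central1"
+    #     ):
+    #         print(job.name)
+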
+ def delete_batch_prediction_job(
+ self,
+ request: job_service.DeleteBatchPredictionJobRequest = None,
+ *,
+ name: str = None,
+ retry: retries.Retry = gapic_v1.method.DEFAULT,
+ timeout: float = None,
+ metadata: Sequence[Tuple[str, str]] = (),
+ ) -> ga_operation.Operation:
+ r"""Deletes a BatchPredictionJob. Can only be called on
+ jobs that already finished.
+
+ Args:
+ request (google.cloud.aiplatform_v1.types.DeleteBatchPredictionJobRequest):
+ The request object. Request message for
+ ``JobService.DeleteBatchPredictionJob``.
+ name (str):
+ Required. The name of the BatchPredictionJob resource to
+ be deleted. Format:
+
+ ``projects/{project}/locations/{location}/batchPredictionJobs/{batch_prediction_job}``
+
+ This corresponds to the ``name`` field
+ on the ``request`` instance; if ``request`` is provided, this
+ should not be set.
+
+ retry (google.api_core.retry.Retry): Designation of what errors, if any,
+ should be retried.
+ timeout (float): The timeout for this request.
+ metadata (Sequence[Tuple[str, str]]): Strings which should be
+ sent along with the request as metadata.
+
+ Returns:
+ google.api_core.operation.Operation:
+ An object representing a long-running operation.
+
+                The result type for the operation will be :class:`google.protobuf.empty_pb2.Empty`. A generic empty message that you can re-use to avoid defining duplicated
+ empty messages in your APIs. A typical example is to
+ use it as the request or the response type of an API
+ method. For instance:
+
+ service Foo {
+ rpc Bar(google.protobuf.Empty) returns
+ (google.protobuf.Empty);
+
+ }
+
+                    The JSON representation for Empty is an empty
+                    JSON object {}.
+
+ """
+ # Create or coerce a protobuf request object.
+ # Sanity check: If we got a request object, we should *not* have
+ # gotten any keyword arguments that map to the request.
+ has_flattened_params = any([name])
+ if request is not None and has_flattened_params:
+ raise ValueError(
+ "If the `request` argument is set, then none of "
+ "the individual field arguments should be set."
+ )
+
+ # Minor optimization to avoid making a copy if the user passes
+ # in a job_service.DeleteBatchPredictionJobRequest.
+ # There's no risk of modifying the input as we've already verified
+ # there are no flattened fields.
+ if not isinstance(request, job_service.DeleteBatchPredictionJobRequest):
+ request = job_service.DeleteBatchPredictionJobRequest(request)
+
+ # If we have keyword arguments corresponding to fields on the
+ # request, apply these.
+
+ if name is not None:
+ request.name = name
+
+ # Wrap the RPC method; this adds retry and timeout information,
+ # and friendly error handling.
+ rpc = self._transport._wrapped_methods[
+ self._transport.delete_batch_prediction_job
+ ]
+
+ # Certain fields should be provided within the metadata header;
+ # add these here.
+ metadata = tuple(metadata) + (
+ gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)),
+ )
+
+ # Send the request.
+ response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,)
+
+ # Wrap the response in an operation future.
+ response = ga_operation.from_gapic(
+ response,
+ self._transport.operations_client,
+ empty.Empty,
+ metadata_type=gca_operation.DeleteOperationMetadata,
+ )
+
+ # Done; return the response.
+ return response
+
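+    # Illustrative sketch (assumes `client` and an existing, finished job): the
+    # return value is a long-running operation; `result()` blocks until it completes.
+    #
+    #     operation = client.delete_batch_prediction_job(
+    #         name="projects/my-project/locations/us-central1/batchPredictionJobs/123"
+    #     )
+    #     operation.result()  # yields google.protobuf.empty_pb2.Empty when finished
+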
+ def cancel_batch_prediction_job(
+ self,
+ request: job_service.CancelBatchPredictionJobRequest = None,
+ *,
+ name: str = None,
+ retry: retries.Retry = gapic_v1.method.DEFAULT,
+ timeout: float = None,
+ metadata: Sequence[Tuple[str, str]] = (),
+ ) -> None:
+ r"""Cancels a BatchPredictionJob.
+
+ Starts asynchronous cancellation on the BatchPredictionJob. The
+ server makes the best effort to cancel the job, but success is
+ not guaranteed. Clients can use
+ ``JobService.GetBatchPredictionJob``
+ or other methods to check whether the cancellation succeeded or
+ whether the job completed despite cancellation. On a successful
+        cancellation, the BatchPredictionJob is not deleted; instead its
+ ``BatchPredictionJob.state``
+ is set to ``CANCELLED``. Any files already outputted by the job
+ are not deleted.
+
+ Args:
+ request (google.cloud.aiplatform_v1.types.CancelBatchPredictionJobRequest):
+ The request object. Request message for
+ ``JobService.CancelBatchPredictionJob``.
+ name (str):
+ Required. The name of the BatchPredictionJob to cancel.
+ Format:
+
+ ``projects/{project}/locations/{location}/batchPredictionJobs/{batch_prediction_job}``
+
+ This corresponds to the ``name`` field
+ on the ``request`` instance; if ``request`` is provided, this
+ should not be set.
+
+ retry (google.api_core.retry.Retry): Designation of what errors, if any,
+ should be retried.
+ timeout (float): The timeout for this request.
+ metadata (Sequence[Tuple[str, str]]): Strings which should be
+ sent along with the request as metadata.
+ """
+ # Create or coerce a protobuf request object.
+ # Sanity check: If we got a request object, we should *not* have
+ # gotten any keyword arguments that map to the request.
+ has_flattened_params = any([name])
+ if request is not None and has_flattened_params:
+ raise ValueError(
+ "If the `request` argument is set, then none of "
+ "the individual field arguments should be set."
+ )
+
+ # Minor optimization to avoid making a copy if the user passes
+ # in a job_service.CancelBatchPredictionJobRequest.
+ # There's no risk of modifying the input as we've already verified
+ # there are no flattened fields.
+ if not isinstance(request, job_service.CancelBatchPredictionJobRequest):
+ request = job_service.CancelBatchPredictionJobRequest(request)
+
+ # If we have keyword arguments corresponding to fields on the
+ # request, apply these.
+
+ if name is not None:
+ request.name = name
+
+ # Wrap the RPC method; this adds retry and timeout information,
+ # and friendly error handling.
+ rpc = self._transport._wrapped_methods[
+ self._transport.cancel_batch_prediction_job
+ ]
+
+ # Certain fields should be provided within the metadata header;
+ # add these here.
+ metadata = tuple(metadata) + (
+ gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)),
+ )
+
+ # Send the request.
+ rpc(
+ request, retry=retry, timeout=timeout, metadata=metadata,
+ )
+
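+    # Illustrative sketch (assumes `client` and a running job). Cancellation is
+    # asynchronous and best-effort; poll with get_batch_prediction_job to confirm.
+    #
+    #     client.cancel_batch_prediction_job(
+    #         name="projects/my-project/locations/us-central1/batchPredictionJobs/123"
+    #     )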
+
+try:
+ DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo(
+ gapic_version=pkg_resources.get_distribution(
+ "google-cloud-aiplatform",
+ ).version,
+ )
+except pkg_resources.DistributionNotFound:
+ DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo()
+
+
+__all__ = ("JobServiceClient",)
diff --git a/google/cloud/aiplatform_v1/services/job_service/pagers.py b/google/cloud/aiplatform_v1/services/job_service/pagers.py
new file mode 100644
index 0000000000..b5a0f4b929
--- /dev/null
+++ b/google/cloud/aiplatform_v1/services/job_service/pagers.py
@@ -0,0 +1,542 @@
+# -*- coding: utf-8 -*-
+
+# Copyright 2020 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+from typing import Any, AsyncIterable, Awaitable, Callable, Iterable, Sequence, Tuple
+
+from google.cloud.aiplatform_v1.types import batch_prediction_job
+from google.cloud.aiplatform_v1.types import custom_job
+from google.cloud.aiplatform_v1.types import data_labeling_job
+from google.cloud.aiplatform_v1.types import hyperparameter_tuning_job
+from google.cloud.aiplatform_v1.types import job_service
+
+
+class ListCustomJobsPager:
+ """A pager for iterating through ``list_custom_jobs`` requests.
+
+ This class thinly wraps an initial
+ :class:`google.cloud.aiplatform_v1.types.ListCustomJobsResponse` object, and
+ provides an ``__iter__`` method to iterate through its
+ ``custom_jobs`` field.
+
+ If there are more pages, the ``__iter__`` method will make additional
+ ``ListCustomJobs`` requests and continue to iterate
+ through the ``custom_jobs`` field on the
+ corresponding responses.
+
+ All the usual :class:`google.cloud.aiplatform_v1.types.ListCustomJobsResponse`
+ attributes are available on the pager. If multiple requests are made, only
+ the most recent response is retained, and thus used for attribute lookup.
+ """
+
+ def __init__(
+ self,
+ method: Callable[..., job_service.ListCustomJobsResponse],
+ request: job_service.ListCustomJobsRequest,
+ response: job_service.ListCustomJobsResponse,
+ *,
+ metadata: Sequence[Tuple[str, str]] = ()
+ ):
+ """Instantiate the pager.
+
+ Args:
+ method (Callable): The method that was originally called, and
+ which instantiated this pager.
+ request (google.cloud.aiplatform_v1.types.ListCustomJobsRequest):
+ The initial request object.
+ response (google.cloud.aiplatform_v1.types.ListCustomJobsResponse):
+ The initial response object.
+ metadata (Sequence[Tuple[str, str]]): Strings which should be
+ sent along with the request as metadata.
+ """
+ self._method = method
+ self._request = job_service.ListCustomJobsRequest(request)
+ self._response = response
+ self._metadata = metadata
+
+ def __getattr__(self, name: str) -> Any:
+ return getattr(self._response, name)
+
+ @property
+ def pages(self) -> Iterable[job_service.ListCustomJobsResponse]:
+ yield self._response
+ while self._response.next_page_token:
+ self._request.page_token = self._response.next_page_token
+ self._response = self._method(self._request, metadata=self._metadata)
+ yield self._response
+
+ def __iter__(self) -> Iterable[custom_job.CustomJob]:
+ for page in self.pages:
+ yield from page.custom_jobs
+
+ def __repr__(self) -> str:
+ return "{0}<{1!r}>".format(self.__class__.__name__, self._response)
+
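+# Illustrative sketch (assumes a JobServiceClient named `client`): iterating the
+# pager yields CustomJob messages and transparently fetches additional pages; the
+# `pages` property exposes the raw ListCustomJobsResponse objects instead.
+#
+#     pager = client.list_custom_jobs(parent="projects/my-project/locations/us-central1")
+#     for job in pager:
+#         print(job.display_name)
+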
+
+class ListCustomJobsAsyncPager:
+ """A pager for iterating through ``list_custom_jobs`` requests.
+
+ This class thinly wraps an initial
+ :class:`google.cloud.aiplatform_v1.types.ListCustomJobsResponse` object, and
+ provides an ``__aiter__`` method to iterate through its
+ ``custom_jobs`` field.
+
+ If there are more pages, the ``__aiter__`` method will make additional
+ ``ListCustomJobs`` requests and continue to iterate
+ through the ``custom_jobs`` field on the
+ corresponding responses.
+
+ All the usual :class:`google.cloud.aiplatform_v1.types.ListCustomJobsResponse`
+ attributes are available on the pager. If multiple requests are made, only
+ the most recent response is retained, and thus used for attribute lookup.
+ """
+
+ def __init__(
+ self,
+ method: Callable[..., Awaitable[job_service.ListCustomJobsResponse]],
+ request: job_service.ListCustomJobsRequest,
+ response: job_service.ListCustomJobsResponse,
+ *,
+ metadata: Sequence[Tuple[str, str]] = ()
+ ):
+ """Instantiate the pager.
+
+ Args:
+ method (Callable): The method that was originally called, and
+ which instantiated this pager.
+ request (google.cloud.aiplatform_v1.types.ListCustomJobsRequest):
+ The initial request object.
+ response (google.cloud.aiplatform_v1.types.ListCustomJobsResponse):
+ The initial response object.
+ metadata (Sequence[Tuple[str, str]]): Strings which should be
+ sent along with the request as metadata.
+ """
+ self._method = method
+ self._request = job_service.ListCustomJobsRequest(request)
+ self._response = response
+ self._metadata = metadata
+
+ def __getattr__(self, name: str) -> Any:
+ return getattr(self._response, name)
+
+ @property
+ async def pages(self) -> AsyncIterable[job_service.ListCustomJobsResponse]:
+ yield self._response
+ while self._response.next_page_token:
+ self._request.page_token = self._response.next_page_token
+ self._response = await self._method(self._request, metadata=self._metadata)
+ yield self._response
+
+ def __aiter__(self) -> AsyncIterable[custom_job.CustomJob]:
+ async def async_generator():
+ async for page in self.pages:
+ for response in page.custom_jobs:
+ yield response
+
+ return async_generator()
+
+ def __repr__(self) -> str:
+ return "{0}<{1!r}>".format(self.__class__.__name__, self._response)
+
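+# Illustrative async sketch (assumes a JobServiceAsyncClient named `client` from
+# the companion async client module of this package):
+#
+#     async def show_jobs():
+#         pager = await client.list_custom_jobs(
+#             parent="projects/my-project/locations/us-central1"
+#         )
+#         async for job in pager:
+#             print(job.display_name)
+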
+
+class ListDataLabelingJobsPager:
+ """A pager for iterating through ``list_data_labeling_jobs`` requests.
+
+ This class thinly wraps an initial
+ :class:`google.cloud.aiplatform_v1.types.ListDataLabelingJobsResponse` object, and
+ provides an ``__iter__`` method to iterate through its
+ ``data_labeling_jobs`` field.
+
+ If there are more pages, the ``__iter__`` method will make additional
+ ``ListDataLabelingJobs`` requests and continue to iterate
+ through the ``data_labeling_jobs`` field on the
+ corresponding responses.
+
+ All the usual :class:`google.cloud.aiplatform_v1.types.ListDataLabelingJobsResponse`
+ attributes are available on the pager. If multiple requests are made, only
+ the most recent response is retained, and thus used for attribute lookup.
+ """
+
+ def __init__(
+ self,
+ method: Callable[..., job_service.ListDataLabelingJobsResponse],
+ request: job_service.ListDataLabelingJobsRequest,
+ response: job_service.ListDataLabelingJobsResponse,
+ *,
+ metadata: Sequence[Tuple[str, str]] = ()
+ ):
+ """Instantiate the pager.
+
+ Args:
+ method (Callable): The method that was originally called, and
+ which instantiated this pager.
+ request (google.cloud.aiplatform_v1.types.ListDataLabelingJobsRequest):
+ The initial request object.
+ response (google.cloud.aiplatform_v1.types.ListDataLabelingJobsResponse):
+ The initial response object.
+ metadata (Sequence[Tuple[str, str]]): Strings which should be
+ sent along with the request as metadata.
+ """
+ self._method = method
+ self._request = job_service.ListDataLabelingJobsRequest(request)
+ self._response = response
+ self._metadata = metadata
+
+ def __getattr__(self, name: str) -> Any:
+ return getattr(self._response, name)
+
+ @property
+ def pages(self) -> Iterable[job_service.ListDataLabelingJobsResponse]:
+ yield self._response
+ while self._response.next_page_token:
+ self._request.page_token = self._response.next_page_token
+ self._response = self._method(self._request, metadata=self._metadata)
+ yield self._response
+
+ def __iter__(self) -> Iterable[data_labeling_job.DataLabelingJob]:
+ for page in self.pages:
+ yield from page.data_labeling_jobs
+
+ def __repr__(self) -> str:
+ return "{0}<{1!r}>".format(self.__class__.__name__, self._response)
+
+
+class ListDataLabelingJobsAsyncPager:
+ """A pager for iterating through ``list_data_labeling_jobs`` requests.
+
+ This class thinly wraps an initial
+ :class:`google.cloud.aiplatform_v1.types.ListDataLabelingJobsResponse` object, and
+ provides an ``__aiter__`` method to iterate through its
+ ``data_labeling_jobs`` field.
+
+ If there are more pages, the ``__aiter__`` method will make additional
+ ``ListDataLabelingJobs`` requests and continue to iterate
+ through the ``data_labeling_jobs`` field on the
+ corresponding responses.
+
+ All the usual :class:`google.cloud.aiplatform_v1.types.ListDataLabelingJobsResponse`
+ attributes are available on the pager. If multiple requests are made, only
+ the most recent response is retained, and thus used for attribute lookup.
+ """
+
+ def __init__(
+ self,
+ method: Callable[..., Awaitable[job_service.ListDataLabelingJobsResponse]],
+ request: job_service.ListDataLabelingJobsRequest,
+ response: job_service.ListDataLabelingJobsResponse,
+ *,
+ metadata: Sequence[Tuple[str, str]] = ()
+ ):
+ """Instantiate the pager.
+
+ Args:
+ method (Callable): The method that was originally called, and
+ which instantiated this pager.
+ request (google.cloud.aiplatform_v1.types.ListDataLabelingJobsRequest):
+ The initial request object.
+ response (google.cloud.aiplatform_v1.types.ListDataLabelingJobsResponse):
+ The initial response object.
+ metadata (Sequence[Tuple[str, str]]): Strings which should be
+ sent along with the request as metadata.
+ """
+ self._method = method
+ self._request = job_service.ListDataLabelingJobsRequest(request)
+ self._response = response
+ self._metadata = metadata
+
+ def __getattr__(self, name: str) -> Any:
+ return getattr(self._response, name)
+
+ @property
+ async def pages(self) -> AsyncIterable[job_service.ListDataLabelingJobsResponse]:
+ yield self._response
+ while self._response.next_page_token:
+ self._request.page_token = self._response.next_page_token
+ self._response = await self._method(self._request, metadata=self._metadata)
+ yield self._response
+
+ def __aiter__(self) -> AsyncIterable[data_labeling_job.DataLabelingJob]:
+ async def async_generator():
+ async for page in self.pages:
+ for response in page.data_labeling_jobs:
+ yield response
+
+ return async_generator()
+
+ def __repr__(self) -> str:
+ return "{0}<{1!r}>".format(self.__class__.__name__, self._response)
+
+
+class ListHyperparameterTuningJobsPager:
+ """A pager for iterating through ``list_hyperparameter_tuning_jobs`` requests.
+
+ This class thinly wraps an initial
+ :class:`google.cloud.aiplatform_v1.types.ListHyperparameterTuningJobsResponse` object, and
+ provides an ``__iter__`` method to iterate through its
+ ``hyperparameter_tuning_jobs`` field.
+
+ If there are more pages, the ``__iter__`` method will make additional
+ ``ListHyperparameterTuningJobs`` requests and continue to iterate
+ through the ``hyperparameter_tuning_jobs`` field on the
+ corresponding responses.
+
+ All the usual :class:`google.cloud.aiplatform_v1.types.ListHyperparameterTuningJobsResponse`
+ attributes are available on the pager. If multiple requests are made, only
+ the most recent response is retained, and thus used for attribute lookup.
+ """
+
+ def __init__(
+ self,
+ method: Callable[..., job_service.ListHyperparameterTuningJobsResponse],
+ request: job_service.ListHyperparameterTuningJobsRequest,
+ response: job_service.ListHyperparameterTuningJobsResponse,
+ *,
+ metadata: Sequence[Tuple[str, str]] = ()
+ ):
+ """Instantiate the pager.
+
+ Args:
+ method (Callable): The method that was originally called, and
+ which instantiated this pager.
+ request (google.cloud.aiplatform_v1.types.ListHyperparameterTuningJobsRequest):
+ The initial request object.
+ response (google.cloud.aiplatform_v1.types.ListHyperparameterTuningJobsResponse):
+ The initial response object.
+ metadata (Sequence[Tuple[str, str]]): Strings which should be
+ sent along with the request as metadata.
+ """
+ self._method = method
+ self._request = job_service.ListHyperparameterTuningJobsRequest(request)
+ self._response = response
+ self._metadata = metadata
+
+ def __getattr__(self, name: str) -> Any:
+ return getattr(self._response, name)
+
+ @property
+ def pages(self) -> Iterable[job_service.ListHyperparameterTuningJobsResponse]:
+ yield self._response
+ while self._response.next_page_token:
+ self._request.page_token = self._response.next_page_token
+ self._response = self._method(self._request, metadata=self._metadata)
+ yield self._response
+
+ def __iter__(self) -> Iterable[hyperparameter_tuning_job.HyperparameterTuningJob]:
+ for page in self.pages:
+ yield from page.hyperparameter_tuning_jobs
+
+ def __repr__(self) -> str:
+ return "{0}<{1!r}>".format(self.__class__.__name__, self._response)
+
+
+class ListHyperparameterTuningJobsAsyncPager:
+ """A pager for iterating through ``list_hyperparameter_tuning_jobs`` requests.
+
+ This class thinly wraps an initial
+ :class:`google.cloud.aiplatform_v1.types.ListHyperparameterTuningJobsResponse` object, and
+ provides an ``__aiter__`` method to iterate through its
+ ``hyperparameter_tuning_jobs`` field.
+
+ If there are more pages, the ``__aiter__`` method will make additional
+ ``ListHyperparameterTuningJobs`` requests and continue to iterate
+ through the ``hyperparameter_tuning_jobs`` field on the
+ corresponding responses.
+
+ All the usual :class:`google.cloud.aiplatform_v1.types.ListHyperparameterTuningJobsResponse`
+ attributes are available on the pager. If multiple requests are made, only
+ the most recent response is retained, and thus used for attribute lookup.
+ """
+
+ def __init__(
+ self,
+ method: Callable[
+ ..., Awaitable[job_service.ListHyperparameterTuningJobsResponse]
+ ],
+ request: job_service.ListHyperparameterTuningJobsRequest,
+ response: job_service.ListHyperparameterTuningJobsResponse,
+ *,
+ metadata: Sequence[Tuple[str, str]] = ()
+ ):
+ """Instantiate the pager.
+
+ Args:
+ method (Callable): The method that was originally called, and
+ which instantiated this pager.
+ request (google.cloud.aiplatform_v1.types.ListHyperparameterTuningJobsRequest):
+ The initial request object.
+ response (google.cloud.aiplatform_v1.types.ListHyperparameterTuningJobsResponse):
+ The initial response object.
+ metadata (Sequence[Tuple[str, str]]): Strings which should be
+ sent along with the request as metadata.
+ """
+ self._method = method
+ self._request = job_service.ListHyperparameterTuningJobsRequest(request)
+ self._response = response
+ self._metadata = metadata
+
+ def __getattr__(self, name: str) -> Any:
+ return getattr(self._response, name)
+
+ @property
+ async def pages(
+ self,
+ ) -> AsyncIterable[job_service.ListHyperparameterTuningJobsResponse]:
+ yield self._response
+ while self._response.next_page_token:
+ self._request.page_token = self._response.next_page_token
+ self._response = await self._method(self._request, metadata=self._metadata)
+ yield self._response
+
+ def __aiter__(
+ self,
+ ) -> AsyncIterable[hyperparameter_tuning_job.HyperparameterTuningJob]:
+ async def async_generator():
+ async for page in self.pages:
+ for response in page.hyperparameter_tuning_jobs:
+ yield response
+
+ return async_generator()
+
+ def __repr__(self) -> str:
+ return "{0}<{1!r}>".format(self.__class__.__name__, self._response)
+
+
+class ListBatchPredictionJobsPager:
+ """A pager for iterating through ``list_batch_prediction_jobs`` requests.
+
+ This class thinly wraps an initial
+ :class:`google.cloud.aiplatform_v1.types.ListBatchPredictionJobsResponse` object, and
+ provides an ``__iter__`` method to iterate through its
+ ``batch_prediction_jobs`` field.
+
+ If there are more pages, the ``__iter__`` method will make additional
+ ``ListBatchPredictionJobs`` requests and continue to iterate
+ through the ``batch_prediction_jobs`` field on the
+ corresponding responses.
+
+ All the usual :class:`google.cloud.aiplatform_v1.types.ListBatchPredictionJobsResponse`
+ attributes are available on the pager. If multiple requests are made, only
+ the most recent response is retained, and thus used for attribute lookup.
+ """
+
+ def __init__(
+ self,
+ method: Callable[..., job_service.ListBatchPredictionJobsResponse],
+ request: job_service.ListBatchPredictionJobsRequest,
+ response: job_service.ListBatchPredictionJobsResponse,
+ *,
+ metadata: Sequence[Tuple[str, str]] = ()
+ ):
+ """Instantiate the pager.
+
+ Args:
+ method (Callable): The method that was originally called, and
+ which instantiated this pager.
+ request (google.cloud.aiplatform_v1.types.ListBatchPredictionJobsRequest):
+ The initial request object.
+ response (google.cloud.aiplatform_v1.types.ListBatchPredictionJobsResponse):
+ The initial response object.
+ metadata (Sequence[Tuple[str, str]]): Strings which should be
+ sent along with the request as metadata.
+ """
+ self._method = method
+ self._request = job_service.ListBatchPredictionJobsRequest(request)
+ self._response = response
+ self._metadata = metadata
+
+ def __getattr__(self, name: str) -> Any:
+ return getattr(self._response, name)
+
+ @property
+ def pages(self) -> Iterable[job_service.ListBatchPredictionJobsResponse]:
+ yield self._response
+ while self._response.next_page_token:
+ self._request.page_token = self._response.next_page_token
+ self._response = self._method(self._request, metadata=self._metadata)
+ yield self._response
+
+ def __iter__(self) -> Iterable[batch_prediction_job.BatchPredictionJob]:
+ for page in self.pages:
+ yield from page.batch_prediction_jobs
+
+ def __repr__(self) -> str:
+ return "{0}<{1!r}>".format(self.__class__.__name__, self._response)
+
+
+class ListBatchPredictionJobsAsyncPager:
+ """A pager for iterating through ``list_batch_prediction_jobs`` requests.
+
+ This class thinly wraps an initial
+ :class:`google.cloud.aiplatform_v1.types.ListBatchPredictionJobsResponse` object, and
+ provides an ``__aiter__`` method to iterate through its
+ ``batch_prediction_jobs`` field.
+
+ If there are more pages, the ``__aiter__`` method will make additional
+ ``ListBatchPredictionJobs`` requests and continue to iterate
+ through the ``batch_prediction_jobs`` field on the
+ corresponding responses.
+
+ All the usual :class:`google.cloud.aiplatform_v1.types.ListBatchPredictionJobsResponse`
+ attributes are available on the pager. If multiple requests are made, only
+ the most recent response is retained, and thus used for attribute lookup.
+ """
+
+ def __init__(
+ self,
+ method: Callable[..., Awaitable[job_service.ListBatchPredictionJobsResponse]],
+ request: job_service.ListBatchPredictionJobsRequest,
+ response: job_service.ListBatchPredictionJobsResponse,
+ *,
+ metadata: Sequence[Tuple[str, str]] = ()
+ ):
+ """Instantiate the pager.
+
+ Args:
+ method (Callable): The method that was originally called, and
+ which instantiated this pager.
+ request (google.cloud.aiplatform_v1.types.ListBatchPredictionJobsRequest):
+ The initial request object.
+ response (google.cloud.aiplatform_v1.types.ListBatchPredictionJobsResponse):
+ The initial response object.
+ metadata (Sequence[Tuple[str, str]]): Strings which should be
+ sent along with the request as metadata.
+ """
+ self._method = method
+ self._request = job_service.ListBatchPredictionJobsRequest(request)
+ self._response = response
+ self._metadata = metadata
+
+ def __getattr__(self, name: str) -> Any:
+ return getattr(self._response, name)
+
+ @property
+ async def pages(self) -> AsyncIterable[job_service.ListBatchPredictionJobsResponse]:
+ yield self._response
+ while self._response.next_page_token:
+ self._request.page_token = self._response.next_page_token
+ self._response = await self._method(self._request, metadata=self._metadata)
+ yield self._response
+
+ def __aiter__(self) -> AsyncIterable[batch_prediction_job.BatchPredictionJob]:
+ async def async_generator():
+ async for page in self.pages:
+ for response in page.batch_prediction_jobs:
+ yield response
+
+ return async_generator()
+
+ def __repr__(self) -> str:
+ return "{0}<{1!r}>".format(self.__class__.__name__, self._response)
diff --git a/google/cloud/aiplatform_v1/services/job_service/transports/__init__.py b/google/cloud/aiplatform_v1/services/job_service/transports/__init__.py
new file mode 100644
index 0000000000..349bfbcdea
--- /dev/null
+++ b/google/cloud/aiplatform_v1/services/job_service/transports/__init__.py
@@ -0,0 +1,35 @@
+# -*- coding: utf-8 -*-
+
+# Copyright 2020 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+from collections import OrderedDict
+from typing import Dict, Type
+
+from .base import JobServiceTransport
+from .grpc import JobServiceGrpcTransport
+from .grpc_asyncio import JobServiceGrpcAsyncIOTransport
+
+
+# Compile a registry of transports.
+_transport_registry = OrderedDict() # type: Dict[str, Type[JobServiceTransport]]
+_transport_registry["grpc"] = JobServiceGrpcTransport
+_transport_registry["grpc_asyncio"] = JobServiceGrpcAsyncIOTransport
+
+__all__ = (
+ "JobServiceTransport",
+ "JobServiceGrpcTransport",
+ "JobServiceGrpcAsyncIOTransport",
+)
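+
+# Illustrative sketch: the registry above lets callers resolve a transport class
+# by name, which is the lookup the client performs when a transport such as
+# "grpc" is requested.
+#
+#     transport_cls = _transport_registry["grpc"]   # JobServiceGrpcTransport
+#     transport = transport_cls(host="aiplatform.googleapis.com")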
diff --git a/google/cloud/aiplatform_v1/services/job_service/transports/base.py b/google/cloud/aiplatform_v1/services/job_service/transports/base.py
new file mode 100644
index 0000000000..42ab8e1688
--- /dev/null
+++ b/google/cloud/aiplatform_v1/services/job_service/transports/base.py
@@ -0,0 +1,434 @@
+# -*- coding: utf-8 -*-
+
+# Copyright 2020 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+import abc
+import typing
+import pkg_resources
+
+from google import auth # type: ignore
+from google.api_core import exceptions # type: ignore
+from google.api_core import gapic_v1 # type: ignore
+from google.api_core import retry as retries # type: ignore
+from google.api_core import operations_v1 # type: ignore
+from google.auth import credentials # type: ignore
+
+from google.cloud.aiplatform_v1.types import batch_prediction_job
+from google.cloud.aiplatform_v1.types import (
+ batch_prediction_job as gca_batch_prediction_job,
+)
+from google.cloud.aiplatform_v1.types import custom_job
+from google.cloud.aiplatform_v1.types import custom_job as gca_custom_job
+from google.cloud.aiplatform_v1.types import data_labeling_job
+from google.cloud.aiplatform_v1.types import data_labeling_job as gca_data_labeling_job
+from google.cloud.aiplatform_v1.types import hyperparameter_tuning_job
+from google.cloud.aiplatform_v1.types import (
+ hyperparameter_tuning_job as gca_hyperparameter_tuning_job,
+)
+from google.cloud.aiplatform_v1.types import job_service
+from google.longrunning import operations_pb2 as operations # type: ignore
+from google.protobuf import empty_pb2 as empty # type: ignore
+
+
+try:
+ DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo(
+ gapic_version=pkg_resources.get_distribution(
+ "google-cloud-aiplatform",
+ ).version,
+ )
+except pkg_resources.DistributionNotFound:
+ DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo()
+
+
+class JobServiceTransport(abc.ABC):
+ """Abstract transport class for JobService."""
+
+ AUTH_SCOPES = ("https://www.googleapis.com/auth/cloud-platform",)
+
+ def __init__(
+ self,
+ *,
+ host: str = "aiplatform.googleapis.com",
+ credentials: credentials.Credentials = None,
+ credentials_file: typing.Optional[str] = None,
+ scopes: typing.Optional[typing.Sequence[str]] = AUTH_SCOPES,
+ quota_project_id: typing.Optional[str] = None,
+ client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
+ **kwargs,
+ ) -> None:
+ """Instantiate the transport.
+
+ Args:
+ host (Optional[str]): The hostname to connect to.
+ credentials (Optional[google.auth.credentials.Credentials]): The
+ authorization credentials to attach to requests. These
+ credentials identify the application to the service; if none
+ are specified, the client will attempt to ascertain the
+ credentials from the environment.
+ credentials_file (Optional[str]): A file with credentials that can
+ be loaded with :func:`google.auth.load_credentials_from_file`.
+ This argument is mutually exclusive with credentials.
+            scopes (Optional[Sequence[str]]): A list of scopes.
+ quota_project_id (Optional[str]): An optional project to use for billing
+ and quota.
+ client_info (google.api_core.gapic_v1.client_info.ClientInfo):
+ The client info used to send a user-agent string along with
+ API requests. If ``None``, then default info will be used.
+ Generally, you only need to set this if you're developing
+ your own client library.
+ """
+ # Save the hostname. Default to port 443 (HTTPS) if none is specified.
+ if ":" not in host:
+ host += ":443"
+ self._host = host
+
+ # If no credentials are provided, then determine the appropriate
+ # defaults.
+ if credentials and credentials_file:
+ raise exceptions.DuplicateCredentialArgs(
+ "'credentials_file' and 'credentials' are mutually exclusive"
+ )
+
+ if credentials_file is not None:
+ credentials, _ = auth.load_credentials_from_file(
+ credentials_file, scopes=scopes, quota_project_id=quota_project_id
+ )
+
+ elif credentials is None:
+ credentials, _ = auth.default(
+ scopes=scopes, quota_project_id=quota_project_id
+ )
+
+ # Save the credentials.
+ self._credentials = credentials
+
+ # Lifted into its own function so it can be stubbed out during tests.
+ self._prep_wrapped_messages(client_info)
+
+ def _prep_wrapped_messages(self, client_info):
+ # Precompute the wrapped methods.
+ self._wrapped_methods = {
+ self.create_custom_job: gapic_v1.method.wrap_method(
+ self.create_custom_job, default_timeout=None, client_info=client_info,
+ ),
+ self.get_custom_job: gapic_v1.method.wrap_method(
+ self.get_custom_job, default_timeout=None, client_info=client_info,
+ ),
+ self.list_custom_jobs: gapic_v1.method.wrap_method(
+ self.list_custom_jobs, default_timeout=None, client_info=client_info,
+ ),
+ self.delete_custom_job: gapic_v1.method.wrap_method(
+ self.delete_custom_job, default_timeout=None, client_info=client_info,
+ ),
+ self.cancel_custom_job: gapic_v1.method.wrap_method(
+ self.cancel_custom_job, default_timeout=None, client_info=client_info,
+ ),
+ self.create_data_labeling_job: gapic_v1.method.wrap_method(
+ self.create_data_labeling_job,
+ default_timeout=None,
+ client_info=client_info,
+ ),
+ self.get_data_labeling_job: gapic_v1.method.wrap_method(
+ self.get_data_labeling_job,
+ default_timeout=None,
+ client_info=client_info,
+ ),
+ self.list_data_labeling_jobs: gapic_v1.method.wrap_method(
+ self.list_data_labeling_jobs,
+ default_timeout=None,
+ client_info=client_info,
+ ),
+ self.delete_data_labeling_job: gapic_v1.method.wrap_method(
+ self.delete_data_labeling_job,
+ default_timeout=None,
+ client_info=client_info,
+ ),
+ self.cancel_data_labeling_job: gapic_v1.method.wrap_method(
+ self.cancel_data_labeling_job,
+ default_timeout=None,
+ client_info=client_info,
+ ),
+ self.create_hyperparameter_tuning_job: gapic_v1.method.wrap_method(
+ self.create_hyperparameter_tuning_job,
+ default_timeout=None,
+ client_info=client_info,
+ ),
+ self.get_hyperparameter_tuning_job: gapic_v1.method.wrap_method(
+ self.get_hyperparameter_tuning_job,
+ default_timeout=None,
+ client_info=client_info,
+ ),
+ self.list_hyperparameter_tuning_jobs: gapic_v1.method.wrap_method(
+ self.list_hyperparameter_tuning_jobs,
+ default_timeout=None,
+ client_info=client_info,
+ ),
+ self.delete_hyperparameter_tuning_job: gapic_v1.method.wrap_method(
+ self.delete_hyperparameter_tuning_job,
+ default_timeout=None,
+ client_info=client_info,
+ ),
+ self.cancel_hyperparameter_tuning_job: gapic_v1.method.wrap_method(
+ self.cancel_hyperparameter_tuning_job,
+ default_timeout=None,
+ client_info=client_info,
+ ),
+ self.create_batch_prediction_job: gapic_v1.method.wrap_method(
+ self.create_batch_prediction_job,
+ default_timeout=None,
+ client_info=client_info,
+ ),
+ self.get_batch_prediction_job: gapic_v1.method.wrap_method(
+ self.get_batch_prediction_job,
+ default_timeout=None,
+ client_info=client_info,
+ ),
+ self.list_batch_prediction_jobs: gapic_v1.method.wrap_method(
+ self.list_batch_prediction_jobs,
+ default_timeout=None,
+ client_info=client_info,
+ ),
+ self.delete_batch_prediction_job: gapic_v1.method.wrap_method(
+ self.delete_batch_prediction_job,
+ default_timeout=None,
+ client_info=client_info,
+ ),
+ self.cancel_batch_prediction_job: gapic_v1.method.wrap_method(
+ self.cancel_batch_prediction_job,
+ default_timeout=None,
+ client_info=client_info,
+ ),
+ }
+
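+    # Illustrative note: the client modules consume this mapping by looking up the
+    # wrapped callable for a transport property, e.g. (sketch, not generated code):
+    #
+    #     rpc = transport._wrapped_methods[transport.create_custom_job]
+    #     response = rpc(request, retry=retry, timeout=timeout, metadata=metadata)
+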
+ @property
+ def operations_client(self) -> operations_v1.OperationsClient:
+ """Return the client designed to process long-running operations."""
+ raise NotImplementedError()
+
+ @property
+ def create_custom_job(
+ self,
+ ) -> typing.Callable[
+ [job_service.CreateCustomJobRequest],
+ typing.Union[
+ gca_custom_job.CustomJob, typing.Awaitable[gca_custom_job.CustomJob]
+ ],
+ ]:
+ raise NotImplementedError()
+
+ @property
+ def get_custom_job(
+ self,
+ ) -> typing.Callable[
+ [job_service.GetCustomJobRequest],
+ typing.Union[custom_job.CustomJob, typing.Awaitable[custom_job.CustomJob]],
+ ]:
+ raise NotImplementedError()
+
+ @property
+ def list_custom_jobs(
+ self,
+ ) -> typing.Callable[
+ [job_service.ListCustomJobsRequest],
+ typing.Union[
+ job_service.ListCustomJobsResponse,
+ typing.Awaitable[job_service.ListCustomJobsResponse],
+ ],
+ ]:
+ raise NotImplementedError()
+
+ @property
+ def delete_custom_job(
+ self,
+ ) -> typing.Callable[
+ [job_service.DeleteCustomJobRequest],
+ typing.Union[operations.Operation, typing.Awaitable[operations.Operation]],
+ ]:
+ raise NotImplementedError()
+
+ @property
+ def cancel_custom_job(
+ self,
+ ) -> typing.Callable[
+ [job_service.CancelCustomJobRequest],
+ typing.Union[empty.Empty, typing.Awaitable[empty.Empty]],
+ ]:
+ raise NotImplementedError()
+
+ @property
+ def create_data_labeling_job(
+ self,
+ ) -> typing.Callable[
+ [job_service.CreateDataLabelingJobRequest],
+ typing.Union[
+ gca_data_labeling_job.DataLabelingJob,
+ typing.Awaitable[gca_data_labeling_job.DataLabelingJob],
+ ],
+ ]:
+ raise NotImplementedError()
+
+ @property
+ def get_data_labeling_job(
+ self,
+ ) -> typing.Callable[
+ [job_service.GetDataLabelingJobRequest],
+ typing.Union[
+ data_labeling_job.DataLabelingJob,
+ typing.Awaitable[data_labeling_job.DataLabelingJob],
+ ],
+ ]:
+ raise NotImplementedError()
+
+ @property
+ def list_data_labeling_jobs(
+ self,
+ ) -> typing.Callable[
+ [job_service.ListDataLabelingJobsRequest],
+ typing.Union[
+ job_service.ListDataLabelingJobsResponse,
+ typing.Awaitable[job_service.ListDataLabelingJobsResponse],
+ ],
+ ]:
+ raise NotImplementedError()
+
+ @property
+ def delete_data_labeling_job(
+ self,
+ ) -> typing.Callable[
+ [job_service.DeleteDataLabelingJobRequest],
+ typing.Union[operations.Operation, typing.Awaitable[operations.Operation]],
+ ]:
+ raise NotImplementedError()
+
+ @property
+ def cancel_data_labeling_job(
+ self,
+ ) -> typing.Callable[
+ [job_service.CancelDataLabelingJobRequest],
+ typing.Union[empty.Empty, typing.Awaitable[empty.Empty]],
+ ]:
+ raise NotImplementedError()
+
+ @property
+ def create_hyperparameter_tuning_job(
+ self,
+ ) -> typing.Callable[
+ [job_service.CreateHyperparameterTuningJobRequest],
+ typing.Union[
+ gca_hyperparameter_tuning_job.HyperparameterTuningJob,
+ typing.Awaitable[gca_hyperparameter_tuning_job.HyperparameterTuningJob],
+ ],
+ ]:
+ raise NotImplementedError()
+
+ @property
+ def get_hyperparameter_tuning_job(
+ self,
+ ) -> typing.Callable[
+ [job_service.GetHyperparameterTuningJobRequest],
+ typing.Union[
+ hyperparameter_tuning_job.HyperparameterTuningJob,
+ typing.Awaitable[hyperparameter_tuning_job.HyperparameterTuningJob],
+ ],
+ ]:
+ raise NotImplementedError()
+
+ @property
+ def list_hyperparameter_tuning_jobs(
+ self,
+ ) -> typing.Callable[
+ [job_service.ListHyperparameterTuningJobsRequest],
+ typing.Union[
+ job_service.ListHyperparameterTuningJobsResponse,
+ typing.Awaitable[job_service.ListHyperparameterTuningJobsResponse],
+ ],
+ ]:
+ raise NotImplementedError()
+
+ @property
+ def delete_hyperparameter_tuning_job(
+ self,
+ ) -> typing.Callable[
+ [job_service.DeleteHyperparameterTuningJobRequest],
+ typing.Union[operations.Operation, typing.Awaitable[operations.Operation]],
+ ]:
+ raise NotImplementedError()
+
+ @property
+ def cancel_hyperparameter_tuning_job(
+ self,
+ ) -> typing.Callable[
+ [job_service.CancelHyperparameterTuningJobRequest],
+ typing.Union[empty.Empty, typing.Awaitable[empty.Empty]],
+ ]:
+ raise NotImplementedError()
+
+ @property
+ def create_batch_prediction_job(
+ self,
+ ) -> typing.Callable[
+ [job_service.CreateBatchPredictionJobRequest],
+ typing.Union[
+ gca_batch_prediction_job.BatchPredictionJob,
+ typing.Awaitable[gca_batch_prediction_job.BatchPredictionJob],
+ ],
+ ]:
+ raise NotImplementedError()
+
+ @property
+ def get_batch_prediction_job(
+ self,
+ ) -> typing.Callable[
+ [job_service.GetBatchPredictionJobRequest],
+ typing.Union[
+ batch_prediction_job.BatchPredictionJob,
+ typing.Awaitable[batch_prediction_job.BatchPredictionJob],
+ ],
+ ]:
+ raise NotImplementedError()
+
+ @property
+ def list_batch_prediction_jobs(
+ self,
+ ) -> typing.Callable[
+ [job_service.ListBatchPredictionJobsRequest],
+ typing.Union[
+ job_service.ListBatchPredictionJobsResponse,
+ typing.Awaitable[job_service.ListBatchPredictionJobsResponse],
+ ],
+ ]:
+ raise NotImplementedError()
+
+ @property
+ def delete_batch_prediction_job(
+ self,
+ ) -> typing.Callable[
+ [job_service.DeleteBatchPredictionJobRequest],
+ typing.Union[operations.Operation, typing.Awaitable[operations.Operation]],
+ ]:
+ raise NotImplementedError()
+
+ @property
+ def cancel_batch_prediction_job(
+ self,
+ ) -> typing.Callable[
+ [job_service.CancelBatchPredictionJobRequest],
+ typing.Union[empty.Empty, typing.Awaitable[empty.Empty]],
+ ]:
+ raise NotImplementedError()
+
+
+__all__ = ("JobServiceTransport",)
diff --git a/google/cloud/aiplatform_v1/services/job_service/transports/grpc.py b/google/cloud/aiplatform_v1/services/job_service/transports/grpc.py
new file mode 100644
index 0000000000..139aaf3345
--- /dev/null
+++ b/google/cloud/aiplatform_v1/services/job_service/transports/grpc.py
@@ -0,0 +1,886 @@
+# -*- coding: utf-8 -*-
+
+# Copyright 2020 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+import warnings
+from typing import Callable, Dict, Optional, Sequence, Tuple
+
+from google.api_core import grpc_helpers # type: ignore
+from google.api_core import operations_v1 # type: ignore
+from google.api_core import gapic_v1 # type: ignore
+from google import auth # type: ignore
+from google.auth import credentials # type: ignore
+from google.auth.transport.grpc import SslCredentials # type: ignore
+
+import grpc # type: ignore
+
+from google.cloud.aiplatform_v1.types import batch_prediction_job
+from google.cloud.aiplatform_v1.types import (
+ batch_prediction_job as gca_batch_prediction_job,
+)
+from google.cloud.aiplatform_v1.types import custom_job
+from google.cloud.aiplatform_v1.types import custom_job as gca_custom_job
+from google.cloud.aiplatform_v1.types import data_labeling_job
+from google.cloud.aiplatform_v1.types import data_labeling_job as gca_data_labeling_job
+from google.cloud.aiplatform_v1.types import hyperparameter_tuning_job
+from google.cloud.aiplatform_v1.types import (
+ hyperparameter_tuning_job as gca_hyperparameter_tuning_job,
+)
+from google.cloud.aiplatform_v1.types import job_service
+from google.longrunning import operations_pb2 as operations # type: ignore
+from google.protobuf import empty_pb2 as empty # type: ignore
+
+from .base import JobServiceTransport, DEFAULT_CLIENT_INFO
+
+
+class JobServiceGrpcTransport(JobServiceTransport):
+ """gRPC backend transport for JobService.
+
+ A service for creating and managing AI Platform's jobs.
+
+ This class defines the same methods as the primary client, so the
+ primary client can load the underlying transport implementation
+ and call it.
+
+ It sends protocol buffers over the wire using gRPC (which is built on
+ top of HTTP/2); the ``grpcio`` package must be installed.
+ """
+
+ _stubs: Dict[str, Callable]
+
+ def __init__(
+ self,
+ *,
+ host: str = "aiplatform.googleapis.com",
+ credentials: credentials.Credentials = None,
+ credentials_file: str = None,
+ scopes: Sequence[str] = None,
+ channel: grpc.Channel = None,
+ api_mtls_endpoint: str = None,
+ client_cert_source: Callable[[], Tuple[bytes, bytes]] = None,
+ ssl_channel_credentials: grpc.ChannelCredentials = None,
+ client_cert_source_for_mtls: Callable[[], Tuple[bytes, bytes]] = None,
+ quota_project_id: Optional[str] = None,
+ client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
+ ) -> None:
+ """Instantiate the transport.
+
+ Args:
+ host (Optional[str]): The hostname to connect to.
+ credentials (Optional[google.auth.credentials.Credentials]): The
+ authorization credentials to attach to requests. These
+ credentials identify the application to the service; if none
+ are specified, the client will attempt to ascertain the
+ credentials from the environment.
+ This argument is ignored if ``channel`` is provided.
+ credentials_file (Optional[str]): A file with credentials that can
+ be loaded with :func:`google.auth.load_credentials_from_file`.
+ This argument is ignored if ``channel`` is provided.
+            scopes (Optional[Sequence[str]]): A list of scopes. This argument is
+ ignored if ``channel`` is provided.
+ channel (Optional[grpc.Channel]): A ``Channel`` instance through
+ which to make calls.
+ api_mtls_endpoint (Optional[str]): Deprecated. The mutual TLS endpoint.
+ If provided, it overrides the ``host`` argument and tries to create
+ a mutual TLS channel with client SSL credentials from
+                ``client_cert_source`` or application default SSL credentials.
+ client_cert_source (Optional[Callable[[], Tuple[bytes, bytes]]]):
+ Deprecated. A callback to provide client SSL certificate bytes and
+ private key bytes, both in PEM format. It is ignored if
+ ``api_mtls_endpoint`` is None.
+ ssl_channel_credentials (grpc.ChannelCredentials): SSL credentials
+ for grpc channel. It is ignored if ``channel`` is provided.
+ client_cert_source_for_mtls (Optional[Callable[[], Tuple[bytes, bytes]]]):
+ A callback to provide client certificate bytes and private key bytes,
+ both in PEM format. It is used to configure mutual TLS channel. It is
+ ignored if ``channel`` or ``ssl_channel_credentials`` is provided.
+ quota_project_id (Optional[str]): An optional project to use for billing
+ and quota.
+ client_info (google.api_core.gapic_v1.client_info.ClientInfo):
+ The client info used to send a user-agent string along with
+ API requests. If ``None``, then default info will be used.
+ Generally, you only need to set this if you're developing
+ your own client library.
+
+ Raises:
+ google.auth.exceptions.MutualTLSChannelError: If mutual TLS transport
+ creation failed for any reason.
+ google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials``
+ and ``credentials_file`` are passed.
+ """
+ self._ssl_channel_credentials = ssl_channel_credentials
+
+ if api_mtls_endpoint:
+ warnings.warn("api_mtls_endpoint is deprecated", DeprecationWarning)
+ if client_cert_source:
+ warnings.warn("client_cert_source is deprecated", DeprecationWarning)
+
+ if channel:
+ # Sanity check: Ensure that channel and credentials are not both
+ # provided.
+ credentials = False
+
+ # If a channel was explicitly provided, set it.
+ self._grpc_channel = channel
+ self._ssl_channel_credentials = None
+ elif api_mtls_endpoint:
+ host = (
+ api_mtls_endpoint
+ if ":" in api_mtls_endpoint
+ else api_mtls_endpoint + ":443"
+ )
+
+ if credentials is None:
+ credentials, _ = auth.default(
+ scopes=self.AUTH_SCOPES, quota_project_id=quota_project_id
+ )
+
+ # Create SSL credentials with client_cert_source or application
+ # default SSL credentials.
+ if client_cert_source:
+ cert, key = client_cert_source()
+ ssl_credentials = grpc.ssl_channel_credentials(
+ certificate_chain=cert, private_key=key
+ )
+ else:
+ ssl_credentials = SslCredentials().ssl_credentials
+
+ # create a new channel. The provided one is ignored.
+ self._grpc_channel = type(self).create_channel(
+ host,
+ credentials=credentials,
+ credentials_file=credentials_file,
+ ssl_credentials=ssl_credentials,
+ scopes=scopes or self.AUTH_SCOPES,
+ quota_project_id=quota_project_id,
+ options=[
+ ("grpc.max_send_message_length", -1),
+ ("grpc.max_receive_message_length", -1),
+ ],
+ )
+ self._ssl_channel_credentials = ssl_credentials
+ else:
+ host = host if ":" in host else host + ":443"
+
+ if credentials is None:
+ credentials, _ = auth.default(
+ scopes=self.AUTH_SCOPES, quota_project_id=quota_project_id
+ )
+
+ if client_cert_source_for_mtls and not ssl_channel_credentials:
+ cert, key = client_cert_source_for_mtls()
+ self._ssl_channel_credentials = grpc.ssl_channel_credentials(
+ certificate_chain=cert, private_key=key
+ )
+
+ # create a new channel. The provided one is ignored.
+ self._grpc_channel = type(self).create_channel(
+ host,
+ credentials=credentials,
+ credentials_file=credentials_file,
+ ssl_credentials=self._ssl_channel_credentials,
+ scopes=scopes or self.AUTH_SCOPES,
+ quota_project_id=quota_project_id,
+ options=[
+ ("grpc.max_send_message_length", -1),
+ ("grpc.max_receive_message_length", -1),
+ ],
+ )
+
+ self._stubs = {} # type: Dict[str, Callable]
+ self._operations_client = None
+
+ # Run the base constructor.
+ super().__init__(
+ host=host,
+ credentials=credentials,
+ credentials_file=credentials_file,
+ scopes=scopes or self.AUTH_SCOPES,
+ quota_project_id=quota_project_id,
+ client_info=client_info,
+ )
+
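+    # Illustrative sketch: a caller that already has a gRPC channel can hand it in
+    # directly, in which case the credentials arguments are ignored (see above).
+    #
+    #     channel = JobServiceGrpcTransport.create_channel("aiplatform.googleapis.com")
+    #     transport = JobServiceGrpcTransport(channel=channel)
+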
+ @classmethod
+ def create_channel(
+ cls,
+ host: str = "aiplatform.googleapis.com",
+ credentials: credentials.Credentials = None,
+ credentials_file: str = None,
+ scopes: Optional[Sequence[str]] = None,
+ quota_project_id: Optional[str] = None,
+ **kwargs,
+ ) -> grpc.Channel:
+ """Create and return a gRPC channel object.
+ Args:
+            host (Optional[str]): The host for the channel to use.
+ credentials (Optional[~.Credentials]): The
+ authorization credentials to attach to requests. These
+ credentials identify this application to the service. If
+ none are specified, the client will attempt to ascertain
+ the credentials from the environment.
+ credentials_file (Optional[str]): A file with credentials that can
+ be loaded with :func:`google.auth.load_credentials_from_file`.
+ This argument is mutually exclusive with credentials.
+            scopes (Optional[Sequence[str]]): An optional list of scopes needed for this
+ service. These are only used when credentials are not specified and
+ are passed to :func:`google.auth.default`.
+ quota_project_id (Optional[str]): An optional project to use for billing
+ and quota.
+ kwargs (Optional[dict]): Keyword arguments, which are passed to the
+ channel creation.
+ Returns:
+ grpc.Channel: A gRPC channel object.
+
+ Raises:
+ google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials``
+ and ``credentials_file`` are passed.
+ """
+ scopes = scopes or cls.AUTH_SCOPES
+ return grpc_helpers.create_channel(
+ host,
+ credentials=credentials,
+ credentials_file=credentials_file,
+ scopes=scopes,
+ quota_project_id=quota_project_id,
+ **kwargs,
+ )
+
+ @property
+ def grpc_channel(self) -> grpc.Channel:
+ """Return the channel designed to connect to this service.
+ """
+ return self._grpc_channel
+
+ @property
+ def operations_client(self) -> operations_v1.OperationsClient:
+ """Create the client designed to process long-running operations.
+
+ This property caches on the instance; repeated calls return the same
+ client.
+ """
+ # Sanity check: Only create a new client if we do not already have one.
+ if self._operations_client is None:
+ self._operations_client = operations_v1.OperationsClient(self.grpc_channel)
+
+ # Return the client from cache.
+ return self._operations_client
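+
+    # Illustrative sketch: the cached operations client is what callers use
+    # to poll long-running operations returned by RPCs such as
+    # ``delete_custom_job`` (``request`` here is a hypothetical
+    # DeleteCustomJobRequest):
+    #
+    #   operation = transport.delete_custom_job(request)
+    #   latest = transport.operations_client.get_operation(operation.name)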
+
+ @property
+ def create_custom_job(
+ self,
+ ) -> Callable[[job_service.CreateCustomJobRequest], gca_custom_job.CustomJob]:
+ r"""Return a callable for the create custom job method over gRPC.
+
+        Creates a CustomJob. A newly created CustomJob will be
+        attempted to run right away.
+
+ Returns:
+ Callable[[~.CreateCustomJobRequest],
+ ~.CustomJob]:
+ A function that, when called, will call the underlying RPC
+ on the server.
+ """
+ # Generate a "stub function" on-the-fly which will actually make
+ # the request.
+ # gRPC handles serialization and deserialization, so we just need
+ # to pass in the functions for each.
+ if "create_custom_job" not in self._stubs:
+ self._stubs["create_custom_job"] = self.grpc_channel.unary_unary(
+ "/google.cloud.aiplatform.v1.JobService/CreateCustomJob",
+ request_serializer=job_service.CreateCustomJobRequest.serialize,
+ response_deserializer=gca_custom_job.CustomJob.deserialize,
+ )
+ return self._stubs["create_custom_job"]
+
+ @property
+ def get_custom_job(
+ self,
+ ) -> Callable[[job_service.GetCustomJobRequest], custom_job.CustomJob]:
+ r"""Return a callable for the get custom job method over gRPC.
+
+ Gets a CustomJob.
+
+ Returns:
+ Callable[[~.GetCustomJobRequest],
+ ~.CustomJob]:
+ A function that, when called, will call the underlying RPC
+ on the server.
+ """
+ # Generate a "stub function" on-the-fly which will actually make
+ # the request.
+ # gRPC handles serialization and deserialization, so we just need
+ # to pass in the functions for each.
+ if "get_custom_job" not in self._stubs:
+ self._stubs["get_custom_job"] = self.grpc_channel.unary_unary(
+ "/google.cloud.aiplatform.v1.JobService/GetCustomJob",
+ request_serializer=job_service.GetCustomJobRequest.serialize,
+ response_deserializer=custom_job.CustomJob.deserialize,
+ )
+ return self._stubs["get_custom_job"]
+
+ @property
+ def list_custom_jobs(
+ self,
+ ) -> Callable[
+ [job_service.ListCustomJobsRequest], job_service.ListCustomJobsResponse
+ ]:
+ r"""Return a callable for the list custom jobs method over gRPC.
+
+ Lists CustomJobs in a Location.
+
+ Returns:
+ Callable[[~.ListCustomJobsRequest],
+ ~.ListCustomJobsResponse]:
+ A function that, when called, will call the underlying RPC
+ on the server.
+ """
+ # Generate a "stub function" on-the-fly which will actually make
+ # the request.
+ # gRPC handles serialization and deserialization, so we just need
+ # to pass in the functions for each.
+ if "list_custom_jobs" not in self._stubs:
+ self._stubs["list_custom_jobs"] = self.grpc_channel.unary_unary(
+ "/google.cloud.aiplatform.v1.JobService/ListCustomJobs",
+ request_serializer=job_service.ListCustomJobsRequest.serialize,
+ response_deserializer=job_service.ListCustomJobsResponse.deserialize,
+ )
+ return self._stubs["list_custom_jobs"]
+
+ @property
+ def delete_custom_job(
+ self,
+ ) -> Callable[[job_service.DeleteCustomJobRequest], operations.Operation]:
+ r"""Return a callable for the delete custom job method over gRPC.
+
+ Deletes a CustomJob.
+
+ Returns:
+ Callable[[~.DeleteCustomJobRequest],
+ ~.Operation]:
+ A function that, when called, will call the underlying RPC
+ on the server.
+ """
+ # Generate a "stub function" on-the-fly which will actually make
+ # the request.
+ # gRPC handles serialization and deserialization, so we just need
+ # to pass in the functions for each.
+ if "delete_custom_job" not in self._stubs:
+ self._stubs["delete_custom_job"] = self.grpc_channel.unary_unary(
+ "/google.cloud.aiplatform.v1.JobService/DeleteCustomJob",
+ request_serializer=job_service.DeleteCustomJobRequest.serialize,
+ response_deserializer=operations.Operation.FromString,
+ )
+ return self._stubs["delete_custom_job"]
+
+ @property
+ def cancel_custom_job(
+ self,
+ ) -> Callable[[job_service.CancelCustomJobRequest], empty.Empty]:
+ r"""Return a callable for the cancel custom job method over gRPC.
+
+ Cancels a CustomJob. Starts asynchronous cancellation on the
+ CustomJob. The server makes a best effort to cancel the job, but
+ success is not guaranteed. Clients can use
+ ``JobService.GetCustomJob``
+ or other methods to check whether the cancellation succeeded or
+ whether the job completed despite cancellation. On successful
+ cancellation, the CustomJob is not deleted; instead it becomes a
+ job with a
+ ``CustomJob.error``
+ value with a ``google.rpc.Status.code`` of
+ 1, corresponding to ``Code.CANCELLED``, and
+ ``CustomJob.state`` is
+ set to ``CANCELLED``.
+
+ Returns:
+ Callable[[~.CancelCustomJobRequest],
+ ~.Empty]:
+ A function that, when called, will call the underlying RPC
+ on the server.
+ """
+ # Generate a "stub function" on-the-fly which will actually make
+ # the request.
+ # gRPC handles serialization and deserialization, so we just need
+ # to pass in the functions for each.
+ if "cancel_custom_job" not in self._stubs:
+ self._stubs["cancel_custom_job"] = self.grpc_channel.unary_unary(
+ "/google.cloud.aiplatform.v1.JobService/CancelCustomJob",
+ request_serializer=job_service.CancelCustomJobRequest.serialize,
+ response_deserializer=empty.Empty.FromString,
+ )
+ return self._stubs["cancel_custom_job"]
+
+ @property
+ def create_data_labeling_job(
+ self,
+ ) -> Callable[
+ [job_service.CreateDataLabelingJobRequest],
+ gca_data_labeling_job.DataLabelingJob,
+ ]:
+ r"""Return a callable for the create data labeling job method over gRPC.
+
+ Creates a DataLabelingJob.
+
+ Returns:
+ Callable[[~.CreateDataLabelingJobRequest],
+ ~.DataLabelingJob]:
+ A function that, when called, will call the underlying RPC
+ on the server.
+ """
+ # Generate a "stub function" on-the-fly which will actually make
+ # the request.
+ # gRPC handles serialization and deserialization, so we just need
+ # to pass in the functions for each.
+ if "create_data_labeling_job" not in self._stubs:
+ self._stubs["create_data_labeling_job"] = self.grpc_channel.unary_unary(
+ "/google.cloud.aiplatform.v1.JobService/CreateDataLabelingJob",
+ request_serializer=job_service.CreateDataLabelingJobRequest.serialize,
+ response_deserializer=gca_data_labeling_job.DataLabelingJob.deserialize,
+ )
+ return self._stubs["create_data_labeling_job"]
+
+ @property
+ def get_data_labeling_job(
+ self,
+ ) -> Callable[
+ [job_service.GetDataLabelingJobRequest], data_labeling_job.DataLabelingJob
+ ]:
+ r"""Return a callable for the get data labeling job method over gRPC.
+
+ Gets a DataLabelingJob.
+
+ Returns:
+ Callable[[~.GetDataLabelingJobRequest],
+ ~.DataLabelingJob]:
+ A function that, when called, will call the underlying RPC
+ on the server.
+ """
+ # Generate a "stub function" on-the-fly which will actually make
+ # the request.
+ # gRPC handles serialization and deserialization, so we just need
+ # to pass in the functions for each.
+ if "get_data_labeling_job" not in self._stubs:
+ self._stubs["get_data_labeling_job"] = self.grpc_channel.unary_unary(
+ "/google.cloud.aiplatform.v1.JobService/GetDataLabelingJob",
+ request_serializer=job_service.GetDataLabelingJobRequest.serialize,
+ response_deserializer=data_labeling_job.DataLabelingJob.deserialize,
+ )
+ return self._stubs["get_data_labeling_job"]
+
+ @property
+ def list_data_labeling_jobs(
+ self,
+ ) -> Callable[
+ [job_service.ListDataLabelingJobsRequest],
+ job_service.ListDataLabelingJobsResponse,
+ ]:
+ r"""Return a callable for the list data labeling jobs method over gRPC.
+
+ Lists DataLabelingJobs in a Location.
+
+ Returns:
+ Callable[[~.ListDataLabelingJobsRequest],
+ ~.ListDataLabelingJobsResponse]:
+ A function that, when called, will call the underlying RPC
+ on the server.
+ """
+ # Generate a "stub function" on-the-fly which will actually make
+ # the request.
+ # gRPC handles serialization and deserialization, so we just need
+ # to pass in the functions for each.
+ if "list_data_labeling_jobs" not in self._stubs:
+ self._stubs["list_data_labeling_jobs"] = self.grpc_channel.unary_unary(
+ "/google.cloud.aiplatform.v1.JobService/ListDataLabelingJobs",
+ request_serializer=job_service.ListDataLabelingJobsRequest.serialize,
+ response_deserializer=job_service.ListDataLabelingJobsResponse.deserialize,
+ )
+ return self._stubs["list_data_labeling_jobs"]
+
+ @property
+ def delete_data_labeling_job(
+ self,
+ ) -> Callable[[job_service.DeleteDataLabelingJobRequest], operations.Operation]:
+ r"""Return a callable for the delete data labeling job method over gRPC.
+
+ Deletes a DataLabelingJob.
+
+ Returns:
+ Callable[[~.DeleteDataLabelingJobRequest],
+ ~.Operation]:
+ A function that, when called, will call the underlying RPC
+ on the server.
+ """
+ # Generate a "stub function" on-the-fly which will actually make
+ # the request.
+ # gRPC handles serialization and deserialization, so we just need
+ # to pass in the functions for each.
+ if "delete_data_labeling_job" not in self._stubs:
+ self._stubs["delete_data_labeling_job"] = self.grpc_channel.unary_unary(
+ "/google.cloud.aiplatform.v1.JobService/DeleteDataLabelingJob",
+ request_serializer=job_service.DeleteDataLabelingJobRequest.serialize,
+ response_deserializer=operations.Operation.FromString,
+ )
+ return self._stubs["delete_data_labeling_job"]
+
+ @property
+ def cancel_data_labeling_job(
+ self,
+ ) -> Callable[[job_service.CancelDataLabelingJobRequest], empty.Empty]:
+ r"""Return a callable for the cancel data labeling job method over gRPC.
+
+ Cancels a DataLabelingJob. Success of cancellation is
+ not guaranteed.
+
+ Returns:
+ Callable[[~.CancelDataLabelingJobRequest],
+ ~.Empty]:
+ A function that, when called, will call the underlying RPC
+ on the server.
+ """
+ # Generate a "stub function" on-the-fly which will actually make
+ # the request.
+ # gRPC handles serialization and deserialization, so we just need
+ # to pass in the functions for each.
+ if "cancel_data_labeling_job" not in self._stubs:
+ self._stubs["cancel_data_labeling_job"] = self.grpc_channel.unary_unary(
+ "/google.cloud.aiplatform.v1.JobService/CancelDataLabelingJob",
+ request_serializer=job_service.CancelDataLabelingJobRequest.serialize,
+ response_deserializer=empty.Empty.FromString,
+ )
+ return self._stubs["cancel_data_labeling_job"]
+
+ @property
+ def create_hyperparameter_tuning_job(
+ self,
+ ) -> Callable[
+ [job_service.CreateHyperparameterTuningJobRequest],
+ gca_hyperparameter_tuning_job.HyperparameterTuningJob,
+ ]:
+ r"""Return a callable for the create hyperparameter tuning
+ job method over gRPC.
+
+        Creates a HyperparameterTuningJob.
+
+ Returns:
+ Callable[[~.CreateHyperparameterTuningJobRequest],
+ ~.HyperparameterTuningJob]:
+ A function that, when called, will call the underlying RPC
+ on the server.
+ """
+ # Generate a "stub function" on-the-fly which will actually make
+ # the request.
+ # gRPC handles serialization and deserialization, so we just need
+ # to pass in the functions for each.
+ if "create_hyperparameter_tuning_job" not in self._stubs:
+ self._stubs[
+ "create_hyperparameter_tuning_job"
+ ] = self.grpc_channel.unary_unary(
+ "/google.cloud.aiplatform.v1.JobService/CreateHyperparameterTuningJob",
+ request_serializer=job_service.CreateHyperparameterTuningJobRequest.serialize,
+ response_deserializer=gca_hyperparameter_tuning_job.HyperparameterTuningJob.deserialize,
+ )
+ return self._stubs["create_hyperparameter_tuning_job"]
+
+ @property
+ def get_hyperparameter_tuning_job(
+ self,
+ ) -> Callable[
+ [job_service.GetHyperparameterTuningJobRequest],
+ hyperparameter_tuning_job.HyperparameterTuningJob,
+ ]:
+ r"""Return a callable for the get hyperparameter tuning job method over gRPC.
+
+        Gets a HyperparameterTuningJob.
+
+ Returns:
+ Callable[[~.GetHyperparameterTuningJobRequest],
+ ~.HyperparameterTuningJob]:
+ A function that, when called, will call the underlying RPC
+ on the server.
+ """
+ # Generate a "stub function" on-the-fly which will actually make
+ # the request.
+ # gRPC handles serialization and deserialization, so we just need
+ # to pass in the functions for each.
+ if "get_hyperparameter_tuning_job" not in self._stubs:
+ self._stubs[
+ "get_hyperparameter_tuning_job"
+ ] = self.grpc_channel.unary_unary(
+ "/google.cloud.aiplatform.v1.JobService/GetHyperparameterTuningJob",
+ request_serializer=job_service.GetHyperparameterTuningJobRequest.serialize,
+ response_deserializer=hyperparameter_tuning_job.HyperparameterTuningJob.deserialize,
+ )
+ return self._stubs["get_hyperparameter_tuning_job"]
+
+ @property
+ def list_hyperparameter_tuning_jobs(
+ self,
+ ) -> Callable[
+ [job_service.ListHyperparameterTuningJobsRequest],
+ job_service.ListHyperparameterTuningJobsResponse,
+ ]:
+ r"""Return a callable for the list hyperparameter tuning
+ jobs method over gRPC.
+
+ Lists HyperparameterTuningJobs in a Location.
+
+ Returns:
+ Callable[[~.ListHyperparameterTuningJobsRequest],
+ ~.ListHyperparameterTuningJobsResponse]:
+ A function that, when called, will call the underlying RPC
+ on the server.
+ """
+ # Generate a "stub function" on-the-fly which will actually make
+ # the request.
+ # gRPC handles serialization and deserialization, so we just need
+ # to pass in the functions for each.
+ if "list_hyperparameter_tuning_jobs" not in self._stubs:
+ self._stubs[
+ "list_hyperparameter_tuning_jobs"
+ ] = self.grpc_channel.unary_unary(
+ "/google.cloud.aiplatform.v1.JobService/ListHyperparameterTuningJobs",
+ request_serializer=job_service.ListHyperparameterTuningJobsRequest.serialize,
+ response_deserializer=job_service.ListHyperparameterTuningJobsResponse.deserialize,
+ )
+ return self._stubs["list_hyperparameter_tuning_jobs"]
+
+ @property
+ def delete_hyperparameter_tuning_job(
+ self,
+ ) -> Callable[
+ [job_service.DeleteHyperparameterTuningJobRequest], operations.Operation
+ ]:
+ r"""Return a callable for the delete hyperparameter tuning
+ job method over gRPC.
+
+ Deletes a HyperparameterTuningJob.
+
+ Returns:
+ Callable[[~.DeleteHyperparameterTuningJobRequest],
+ ~.Operation]:
+ A function that, when called, will call the underlying RPC
+ on the server.
+ """
+ # Generate a "stub function" on-the-fly which will actually make
+ # the request.
+ # gRPC handles serialization and deserialization, so we just need
+ # to pass in the functions for each.
+ if "delete_hyperparameter_tuning_job" not in self._stubs:
+ self._stubs[
+ "delete_hyperparameter_tuning_job"
+ ] = self.grpc_channel.unary_unary(
+ "/google.cloud.aiplatform.v1.JobService/DeleteHyperparameterTuningJob",
+ request_serializer=job_service.DeleteHyperparameterTuningJobRequest.serialize,
+ response_deserializer=operations.Operation.FromString,
+ )
+ return self._stubs["delete_hyperparameter_tuning_job"]
+
+ @property
+ def cancel_hyperparameter_tuning_job(
+ self,
+ ) -> Callable[[job_service.CancelHyperparameterTuningJobRequest], empty.Empty]:
+ r"""Return a callable for the cancel hyperparameter tuning
+ job method over gRPC.
+
+ Cancels a HyperparameterTuningJob. Starts asynchronous
+ cancellation on the HyperparameterTuningJob. The server makes a
+ best effort to cancel the job, but success is not guaranteed.
+ Clients can use
+ ``JobService.GetHyperparameterTuningJob``
+ or other methods to check whether the cancellation succeeded or
+ whether the job completed despite cancellation. On successful
+ cancellation, the HyperparameterTuningJob is not deleted;
+ instead it becomes a job with a
+ ``HyperparameterTuningJob.error``
+ value with a ``google.rpc.Status.code`` of
+ 1, corresponding to ``Code.CANCELLED``, and
+ ``HyperparameterTuningJob.state``
+ is set to ``CANCELLED``.
+
+ Returns:
+ Callable[[~.CancelHyperparameterTuningJobRequest],
+ ~.Empty]:
+ A function that, when called, will call the underlying RPC
+ on the server.
+ """
+ # Generate a "stub function" on-the-fly which will actually make
+ # the request.
+ # gRPC handles serialization and deserialization, so we just need
+ # to pass in the functions for each.
+ if "cancel_hyperparameter_tuning_job" not in self._stubs:
+ self._stubs[
+ "cancel_hyperparameter_tuning_job"
+ ] = self.grpc_channel.unary_unary(
+ "/google.cloud.aiplatform.v1.JobService/CancelHyperparameterTuningJob",
+ request_serializer=job_service.CancelHyperparameterTuningJobRequest.serialize,
+ response_deserializer=empty.Empty.FromString,
+ )
+ return self._stubs["cancel_hyperparameter_tuning_job"]
+
+ @property
+ def create_batch_prediction_job(
+ self,
+ ) -> Callable[
+ [job_service.CreateBatchPredictionJobRequest],
+ gca_batch_prediction_job.BatchPredictionJob,
+ ]:
+ r"""Return a callable for the create batch prediction job method over gRPC.
+
+        Creates a BatchPredictionJob. A newly created
+        BatchPredictionJob will be attempted to start right away.
+
+ Returns:
+ Callable[[~.CreateBatchPredictionJobRequest],
+ ~.BatchPredictionJob]:
+ A function that, when called, will call the underlying RPC
+ on the server.
+ """
+ # Generate a "stub function" on-the-fly which will actually make
+ # the request.
+ # gRPC handles serialization and deserialization, so we just need
+ # to pass in the functions for each.
+ if "create_batch_prediction_job" not in self._stubs:
+ self._stubs["create_batch_prediction_job"] = self.grpc_channel.unary_unary(
+ "/google.cloud.aiplatform.v1.JobService/CreateBatchPredictionJob",
+ request_serializer=job_service.CreateBatchPredictionJobRequest.serialize,
+ response_deserializer=gca_batch_prediction_job.BatchPredictionJob.deserialize,
+ )
+ return self._stubs["create_batch_prediction_job"]
+
+ @property
+ def get_batch_prediction_job(
+ self,
+ ) -> Callable[
+ [job_service.GetBatchPredictionJobRequest],
+ batch_prediction_job.BatchPredictionJob,
+ ]:
+ r"""Return a callable for the get batch prediction job method over gRPC.
+
+        Gets a BatchPredictionJob.
+
+ Returns:
+ Callable[[~.GetBatchPredictionJobRequest],
+ ~.BatchPredictionJob]:
+ A function that, when called, will call the underlying RPC
+ on the server.
+ """
+ # Generate a "stub function" on-the-fly which will actually make
+ # the request.
+ # gRPC handles serialization and deserialization, so we just need
+ # to pass in the functions for each.
+ if "get_batch_prediction_job" not in self._stubs:
+ self._stubs["get_batch_prediction_job"] = self.grpc_channel.unary_unary(
+ "/google.cloud.aiplatform.v1.JobService/GetBatchPredictionJob",
+ request_serializer=job_service.GetBatchPredictionJobRequest.serialize,
+ response_deserializer=batch_prediction_job.BatchPredictionJob.deserialize,
+ )
+ return self._stubs["get_batch_prediction_job"]
+
+ @property
+ def list_batch_prediction_jobs(
+ self,
+ ) -> Callable[
+ [job_service.ListBatchPredictionJobsRequest],
+ job_service.ListBatchPredictionJobsResponse,
+ ]:
+ r"""Return a callable for the list batch prediction jobs method over gRPC.
+
+ Lists BatchPredictionJobs in a Location.
+
+ Returns:
+ Callable[[~.ListBatchPredictionJobsRequest],
+ ~.ListBatchPredictionJobsResponse]:
+ A function that, when called, will call the underlying RPC
+ on the server.
+ """
+ # Generate a "stub function" on-the-fly which will actually make
+ # the request.
+ # gRPC handles serialization and deserialization, so we just need
+ # to pass in the functions for each.
+ if "list_batch_prediction_jobs" not in self._stubs:
+ self._stubs["list_batch_prediction_jobs"] = self.grpc_channel.unary_unary(
+ "/google.cloud.aiplatform.v1.JobService/ListBatchPredictionJobs",
+ request_serializer=job_service.ListBatchPredictionJobsRequest.serialize,
+ response_deserializer=job_service.ListBatchPredictionJobsResponse.deserialize,
+ )
+ return self._stubs["list_batch_prediction_jobs"]
+
+ @property
+ def delete_batch_prediction_job(
+ self,
+ ) -> Callable[[job_service.DeleteBatchPredictionJobRequest], operations.Operation]:
+ r"""Return a callable for the delete batch prediction job method over gRPC.
+
+ Deletes a BatchPredictionJob. Can only be called on
+ jobs that already finished.
+
+ Returns:
+ Callable[[~.DeleteBatchPredictionJobRequest],
+ ~.Operation]:
+ A function that, when called, will call the underlying RPC
+ on the server.
+ """
+ # Generate a "stub function" on-the-fly which will actually make
+ # the request.
+ # gRPC handles serialization and deserialization, so we just need
+ # to pass in the functions for each.
+ if "delete_batch_prediction_job" not in self._stubs:
+ self._stubs["delete_batch_prediction_job"] = self.grpc_channel.unary_unary(
+ "/google.cloud.aiplatform.v1.JobService/DeleteBatchPredictionJob",
+ request_serializer=job_service.DeleteBatchPredictionJobRequest.serialize,
+ response_deserializer=operations.Operation.FromString,
+ )
+ return self._stubs["delete_batch_prediction_job"]
+
+ @property
+ def cancel_batch_prediction_job(
+ self,
+ ) -> Callable[[job_service.CancelBatchPredictionJobRequest], empty.Empty]:
+ r"""Return a callable for the cancel batch prediction job method over gRPC.
+
+ Cancels a BatchPredictionJob.
+
+ Starts asynchronous cancellation on the BatchPredictionJob. The
+ server makes the best effort to cancel the job, but success is
+ not guaranteed. Clients can use
+ ``JobService.GetBatchPredictionJob``
+ or other methods to check whether the cancellation succeeded or
+ whether the job completed despite cancellation. On a successful
+        cancellation, the BatchPredictionJob is not deleted; instead its
+ ``BatchPredictionJob.state``
+ is set to ``CANCELLED``. Any files already outputted by the job
+ are not deleted.
+
+ Returns:
+ Callable[[~.CancelBatchPredictionJobRequest],
+ ~.Empty]:
+ A function that, when called, will call the underlying RPC
+ on the server.
+ """
+ # Generate a "stub function" on-the-fly which will actually make
+ # the request.
+ # gRPC handles serialization and deserialization, so we just need
+ # to pass in the functions for each.
+ if "cancel_batch_prediction_job" not in self._stubs:
+ self._stubs["cancel_batch_prediction_job"] = self.grpc_channel.unary_unary(
+ "/google.cloud.aiplatform.v1.JobService/CancelBatchPredictionJob",
+ request_serializer=job_service.CancelBatchPredictionJobRequest.serialize,
+ response_deserializer=empty.Empty.FromString,
+ )
+ return self._stubs["cancel_batch_prediction_job"]
+
+
+__all__ = ("JobServiceGrpcTransport",)
diff --git a/google/cloud/aiplatform_v1/services/job_service/transports/grpc_asyncio.py b/google/cloud/aiplatform_v1/services/job_service/transports/grpc_asyncio.py
new file mode 100644
index 0000000000..f056094c9d
--- /dev/null
+++ b/google/cloud/aiplatform_v1/services/job_service/transports/grpc_asyncio.py
@@ -0,0 +1,907 @@
+# -*- coding: utf-8 -*-
+
+# Copyright 2020 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+import warnings
+from typing import Awaitable, Callable, Dict, Optional, Sequence, Tuple
+
+from google.api_core import gapic_v1 # type: ignore
+from google.api_core import grpc_helpers_async # type: ignore
+from google.api_core import operations_v1 # type: ignore
+from google import auth # type: ignore
+from google.auth import credentials # type: ignore
+from google.auth.transport.grpc import SslCredentials # type: ignore
+
+import grpc # type: ignore
+from grpc.experimental import aio # type: ignore
+
+from google.cloud.aiplatform_v1.types import batch_prediction_job
+from google.cloud.aiplatform_v1.types import (
+ batch_prediction_job as gca_batch_prediction_job,
+)
+from google.cloud.aiplatform_v1.types import custom_job
+from google.cloud.aiplatform_v1.types import custom_job as gca_custom_job
+from google.cloud.aiplatform_v1.types import data_labeling_job
+from google.cloud.aiplatform_v1.types import data_labeling_job as gca_data_labeling_job
+from google.cloud.aiplatform_v1.types import hyperparameter_tuning_job
+from google.cloud.aiplatform_v1.types import (
+ hyperparameter_tuning_job as gca_hyperparameter_tuning_job,
+)
+from google.cloud.aiplatform_v1.types import job_service
+from google.longrunning import operations_pb2 as operations # type: ignore
+from google.protobuf import empty_pb2 as empty # type: ignore
+
+from .base import JobServiceTransport, DEFAULT_CLIENT_INFO
+from .grpc import JobServiceGrpcTransport
+
+
+class JobServiceGrpcAsyncIOTransport(JobServiceTransport):
+ """gRPC AsyncIO backend transport for JobService.
+
+ A service for creating and managing AI Platform's jobs.
+
+ This class defines the same methods as the primary client, so the
+ primary client can load the underlying transport implementation
+ and call it.
+
+ It sends protocol buffers over the wire using gRPC (which is built on
+ top of HTTP/2); the ``grpcio`` package must be installed.
+ """
+
+ _grpc_channel: aio.Channel
+ _stubs: Dict[str, Callable] = {}
+
+ @classmethod
+ def create_channel(
+ cls,
+ host: str = "aiplatform.googleapis.com",
+ credentials: credentials.Credentials = None,
+ credentials_file: Optional[str] = None,
+ scopes: Optional[Sequence[str]] = None,
+ quota_project_id: Optional[str] = None,
+ **kwargs,
+ ) -> aio.Channel:
+ """Create and return a gRPC AsyncIO channel object.
+ Args:
+            host (Optional[str]): The host for the channel to use.
+ credentials (Optional[~.Credentials]): The
+ authorization credentials to attach to requests. These
+ credentials identify this application to the service. If
+ none are specified, the client will attempt to ascertain
+ the credentials from the environment.
+ credentials_file (Optional[str]): A file with credentials that can
+ be loaded with :func:`google.auth.load_credentials_from_file`.
+                This argument is mutually exclusive with credentials.
+            scopes (Optional[Sequence[str]]): An optional list of scopes needed for this
+ service. These are only used when credentials are not specified and
+ are passed to :func:`google.auth.default`.
+ quota_project_id (Optional[str]): An optional project to use for billing
+ and quota.
+ kwargs (Optional[dict]): Keyword arguments, which are passed to the
+ channel creation.
+ Returns:
+ aio.Channel: A gRPC AsyncIO channel object.
+ """
+ scopes = scopes or cls.AUTH_SCOPES
+ return grpc_helpers_async.create_channel(
+ host,
+ credentials=credentials,
+ credentials_file=credentials_file,
+ scopes=scopes,
+ quota_project_id=quota_project_id,
+ **kwargs,
+ )
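+
+    # A minimal usage sketch (illustrative only, assuming application
+    # default credentials). The AsyncIO channel should be created and used
+    # from within a running event loop:
+    #
+    #   channel = JobServiceGrpcAsyncIOTransport.create_channel(
+    #       "aiplatform.googleapis.com:443"
+    #   )
+    #   transport = JobServiceGrpcAsyncIOTransport(channel=channel)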
+
+ def __init__(
+ self,
+ *,
+ host: str = "aiplatform.googleapis.com",
+ credentials: credentials.Credentials = None,
+ credentials_file: Optional[str] = None,
+ scopes: Optional[Sequence[str]] = None,
+ channel: aio.Channel = None,
+ api_mtls_endpoint: str = None,
+ client_cert_source: Callable[[], Tuple[bytes, bytes]] = None,
+ ssl_channel_credentials: grpc.ChannelCredentials = None,
+ client_cert_source_for_mtls: Callable[[], Tuple[bytes, bytes]] = None,
+ quota_project_id=None,
+ client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
+ ) -> None:
+ """Instantiate the transport.
+
+ Args:
+ host (Optional[str]): The hostname to connect to.
+ credentials (Optional[google.auth.credentials.Credentials]): The
+ authorization credentials to attach to requests. These
+ credentials identify the application to the service; if none
+ are specified, the client will attempt to ascertain the
+ credentials from the environment.
+ This argument is ignored if ``channel`` is provided.
+ credentials_file (Optional[str]): A file with credentials that can
+ be loaded with :func:`google.auth.load_credentials_from_file`.
+ This argument is ignored if ``channel`` is provided.
+            scopes (Optional[Sequence[str]]): An optional list of scopes needed for this
+ service. These are only used when credentials are not specified and
+ are passed to :func:`google.auth.default`.
+ channel (Optional[aio.Channel]): A ``Channel`` instance through
+ which to make calls.
+ api_mtls_endpoint (Optional[str]): Deprecated. The mutual TLS endpoint.
+ If provided, it overrides the ``host`` argument and tries to create
+ a mutual TLS channel with client SSL credentials from
+                ``client_cert_source`` or application default SSL credentials.
+ client_cert_source (Optional[Callable[[], Tuple[bytes, bytes]]]):
+ Deprecated. A callback to provide client SSL certificate bytes and
+ private key bytes, both in PEM format. It is ignored if
+ ``api_mtls_endpoint`` is None.
+ ssl_channel_credentials (grpc.ChannelCredentials): SSL credentials
+ for grpc channel. It is ignored if ``channel`` is provided.
+ client_cert_source_for_mtls (Optional[Callable[[], Tuple[bytes, bytes]]]):
+ A callback to provide client certificate bytes and private key bytes,
+ both in PEM format. It is used to configure mutual TLS channel. It is
+ ignored if ``channel`` or ``ssl_channel_credentials`` is provided.
+ quota_project_id (Optional[str]): An optional project to use for billing
+ and quota.
+ client_info (google.api_core.gapic_v1.client_info.ClientInfo):
+ The client info used to send a user-agent string along with
+ API requests. If ``None``, then default info will be used.
+ Generally, you only need to set this if you're developing
+ your own client library.
+
+ Raises:
+ google.auth.exceptions.MutualTlsChannelError: If mutual TLS transport
+ creation failed for any reason.
+ google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials``
+ and ``credentials_file`` are passed.
+ """
+ self._ssl_channel_credentials = ssl_channel_credentials
+
+ if api_mtls_endpoint:
+ warnings.warn("api_mtls_endpoint is deprecated", DeprecationWarning)
+ if client_cert_source:
+ warnings.warn("client_cert_source is deprecated", DeprecationWarning)
+
+ if channel:
+ # Sanity check: Ensure that channel and credentials are not both
+ # provided.
+ credentials = False
+
+ # If a channel was explicitly provided, set it.
+ self._grpc_channel = channel
+ self._ssl_channel_credentials = None
+ elif api_mtls_endpoint:
+ host = (
+ api_mtls_endpoint
+ if ":" in api_mtls_endpoint
+ else api_mtls_endpoint + ":443"
+ )
+
+ if credentials is None:
+ credentials, _ = auth.default(
+ scopes=self.AUTH_SCOPES, quota_project_id=quota_project_id
+ )
+
+ # Create SSL credentials with client_cert_source or application
+ # default SSL credentials.
+ if client_cert_source:
+ cert, key = client_cert_source()
+ ssl_credentials = grpc.ssl_channel_credentials(
+ certificate_chain=cert, private_key=key
+ )
+ else:
+ ssl_credentials = SslCredentials().ssl_credentials
+
+ # create a new channel. The provided one is ignored.
+ self._grpc_channel = type(self).create_channel(
+ host,
+ credentials=credentials,
+ credentials_file=credentials_file,
+ ssl_credentials=ssl_credentials,
+ scopes=scopes or self.AUTH_SCOPES,
+ quota_project_id=quota_project_id,
+ options=[
+ ("grpc.max_send_message_length", -1),
+ ("grpc.max_receive_message_length", -1),
+ ],
+ )
+ self._ssl_channel_credentials = ssl_credentials
+ else:
+ host = host if ":" in host else host + ":443"
+
+ if credentials is None:
+ credentials, _ = auth.default(
+ scopes=self.AUTH_SCOPES, quota_project_id=quota_project_id
+ )
+
+ if client_cert_source_for_mtls and not ssl_channel_credentials:
+ cert, key = client_cert_source_for_mtls()
+ self._ssl_channel_credentials = grpc.ssl_channel_credentials(
+ certificate_chain=cert, private_key=key
+ )
+
+ # create a new channel. The provided one is ignored.
+ self._grpc_channel = type(self).create_channel(
+ host,
+ credentials=credentials,
+ credentials_file=credentials_file,
+ ssl_credentials=self._ssl_channel_credentials,
+ scopes=scopes or self.AUTH_SCOPES,
+ quota_project_id=quota_project_id,
+ options=[
+ ("grpc.max_send_message_length", -1),
+ ("grpc.max_receive_message_length", -1),
+ ],
+ )
+
+ # Run the base constructor.
+ super().__init__(
+ host=host,
+ credentials=credentials,
+ credentials_file=credentials_file,
+ scopes=scopes or self.AUTH_SCOPES,
+ quota_project_id=quota_project_id,
+ client_info=client_info,
+ )
+
+ self._stubs = {}
+ self._operations_client = None
+
+ @property
+ def grpc_channel(self) -> aio.Channel:
+ """Create the channel designed to connect to this service.
+
+ This property caches on the instance; repeated calls return
+ the same channel.
+ """
+ # Return the channel from cache.
+ return self._grpc_channel
+
+ @property
+ def operations_client(self) -> operations_v1.OperationsAsyncClient:
+ """Create the client designed to process long-running operations.
+
+ This property caches on the instance; repeated calls return the same
+ client.
+ """
+ # Sanity check: Only create a new client if we do not already have one.
+ if self._operations_client is None:
+ self._operations_client = operations_v1.OperationsAsyncClient(
+ self.grpc_channel
+ )
+
+ # Return the client from cache.
+ return self._operations_client
+
+ @property
+ def create_custom_job(
+ self,
+ ) -> Callable[
+ [job_service.CreateCustomJobRequest], Awaitable[gca_custom_job.CustomJob]
+ ]:
+ r"""Return a callable for the create custom job method over gRPC.
+
+        Creates a CustomJob. A newly created CustomJob will be
+        attempted to run right away.
+
+ Returns:
+ Callable[[~.CreateCustomJobRequest],
+ Awaitable[~.CustomJob]]:
+ A function that, when called, will call the underlying RPC
+ on the server.
+ """
+ # Generate a "stub function" on-the-fly which will actually make
+ # the request.
+ # gRPC handles serialization and deserialization, so we just need
+ # to pass in the functions for each.
+ if "create_custom_job" not in self._stubs:
+ self._stubs["create_custom_job"] = self.grpc_channel.unary_unary(
+ "/google.cloud.aiplatform.v1.JobService/CreateCustomJob",
+ request_serializer=job_service.CreateCustomJobRequest.serialize,
+ response_deserializer=gca_custom_job.CustomJob.deserialize,
+ )
+ return self._stubs["create_custom_job"]
+
+ @property
+ def get_custom_job(
+ self,
+ ) -> Callable[[job_service.GetCustomJobRequest], Awaitable[custom_job.CustomJob]]:
+ r"""Return a callable for the get custom job method over gRPC.
+
+ Gets a CustomJob.
+
+ Returns:
+ Callable[[~.GetCustomJobRequest],
+ Awaitable[~.CustomJob]]:
+ A function that, when called, will call the underlying RPC
+ on the server.
+ """
+ # Generate a "stub function" on-the-fly which will actually make
+ # the request.
+ # gRPC handles serialization and deserialization, so we just need
+ # to pass in the functions for each.
+ if "get_custom_job" not in self._stubs:
+ self._stubs["get_custom_job"] = self.grpc_channel.unary_unary(
+ "/google.cloud.aiplatform.v1.JobService/GetCustomJob",
+ request_serializer=job_service.GetCustomJobRequest.serialize,
+ response_deserializer=custom_job.CustomJob.deserialize,
+ )
+ return self._stubs["get_custom_job"]
+
+ @property
+ def list_custom_jobs(
+ self,
+ ) -> Callable[
+ [job_service.ListCustomJobsRequest],
+ Awaitable[job_service.ListCustomJobsResponse],
+ ]:
+ r"""Return a callable for the list custom jobs method over gRPC.
+
+ Lists CustomJobs in a Location.
+
+ Returns:
+ Callable[[~.ListCustomJobsRequest],
+ Awaitable[~.ListCustomJobsResponse]]:
+ A function that, when called, will call the underlying RPC
+ on the server.
+ """
+ # Generate a "stub function" on-the-fly which will actually make
+ # the request.
+ # gRPC handles serialization and deserialization, so we just need
+ # to pass in the functions for each.
+ if "list_custom_jobs" not in self._stubs:
+ self._stubs["list_custom_jobs"] = self.grpc_channel.unary_unary(
+ "/google.cloud.aiplatform.v1.JobService/ListCustomJobs",
+ request_serializer=job_service.ListCustomJobsRequest.serialize,
+ response_deserializer=job_service.ListCustomJobsResponse.deserialize,
+ )
+ return self._stubs["list_custom_jobs"]
+
+ @property
+ def delete_custom_job(
+ self,
+ ) -> Callable[
+ [job_service.DeleteCustomJobRequest], Awaitable[operations.Operation]
+ ]:
+ r"""Return a callable for the delete custom job method over gRPC.
+
+ Deletes a CustomJob.
+
+ Returns:
+ Callable[[~.DeleteCustomJobRequest],
+ Awaitable[~.Operation]]:
+ A function that, when called, will call the underlying RPC
+ on the server.
+ """
+ # Generate a "stub function" on-the-fly which will actually make
+ # the request.
+ # gRPC handles serialization and deserialization, so we just need
+ # to pass in the functions for each.
+ if "delete_custom_job" not in self._stubs:
+ self._stubs["delete_custom_job"] = self.grpc_channel.unary_unary(
+ "/google.cloud.aiplatform.v1.JobService/DeleteCustomJob",
+ request_serializer=job_service.DeleteCustomJobRequest.serialize,
+ response_deserializer=operations.Operation.FromString,
+ )
+ return self._stubs["delete_custom_job"]
+
+ @property
+ def cancel_custom_job(
+ self,
+ ) -> Callable[[job_service.CancelCustomJobRequest], Awaitable[empty.Empty]]:
+ r"""Return a callable for the cancel custom job method over gRPC.
+
+ Cancels a CustomJob. Starts asynchronous cancellation on the
+ CustomJob. The server makes a best effort to cancel the job, but
+ success is not guaranteed. Clients can use
+ ``JobService.GetCustomJob``
+ or other methods to check whether the cancellation succeeded or
+ whether the job completed despite cancellation. On successful
+ cancellation, the CustomJob is not deleted; instead it becomes a
+ job with a
+ ``CustomJob.error``
+ value with a ``google.rpc.Status.code`` of
+ 1, corresponding to ``Code.CANCELLED``, and
+ ``CustomJob.state`` is
+ set to ``CANCELLED``.
+
+ Returns:
+ Callable[[~.CancelCustomJobRequest],
+ Awaitable[~.Empty]]:
+ A function that, when called, will call the underlying RPC
+ on the server.
+ """
+ # Generate a "stub function" on-the-fly which will actually make
+ # the request.
+ # gRPC handles serialization and deserialization, so we just need
+ # to pass in the functions for each.
+ if "cancel_custom_job" not in self._stubs:
+ self._stubs["cancel_custom_job"] = self.grpc_channel.unary_unary(
+ "/google.cloud.aiplatform.v1.JobService/CancelCustomJob",
+ request_serializer=job_service.CancelCustomJobRequest.serialize,
+ response_deserializer=empty.Empty.FromString,
+ )
+ return self._stubs["cancel_custom_job"]
+
+ @property
+ def create_data_labeling_job(
+ self,
+ ) -> Callable[
+ [job_service.CreateDataLabelingJobRequest],
+ Awaitable[gca_data_labeling_job.DataLabelingJob],
+ ]:
+ r"""Return a callable for the create data labeling job method over gRPC.
+
+ Creates a DataLabelingJob.
+
+ Returns:
+ Callable[[~.CreateDataLabelingJobRequest],
+ Awaitable[~.DataLabelingJob]]:
+ A function that, when called, will call the underlying RPC
+ on the server.
+ """
+ # Generate a "stub function" on-the-fly which will actually make
+ # the request.
+ # gRPC handles serialization and deserialization, so we just need
+ # to pass in the functions for each.
+ if "create_data_labeling_job" not in self._stubs:
+ self._stubs["create_data_labeling_job"] = self.grpc_channel.unary_unary(
+ "/google.cloud.aiplatform.v1.JobService/CreateDataLabelingJob",
+ request_serializer=job_service.CreateDataLabelingJobRequest.serialize,
+ response_deserializer=gca_data_labeling_job.DataLabelingJob.deserialize,
+ )
+ return self._stubs["create_data_labeling_job"]
+
+ @property
+ def get_data_labeling_job(
+ self,
+ ) -> Callable[
+ [job_service.GetDataLabelingJobRequest],
+ Awaitable[data_labeling_job.DataLabelingJob],
+ ]:
+ r"""Return a callable for the get data labeling job method over gRPC.
+
+ Gets a DataLabelingJob.
+
+ Returns:
+ Callable[[~.GetDataLabelingJobRequest],
+ Awaitable[~.DataLabelingJob]]:
+ A function that, when called, will call the underlying RPC
+ on the server.
+ """
+ # Generate a "stub function" on-the-fly which will actually make
+ # the request.
+ # gRPC handles serialization and deserialization, so we just need
+ # to pass in the functions for each.
+ if "get_data_labeling_job" not in self._stubs:
+ self._stubs["get_data_labeling_job"] = self.grpc_channel.unary_unary(
+ "/google.cloud.aiplatform.v1.JobService/GetDataLabelingJob",
+ request_serializer=job_service.GetDataLabelingJobRequest.serialize,
+ response_deserializer=data_labeling_job.DataLabelingJob.deserialize,
+ )
+ return self._stubs["get_data_labeling_job"]
+
+ @property
+ def list_data_labeling_jobs(
+ self,
+ ) -> Callable[
+ [job_service.ListDataLabelingJobsRequest],
+ Awaitable[job_service.ListDataLabelingJobsResponse],
+ ]:
+ r"""Return a callable for the list data labeling jobs method over gRPC.
+
+ Lists DataLabelingJobs in a Location.
+
+ Returns:
+ Callable[[~.ListDataLabelingJobsRequest],
+ Awaitable[~.ListDataLabelingJobsResponse]]:
+ A function that, when called, will call the underlying RPC
+ on the server.
+ """
+ # Generate a "stub function" on-the-fly which will actually make
+ # the request.
+ # gRPC handles serialization and deserialization, so we just need
+ # to pass in the functions for each.
+ if "list_data_labeling_jobs" not in self._stubs:
+ self._stubs["list_data_labeling_jobs"] = self.grpc_channel.unary_unary(
+ "/google.cloud.aiplatform.v1.JobService/ListDataLabelingJobs",
+ request_serializer=job_service.ListDataLabelingJobsRequest.serialize,
+ response_deserializer=job_service.ListDataLabelingJobsResponse.deserialize,
+ )
+ return self._stubs["list_data_labeling_jobs"]
+
+ @property
+ def delete_data_labeling_job(
+ self,
+ ) -> Callable[
+ [job_service.DeleteDataLabelingJobRequest], Awaitable[operations.Operation]
+ ]:
+ r"""Return a callable for the delete data labeling job method over gRPC.
+
+ Deletes a DataLabelingJob.
+
+ Returns:
+ Callable[[~.DeleteDataLabelingJobRequest],
+ Awaitable[~.Operation]]:
+ A function that, when called, will call the underlying RPC
+ on the server.
+ """
+ # Generate a "stub function" on-the-fly which will actually make
+ # the request.
+ # gRPC handles serialization and deserialization, so we just need
+ # to pass in the functions for each.
+ if "delete_data_labeling_job" not in self._stubs:
+ self._stubs["delete_data_labeling_job"] = self.grpc_channel.unary_unary(
+ "/google.cloud.aiplatform.v1.JobService/DeleteDataLabelingJob",
+ request_serializer=job_service.DeleteDataLabelingJobRequest.serialize,
+ response_deserializer=operations.Operation.FromString,
+ )
+ return self._stubs["delete_data_labeling_job"]
+
+ @property
+ def cancel_data_labeling_job(
+ self,
+ ) -> Callable[[job_service.CancelDataLabelingJobRequest], Awaitable[empty.Empty]]:
+ r"""Return a callable for the cancel data labeling job method over gRPC.
+
+ Cancels a DataLabelingJob. Success of cancellation is
+ not guaranteed.
+
+ Returns:
+ Callable[[~.CancelDataLabelingJobRequest],
+ Awaitable[~.Empty]]:
+ A function that, when called, will call the underlying RPC
+ on the server.
+ """
+ # Generate a "stub function" on-the-fly which will actually make
+ # the request.
+ # gRPC handles serialization and deserialization, so we just need
+ # to pass in the functions for each.
+ if "cancel_data_labeling_job" not in self._stubs:
+ self._stubs["cancel_data_labeling_job"] = self.grpc_channel.unary_unary(
+ "/google.cloud.aiplatform.v1.JobService/CancelDataLabelingJob",
+ request_serializer=job_service.CancelDataLabelingJobRequest.serialize,
+ response_deserializer=empty.Empty.FromString,
+ )
+ return self._stubs["cancel_data_labeling_job"]
+
+ @property
+ def create_hyperparameter_tuning_job(
+ self,
+ ) -> Callable[
+ [job_service.CreateHyperparameterTuningJobRequest],
+ Awaitable[gca_hyperparameter_tuning_job.HyperparameterTuningJob],
+ ]:
+ r"""Return a callable for the create hyperparameter tuning
+ job method over gRPC.
+
+        Creates a HyperparameterTuningJob.
+
+ Returns:
+ Callable[[~.CreateHyperparameterTuningJobRequest],
+ Awaitable[~.HyperparameterTuningJob]]:
+ A function that, when called, will call the underlying RPC
+ on the server.
+ """
+ # Generate a "stub function" on-the-fly which will actually make
+ # the request.
+ # gRPC handles serialization and deserialization, so we just need
+ # to pass in the functions for each.
+ if "create_hyperparameter_tuning_job" not in self._stubs:
+ self._stubs[
+ "create_hyperparameter_tuning_job"
+ ] = self.grpc_channel.unary_unary(
+ "/google.cloud.aiplatform.v1.JobService/CreateHyperparameterTuningJob",
+ request_serializer=job_service.CreateHyperparameterTuningJobRequest.serialize,
+ response_deserializer=gca_hyperparameter_tuning_job.HyperparameterTuningJob.deserialize,
+ )
+ return self._stubs["create_hyperparameter_tuning_job"]
+
+ @property
+ def get_hyperparameter_tuning_job(
+ self,
+ ) -> Callable[
+ [job_service.GetHyperparameterTuningJobRequest],
+ Awaitable[hyperparameter_tuning_job.HyperparameterTuningJob],
+ ]:
+ r"""Return a callable for the get hyperparameter tuning job method over gRPC.
+
+        Gets a HyperparameterTuningJob.
+
+ Returns:
+ Callable[[~.GetHyperparameterTuningJobRequest],
+ Awaitable[~.HyperparameterTuningJob]]:
+ A function that, when called, will call the underlying RPC
+ on the server.
+ """
+ # Generate a "stub function" on-the-fly which will actually make
+ # the request.
+ # gRPC handles serialization and deserialization, so we just need
+ # to pass in the functions for each.
+ if "get_hyperparameter_tuning_job" not in self._stubs:
+ self._stubs[
+ "get_hyperparameter_tuning_job"
+ ] = self.grpc_channel.unary_unary(
+ "/google.cloud.aiplatform.v1.JobService/GetHyperparameterTuningJob",
+ request_serializer=job_service.GetHyperparameterTuningJobRequest.serialize,
+ response_deserializer=hyperparameter_tuning_job.HyperparameterTuningJob.deserialize,
+ )
+ return self._stubs["get_hyperparameter_tuning_job"]
+
+ @property
+ def list_hyperparameter_tuning_jobs(
+ self,
+ ) -> Callable[
+ [job_service.ListHyperparameterTuningJobsRequest],
+ Awaitable[job_service.ListHyperparameterTuningJobsResponse],
+ ]:
+ r"""Return a callable for the list hyperparameter tuning
+ jobs method over gRPC.
+
+ Lists HyperparameterTuningJobs in a Location.
+
+ Returns:
+ Callable[[~.ListHyperparameterTuningJobsRequest],
+ Awaitable[~.ListHyperparameterTuningJobsResponse]]:
+ A function that, when called, will call the underlying RPC
+ on the server.
+ """
+ # Generate a "stub function" on-the-fly which will actually make
+ # the request.
+ # gRPC handles serialization and deserialization, so we just need
+ # to pass in the functions for each.
+ if "list_hyperparameter_tuning_jobs" not in self._stubs:
+ self._stubs[
+ "list_hyperparameter_tuning_jobs"
+ ] = self.grpc_channel.unary_unary(
+ "/google.cloud.aiplatform.v1.JobService/ListHyperparameterTuningJobs",
+ request_serializer=job_service.ListHyperparameterTuningJobsRequest.serialize,
+ response_deserializer=job_service.ListHyperparameterTuningJobsResponse.deserialize,
+ )
+ return self._stubs["list_hyperparameter_tuning_jobs"]
+
+ @property
+ def delete_hyperparameter_tuning_job(
+ self,
+ ) -> Callable[
+ [job_service.DeleteHyperparameterTuningJobRequest],
+ Awaitable[operations.Operation],
+ ]:
+ r"""Return a callable for the delete hyperparameter tuning
+ job method over gRPC.
+
+ Deletes a HyperparameterTuningJob.
+
+ Returns:
+ Callable[[~.DeleteHyperparameterTuningJobRequest],
+ Awaitable[~.Operation]]:
+ A function that, when called, will call the underlying RPC
+ on the server.
+ """
+ # Generate a "stub function" on-the-fly which will actually make
+ # the request.
+ # gRPC handles serialization and deserialization, so we just need
+ # to pass in the functions for each.
+ if "delete_hyperparameter_tuning_job" not in self._stubs:
+ self._stubs[
+ "delete_hyperparameter_tuning_job"
+ ] = self.grpc_channel.unary_unary(
+ "/google.cloud.aiplatform.v1.JobService/DeleteHyperparameterTuningJob",
+ request_serializer=job_service.DeleteHyperparameterTuningJobRequest.serialize,
+ response_deserializer=operations.Operation.FromString,
+ )
+ return self._stubs["delete_hyperparameter_tuning_job"]
+
+ @property
+ def cancel_hyperparameter_tuning_job(
+ self,
+ ) -> Callable[
+ [job_service.CancelHyperparameterTuningJobRequest], Awaitable[empty.Empty]
+ ]:
+ r"""Return a callable for the cancel hyperparameter tuning
+ job method over gRPC.
+
+ Cancels a HyperparameterTuningJob. Starts asynchronous
+ cancellation on the HyperparameterTuningJob. The server makes a
+ best effort to cancel the job, but success is not guaranteed.
+ Clients can use
+ ``JobService.GetHyperparameterTuningJob``
+ or other methods to check whether the cancellation succeeded or
+ whether the job completed despite cancellation. On successful
+ cancellation, the HyperparameterTuningJob is not deleted;
+ instead it becomes a job with a
+ ``HyperparameterTuningJob.error``
+ value with a ``google.rpc.Status.code`` of
+ 1, corresponding to ``Code.CANCELLED``, and
+ ``HyperparameterTuningJob.state``
+ is set to ``CANCELLED``.
+
+ Returns:
+ Callable[[~.CancelHyperparameterTuningJobRequest],
+ Awaitable[~.Empty]]:
+ A function that, when called, will call the underlying RPC
+ on the server.
+ """
+ # Generate a "stub function" on-the-fly which will actually make
+ # the request.
+ # gRPC handles serialization and deserialization, so we just need
+ # to pass in the functions for each.
+ if "cancel_hyperparameter_tuning_job" not in self._stubs:
+ self._stubs[
+ "cancel_hyperparameter_tuning_job"
+ ] = self.grpc_channel.unary_unary(
+ "/google.cloud.aiplatform.v1.JobService/CancelHyperparameterTuningJob",
+ request_serializer=job_service.CancelHyperparameterTuningJobRequest.serialize,
+ response_deserializer=empty.Empty.FromString,
+ )
+ return self._stubs["cancel_hyperparameter_tuning_job"]
+
+ @property
+ def create_batch_prediction_job(
+ self,
+ ) -> Callable[
+ [job_service.CreateBatchPredictionJobRequest],
+ Awaitable[gca_batch_prediction_job.BatchPredictionJob],
+ ]:
+ r"""Return a callable for the create batch prediction job method over gRPC.
+
+        Creates a BatchPredictionJob. A newly created
+        BatchPredictionJob will be attempted to start right away.
+
+ Returns:
+ Callable[[~.CreateBatchPredictionJobRequest],
+ Awaitable[~.BatchPredictionJob]]:
+ A function that, when called, will call the underlying RPC
+ on the server.
+ """
+ # Generate a "stub function" on-the-fly which will actually make
+ # the request.
+ # gRPC handles serialization and deserialization, so we just need
+ # to pass in the functions for each.
+ if "create_batch_prediction_job" not in self._stubs:
+ self._stubs["create_batch_prediction_job"] = self.grpc_channel.unary_unary(
+ "/google.cloud.aiplatform.v1.JobService/CreateBatchPredictionJob",
+ request_serializer=job_service.CreateBatchPredictionJobRequest.serialize,
+ response_deserializer=gca_batch_prediction_job.BatchPredictionJob.deserialize,
+ )
+ return self._stubs["create_batch_prediction_job"]
+
+ @property
+ def get_batch_prediction_job(
+ self,
+ ) -> Callable[
+ [job_service.GetBatchPredictionJobRequest],
+ Awaitable[batch_prediction_job.BatchPredictionJob],
+ ]:
+ r"""Return a callable for the get batch prediction job method over gRPC.
+
+        Gets a BatchPredictionJob.
+
+ Returns:
+ Callable[[~.GetBatchPredictionJobRequest],
+ Awaitable[~.BatchPredictionJob]]:
+ A function that, when called, will call the underlying RPC
+ on the server.
+ """
+ # Generate a "stub function" on-the-fly which will actually make
+ # the request.
+ # gRPC handles serialization and deserialization, so we just need
+ # to pass in the functions for each.
+ if "get_batch_prediction_job" not in self._stubs:
+ self._stubs["get_batch_prediction_job"] = self.grpc_channel.unary_unary(
+ "/google.cloud.aiplatform.v1.JobService/GetBatchPredictionJob",
+ request_serializer=job_service.GetBatchPredictionJobRequest.serialize,
+ response_deserializer=batch_prediction_job.BatchPredictionJob.deserialize,
+ )
+ return self._stubs["get_batch_prediction_job"]
+
+ @property
+ def list_batch_prediction_jobs(
+ self,
+ ) -> Callable[
+ [job_service.ListBatchPredictionJobsRequest],
+ Awaitable[job_service.ListBatchPredictionJobsResponse],
+ ]:
+ r"""Return a callable for the list batch prediction jobs method over gRPC.
+
+ Lists BatchPredictionJobs in a Location.
+
+ Returns:
+ Callable[[~.ListBatchPredictionJobsRequest],
+ Awaitable[~.ListBatchPredictionJobsResponse]]:
+ A function that, when called, will call the underlying RPC
+ on the server.
+ """
+ # Generate a "stub function" on-the-fly which will actually make
+ # the request.
+ # gRPC handles serialization and deserialization, so we just need
+ # to pass in the functions for each.
+ if "list_batch_prediction_jobs" not in self._stubs:
+ self._stubs["list_batch_prediction_jobs"] = self.grpc_channel.unary_unary(
+ "/google.cloud.aiplatform.v1.JobService/ListBatchPredictionJobs",
+ request_serializer=job_service.ListBatchPredictionJobsRequest.serialize,
+ response_deserializer=job_service.ListBatchPredictionJobsResponse.deserialize,
+ )
+ return self._stubs["list_batch_prediction_jobs"]
+
+ @property
+ def delete_batch_prediction_job(
+ self,
+ ) -> Callable[
+ [job_service.DeleteBatchPredictionJobRequest], Awaitable[operations.Operation]
+ ]:
+ r"""Return a callable for the delete batch prediction job method over gRPC.
+
+ Deletes a BatchPredictionJob. Can only be called on
+ jobs that already finished.
+
+ Returns:
+ Callable[[~.DeleteBatchPredictionJobRequest],
+ Awaitable[~.Operation]]:
+ A function that, when called, will call the underlying RPC
+ on the server.
+ """
+ # Generate a "stub function" on-the-fly which will actually make
+ # the request.
+ # gRPC handles serialization and deserialization, so we just need
+ # to pass in the functions for each.
+ if "delete_batch_prediction_job" not in self._stubs:
+ self._stubs["delete_batch_prediction_job"] = self.grpc_channel.unary_unary(
+ "/google.cloud.aiplatform.v1.JobService/DeleteBatchPredictionJob",
+ request_serializer=job_service.DeleteBatchPredictionJobRequest.serialize,
+ response_deserializer=operations.Operation.FromString,
+ )
+ return self._stubs["delete_batch_prediction_job"]
+
+ @property
+ def cancel_batch_prediction_job(
+ self,
+ ) -> Callable[
+ [job_service.CancelBatchPredictionJobRequest], Awaitable[empty.Empty]
+ ]:
+ r"""Return a callable for the cancel batch prediction job method over gRPC.
+
+ Cancels a BatchPredictionJob.
+
+ Starts asynchronous cancellation on the BatchPredictionJob. The
+ server makes the best effort to cancel the job, but success is
+ not guaranteed. Clients can use
+ ``JobService.GetBatchPredictionJob``
+ or other methods to check whether the cancellation succeeded or
+ whether the job completed despite cancellation. On a successful
+        cancellation, the BatchPredictionJob is not deleted; instead its
+ ``BatchPredictionJob.state``
+ is set to ``CANCELLED``. Any files already outputted by the job
+ are not deleted.
+
+ Returns:
+ Callable[[~.CancelBatchPredictionJobRequest],
+ Awaitable[~.Empty]]:
+ A function that, when called, will call the underlying RPC
+ on the server.
+ """
+ # Generate a "stub function" on-the-fly which will actually make
+ # the request.
+ # gRPC handles serialization and deserialization, so we just need
+ # to pass in the functions for each.
+ if "cancel_batch_prediction_job" not in self._stubs:
+ self._stubs["cancel_batch_prediction_job"] = self.grpc_channel.unary_unary(
+ "/google.cloud.aiplatform.v1.JobService/CancelBatchPredictionJob",
+ request_serializer=job_service.CancelBatchPredictionJobRequest.serialize,
+ response_deserializer=empty.Empty.FromString,
+ )
+ return self._stubs["cancel_batch_prediction_job"]
+
+
+__all__ = ("JobServiceGrpcAsyncIOTransport",)
diff --git a/google/cloud/aiplatform_v1/services/migration_service/__init__.py b/google/cloud/aiplatform_v1/services/migration_service/__init__.py
new file mode 100644
index 0000000000..1d6216d1f7
--- /dev/null
+++ b/google/cloud/aiplatform_v1/services/migration_service/__init__.py
@@ -0,0 +1,24 @@
+# -*- coding: utf-8 -*-
+
+# Copyright 2020 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+from .client import MigrationServiceClient
+from .async_client import MigrationServiceAsyncClient
+
+__all__ = (
+ "MigrationServiceClient",
+ "MigrationServiceAsyncClient",
+)
diff --git a/google/cloud/aiplatform_v1/services/migration_service/async_client.py b/google/cloud/aiplatform_v1/services/migration_service/async_client.py
new file mode 100644
index 0000000000..fcb1d23da7
--- /dev/null
+++ b/google/cloud/aiplatform_v1/services/migration_service/async_client.py
@@ -0,0 +1,367 @@
+# -*- coding: utf-8 -*-
+
+# Copyright 2020 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+from collections import OrderedDict
+import functools
+import re
+from typing import Dict, Sequence, Tuple, Type, Union
+import pkg_resources
+
+import google.api_core.client_options as ClientOptions # type: ignore
+from google.api_core import exceptions # type: ignore
+from google.api_core import gapic_v1 # type: ignore
+from google.api_core import retry as retries # type: ignore
+from google.auth import credentials # type: ignore
+from google.oauth2 import service_account # type: ignore
+
+from google.api_core import operation # type: ignore
+from google.api_core import operation_async # type: ignore
+from google.cloud.aiplatform_v1.services.migration_service import pagers
+from google.cloud.aiplatform_v1.types import migratable_resource
+from google.cloud.aiplatform_v1.types import migration_service
+
+from .transports.base import MigrationServiceTransport, DEFAULT_CLIENT_INFO
+from .transports.grpc_asyncio import MigrationServiceGrpcAsyncIOTransport
+from .client import MigrationServiceClient
+
+
+class MigrationServiceAsyncClient:
+ """A service that migrates resources from automl.googleapis.com,
+ datalabeling.googleapis.com and ml.googleapis.com to AI
+ Platform.
+ """
+
+ _client: MigrationServiceClient
+
+ DEFAULT_ENDPOINT = MigrationServiceClient.DEFAULT_ENDPOINT
+ DEFAULT_MTLS_ENDPOINT = MigrationServiceClient.DEFAULT_MTLS_ENDPOINT
+
+ annotated_dataset_path = staticmethod(MigrationServiceClient.annotated_dataset_path)
+ parse_annotated_dataset_path = staticmethod(
+ MigrationServiceClient.parse_annotated_dataset_path
+ )
+ dataset_path = staticmethod(MigrationServiceClient.dataset_path)
+ parse_dataset_path = staticmethod(MigrationServiceClient.parse_dataset_path)
+ dataset_path = staticmethod(MigrationServiceClient.dataset_path)
+ parse_dataset_path = staticmethod(MigrationServiceClient.parse_dataset_path)
+ dataset_path = staticmethod(MigrationServiceClient.dataset_path)
+ parse_dataset_path = staticmethod(MigrationServiceClient.parse_dataset_path)
+ model_path = staticmethod(MigrationServiceClient.model_path)
+ parse_model_path = staticmethod(MigrationServiceClient.parse_model_path)
+ model_path = staticmethod(MigrationServiceClient.model_path)
+ parse_model_path = staticmethod(MigrationServiceClient.parse_model_path)
+ version_path = staticmethod(MigrationServiceClient.version_path)
+ parse_version_path = staticmethod(MigrationServiceClient.parse_version_path)
+
+ common_billing_account_path = staticmethod(
+ MigrationServiceClient.common_billing_account_path
+ )
+ parse_common_billing_account_path = staticmethod(
+ MigrationServiceClient.parse_common_billing_account_path
+ )
+
+ common_folder_path = staticmethod(MigrationServiceClient.common_folder_path)
+ parse_common_folder_path = staticmethod(
+ MigrationServiceClient.parse_common_folder_path
+ )
+
+ common_organization_path = staticmethod(
+ MigrationServiceClient.common_organization_path
+ )
+ parse_common_organization_path = staticmethod(
+ MigrationServiceClient.parse_common_organization_path
+ )
+
+ common_project_path = staticmethod(MigrationServiceClient.common_project_path)
+ parse_common_project_path = staticmethod(
+ MigrationServiceClient.parse_common_project_path
+ )
+
+ common_location_path = staticmethod(MigrationServiceClient.common_location_path)
+ parse_common_location_path = staticmethod(
+ MigrationServiceClient.parse_common_location_path
+ )
+
+ from_service_account_info = MigrationServiceClient.from_service_account_info
+ from_service_account_file = MigrationServiceClient.from_service_account_file
+ from_service_account_json = from_service_account_file
+
+ @property
+ def transport(self) -> MigrationServiceTransport:
+ """Return the transport used by the client instance.
+
+ Returns:
+ MigrationServiceTransport: The transport used by the client instance.
+ """
+ return self._client.transport
+
+ get_transport_class = functools.partial(
+ type(MigrationServiceClient).get_transport_class, type(MigrationServiceClient)
+ )
+
+ def __init__(
+ self,
+ *,
+ credentials: credentials.Credentials = None,
+ transport: Union[str, MigrationServiceTransport] = "grpc_asyncio",
+ client_options: ClientOptions = None,
+ client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
+ ) -> None:
+ """Instantiate the migration service client.
+
+ Args:
+ credentials (Optional[google.auth.credentials.Credentials]): The
+ authorization credentials to attach to requests. These
+ credentials identify the application to the service; if none
+ are specified, the client will attempt to ascertain the
+ credentials from the environment.
+ transport (Union[str, ~.MigrationServiceTransport]): The
+ transport to use. If set to None, a transport is chosen
+ automatically.
+ client_options (ClientOptions): Custom options for the client. It
+ won't take effect if a ``transport`` instance is provided.
+ (1) The ``api_endpoint`` property can be used to override the
+ default endpoint provided by the client. GOOGLE_API_USE_MTLS_ENDPOINT
+ environment variable can also be used to override the endpoint:
+ "always" (always use the default mTLS endpoint), "never" (always
+ use the default regular endpoint) and "auto" (auto switch to the
+ default mTLS endpoint if client certificate is present, this is
+ the default value). However, the ``api_endpoint`` property takes
+ precedence if provided.
+ (2) If GOOGLE_API_USE_CLIENT_CERTIFICATE environment variable
+ is "true", then the ``client_cert_source`` property can be used
+ to provide client certificate for mutual TLS transport. If
+ not provided, the default SSL client certificate will be used if
+ present. If GOOGLE_API_USE_CLIENT_CERTIFICATE is "false" or not
+ set, no client certificate will be used.
+
+ Raises:
+ google.auth.exceptions.MutualTlsChannelError: If mutual TLS transport
+ creation failed for any reason.
+ """
+
+ self._client = MigrationServiceClient(
+ credentials=credentials,
+ transport=transport,
+ client_options=client_options,
+ client_info=client_info,
+ )
+
+ async def search_migratable_resources(
+ self,
+ request: migration_service.SearchMigratableResourcesRequest = None,
+ *,
+ parent: str = None,
+ retry: retries.Retry = gapic_v1.method.DEFAULT,
+ timeout: float = None,
+ metadata: Sequence[Tuple[str, str]] = (),
+ ) -> pagers.SearchMigratableResourcesAsyncPager:
+ r"""Searches all of the resources in
+ automl.googleapis.com, datalabeling.googleapis.com and
+ ml.googleapis.com that can be migrated to AI Platform's
+ given location.
+
+ Args:
+ request (:class:`google.cloud.aiplatform_v1.types.SearchMigratableResourcesRequest`):
+ The request object. Request message for
+ ``MigrationService.SearchMigratableResources``.
+ parent (:class:`str`):
+ Required. The location that the migratable resources
+ should be searched from. It's the AI Platform location
+ that the resources can be migrated to, not the
+ resources' original location. Format:
+ ``projects/{project}/locations/{location}``
+
+ This corresponds to the ``parent`` field
+ on the ``request`` instance; if ``request`` is provided, this
+ should not be set.
+
+ retry (google.api_core.retry.Retry): Designation of what errors, if any,
+ should be retried.
+ timeout (float): The timeout for this request.
+ metadata (Sequence[Tuple[str, str]]): Strings which should be
+ sent along with the request as metadata.
+
+ Returns:
+ google.cloud.aiplatform_v1.services.migration_service.pagers.SearchMigratableResourcesAsyncPager:
+ Response message for
+ ``MigrationService.SearchMigratableResources``.
+
+ Iterating over this object will yield results and
+ resolve additional pages automatically.
+
+ """
+ # Create or coerce a protobuf request object.
+ # Sanity check: If we got a request object, we should *not* have
+ # gotten any keyword arguments that map to the request.
+ has_flattened_params = any([parent])
+ if request is not None and has_flattened_params:
+ raise ValueError(
+ "If the `request` argument is set, then none of "
+ "the individual field arguments should be set."
+ )
+
+ request = migration_service.SearchMigratableResourcesRequest(request)
+
+ # If we have keyword arguments corresponding to fields on the
+ # request, apply these.
+
+ if parent is not None:
+ request.parent = parent
+
+ # Wrap the RPC method; this adds retry and timeout information,
+ # and friendly error handling.
+ rpc = gapic_v1.method_async.wrap_method(
+ self._client._transport.search_migratable_resources,
+ default_timeout=None,
+ client_info=DEFAULT_CLIENT_INFO,
+ )
+
+ # Certain fields should be provided within the metadata header;
+ # add these here.
+ metadata = tuple(metadata) + (
+ gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)),
+ )
+
+ # Send the request.
+ response = await rpc(request, retry=retry, timeout=timeout, metadata=metadata,)
+
+ # This method is paged; wrap the response in a pager, which provides
+ # an `__aiter__` convenience method.
+ response = pagers.SearchMigratableResourcesAsyncPager(
+ method=rpc, request=request, response=response, metadata=metadata,
+ )
+
+ # Done; return the response.
+ return response
+
+ async def batch_migrate_resources(
+ self,
+ request: migration_service.BatchMigrateResourcesRequest = None,
+ *,
+ parent: str = None,
+ migrate_resource_requests: Sequence[
+ migration_service.MigrateResourceRequest
+ ] = None,
+ retry: retries.Retry = gapic_v1.method.DEFAULT,
+ timeout: float = None,
+ metadata: Sequence[Tuple[str, str]] = (),
+ ) -> operation_async.AsyncOperation:
+ r"""Batch migrates resources from ml.googleapis.com,
+ automl.googleapis.com, and datalabeling.googleapis.com
+ to AI Platform (Unified).
+
+ Args:
+ request (:class:`google.cloud.aiplatform_v1.types.BatchMigrateResourcesRequest`):
+ The request object. Request message for
+ ``MigrationService.BatchMigrateResources``.
+ parent (:class:`str`):
+                Required. The location where the migrated
+                resources will live. Format:
+ ``projects/{project}/locations/{location}``
+
+ This corresponds to the ``parent`` field
+ on the ``request`` instance; if ``request`` is provided, this
+ should not be set.
+ migrate_resource_requests (:class:`Sequence[google.cloud.aiplatform_v1.types.MigrateResourceRequest]`):
+ Required. The request messages
+ specifying the resources to migrate.
+ They must be in the same location as the
+ destination. Up to 50 resources can be
+ migrated in one batch.
+
+ This corresponds to the ``migrate_resource_requests`` field
+ on the ``request`` instance; if ``request`` is provided, this
+ should not be set.
+
+ retry (google.api_core.retry.Retry): Designation of what errors, if any,
+ should be retried.
+ timeout (float): The timeout for this request.
+ metadata (Sequence[Tuple[str, str]]): Strings which should be
+ sent along with the request as metadata.
+
+ Returns:
+ google.api_core.operation_async.AsyncOperation:
+ An object representing a long-running operation.
+
+ The result type for the operation will be
+ :class:`google.cloud.aiplatform_v1.types.BatchMigrateResourcesResponse`
+ Response message for
+ ``MigrationService.BatchMigrateResources``.
+
+ """
+ # Create or coerce a protobuf request object.
+ # Sanity check: If we got a request object, we should *not* have
+ # gotten any keyword arguments that map to the request.
+ has_flattened_params = any([parent, migrate_resource_requests])
+ if request is not None and has_flattened_params:
+ raise ValueError(
+ "If the `request` argument is set, then none of "
+ "the individual field arguments should be set."
+ )
+
+ request = migration_service.BatchMigrateResourcesRequest(request)
+
+ # If we have keyword arguments corresponding to fields on the
+ # request, apply these.
+
+ if parent is not None:
+ request.parent = parent
+
+ if migrate_resource_requests:
+ request.migrate_resource_requests.extend(migrate_resource_requests)
+
+ # Wrap the RPC method; this adds retry and timeout information,
+ # and friendly error handling.
+ rpc = gapic_v1.method_async.wrap_method(
+ self._client._transport.batch_migrate_resources,
+ default_timeout=None,
+ client_info=DEFAULT_CLIENT_INFO,
+ )
+
+ # Certain fields should be provided within the metadata header;
+ # add these here.
+ metadata = tuple(metadata) + (
+ gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)),
+ )
+
+ # Send the request.
+ response = await rpc(request, retry=retry, timeout=timeout, metadata=metadata,)
+
+ # Wrap the response in an operation future.
+ response = operation_async.from_gapic(
+ response,
+ self._client._transport.operations_client,
+ migration_service.BatchMigrateResourcesResponse,
+ metadata_type=migration_service.BatchMigrateResourcesOperationMetadata,
+ )
+
+ # Done; return the response.
+ return response
+
+
+try:
+ DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo(
+ gapic_version=pkg_resources.get_distribution(
+ "google-cloud-aiplatform",
+ ).version,
+ )
+except pkg_resources.DistributionNotFound:
+ DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo()
+
+
+__all__ = ("MigrationServiceAsyncClient",)
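
As a minimal sketch of driving the async client defined above (project and location are placeholders; the returned pager supports ``async for`` as documented in its docstring):

    import asyncio

    from google.cloud import aiplatform_v1


    async def list_migratable_resources(parent: str) -> None:
        client = aiplatform_v1.MigrationServiceAsyncClient()

        # Returns a SearchMigratableResourcesAsyncPager; iterating it
        # resolves additional pages transparently.
        pager = await client.search_migratable_resources(parent=parent)
        async for resource in pager:
            print(resource)


    asyncio.run(
        list_migratable_resources("projects/my-project/locations/us-central1")
    )
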
diff --git a/google/cloud/aiplatform_v1/services/migration_service/client.py b/google/cloud/aiplatform_v1/services/migration_service/client.py
new file mode 100644
index 0000000000..3ed18e0fa8
--- /dev/null
+++ b/google/cloud/aiplatform_v1/services/migration_service/client.py
@@ -0,0 +1,654 @@
+# -*- coding: utf-8 -*-
+
+# Copyright 2020 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+from collections import OrderedDict
+from distutils import util
+import os
+import re
+from typing import Callable, Dict, Optional, Sequence, Tuple, Type, Union
+import pkg_resources
+
+from google.api_core import client_options as client_options_lib # type: ignore
+from google.api_core import exceptions # type: ignore
+from google.api_core import gapic_v1 # type: ignore
+from google.api_core import retry as retries # type: ignore
+from google.auth import credentials # type: ignore
+from google.auth.transport import mtls # type: ignore
+from google.auth.transport.grpc import SslCredentials # type: ignore
+from google.auth.exceptions import MutualTLSChannelError # type: ignore
+from google.oauth2 import service_account # type: ignore
+
+from google.api_core import operation # type: ignore
+from google.api_core import operation_async # type: ignore
+from google.cloud.aiplatform_v1.services.migration_service import pagers
+from google.cloud.aiplatform_v1.types import migratable_resource
+from google.cloud.aiplatform_v1.types import migration_service
+
+from .transports.base import MigrationServiceTransport, DEFAULT_CLIENT_INFO
+from .transports.grpc import MigrationServiceGrpcTransport
+from .transports.grpc_asyncio import MigrationServiceGrpcAsyncIOTransport
+
+
+class MigrationServiceClientMeta(type):
+ """Metaclass for the MigrationService client.
+
+ This provides class-level methods for building and retrieving
+ support objects (e.g. transport) without polluting the client instance
+ objects.
+ """
+
+ _transport_registry = (
+ OrderedDict()
+ ) # type: Dict[str, Type[MigrationServiceTransport]]
+ _transport_registry["grpc"] = MigrationServiceGrpcTransport
+ _transport_registry["grpc_asyncio"] = MigrationServiceGrpcAsyncIOTransport
+
+ def get_transport_class(cls, label: str = None,) -> Type[MigrationServiceTransport]:
+ """Return an appropriate transport class.
+
+ Args:
+ label: The name of the desired transport. If none is
+ provided, then the first transport in the registry is used.
+
+ Returns:
+ The transport class to use.
+ """
+ # If a specific transport is requested, return that one.
+ if label:
+ return cls._transport_registry[label]
+
+ # No transport is requested; return the default (that is, the first one
+ # in the dictionary).
+ return next(iter(cls._transport_registry.values()))
+
+
+class MigrationServiceClient(metaclass=MigrationServiceClientMeta):
+ """A service that migrates resources from automl.googleapis.com,
+ datalabeling.googleapis.com and ml.googleapis.com to AI
+ Platform.
+ """
+
+ @staticmethod
+ def _get_default_mtls_endpoint(api_endpoint):
+ """Convert api endpoint to mTLS endpoint.
+ Convert "*.sandbox.googleapis.com" and "*.googleapis.com" to
+ "*.mtls.sandbox.googleapis.com" and "*.mtls.googleapis.com" respectively.
+ Args:
+ api_endpoint (Optional[str]): the api endpoint to convert.
+ Returns:
+ str: converted mTLS api endpoint.
+ """
+ if not api_endpoint:
+ return api_endpoint
+
+ mtls_endpoint_re = re.compile(
+            r"(?P<name>[^.]+)(?P<mtls>\.mtls)?(?P<sandbox>\.sandbox)?(?P<googledomain>\.googleapis\.com)?"
+ )
+
+ m = mtls_endpoint_re.match(api_endpoint)
+ name, mtls, sandbox, googledomain = m.groups()
+ if mtls or not googledomain:
+ return api_endpoint
+
+ if sandbox:
+ return api_endpoint.replace(
+ "sandbox.googleapis.com", "mtls.sandbox.googleapis.com"
+ )
+
+ return api_endpoint.replace(".googleapis.com", ".mtls.googleapis.com")
+
+ DEFAULT_ENDPOINT = "aiplatform.googleapis.com"
+ DEFAULT_MTLS_ENDPOINT = _get_default_mtls_endpoint.__func__( # type: ignore
+ DEFAULT_ENDPOINT
+ )
+
+ @classmethod
+ def from_service_account_info(cls, info: dict, *args, **kwargs):
+ """Creates an instance of this client using the provided credentials info.
+
+ Args:
+ info (dict): The service account private key info.
+ args: Additional arguments to pass to the constructor.
+ kwargs: Additional arguments to pass to the constructor.
+
+ Returns:
+ MigrationServiceClient: The constructed client.
+ """
+ credentials = service_account.Credentials.from_service_account_info(info)
+ kwargs["credentials"] = credentials
+ return cls(*args, **kwargs)
+
+ @classmethod
+ def from_service_account_file(cls, filename: str, *args, **kwargs):
+ """Creates an instance of this client using the provided credentials
+ file.
+
+ Args:
+ filename (str): The path to the service account private key json
+ file.
+ args: Additional arguments to pass to the constructor.
+ kwargs: Additional arguments to pass to the constructor.
+
+ Returns:
+ MigrationServiceClient: The constructed client.
+ """
+ credentials = service_account.Credentials.from_service_account_file(filename)
+ kwargs["credentials"] = credentials
+ return cls(*args, **kwargs)
+
+ from_service_account_json = from_service_account_file
+
+ @property
+ def transport(self) -> MigrationServiceTransport:
+ """Return the transport used by the client instance.
+
+ Returns:
+ MigrationServiceTransport: The transport used by the client instance.
+ """
+ return self._transport
+
+ @staticmethod
+ def annotated_dataset_path(
+ project: str, dataset: str, annotated_dataset: str,
+ ) -> str:
+ """Return a fully-qualified annotated_dataset string."""
+ return "projects/{project}/datasets/{dataset}/annotatedDatasets/{annotated_dataset}".format(
+ project=project, dataset=dataset, annotated_dataset=annotated_dataset,
+ )
+
+ @staticmethod
+ def parse_annotated_dataset_path(path: str) -> Dict[str, str]:
+        """Parse an annotated_dataset path into its component segments."""
+        m = re.match(
+            r"^projects/(?P<project>.+?)/datasets/(?P<dataset>.+?)/annotatedDatasets/(?P<annotated_dataset>.+?)$",
+ path,
+ )
+ return m.groupdict() if m else {}
+
+ @staticmethod
+ def dataset_path(project: str, location: str, dataset: str,) -> str:
+ """Return a fully-qualified dataset string."""
+ return "projects/{project}/locations/{location}/datasets/{dataset}".format(
+ project=project, location=location, dataset=dataset,
+ )
+
+ @staticmethod
+ def parse_dataset_path(path: str) -> Dict[str, str]:
+ """Parse a dataset path into its component segments."""
+ m = re.match(
+            r"^projects/(?P<project>.+?)/locations/(?P<location>.+?)/datasets/(?P<dataset>.+?)$",
+ path,
+ )
+ return m.groupdict() if m else {}
+
+ @staticmethod
+ def dataset_path(project: str, location: str, dataset: str,) -> str:
+ """Return a fully-qualified dataset string."""
+ return "projects/{project}/locations/{location}/datasets/{dataset}".format(
+ project=project, location=location, dataset=dataset,
+ )
+
+ @staticmethod
+ def parse_dataset_path(path: str) -> Dict[str, str]:
+ """Parse a dataset path into its component segments."""
+ m = re.match(
+            r"^projects/(?P<project>.+?)/locations/(?P<location>.+?)/datasets/(?P<dataset>.+?)$",
+ path,
+ )
+ return m.groupdict() if m else {}
+
+ @staticmethod
+ def dataset_path(project: str, dataset: str,) -> str:
+ """Return a fully-qualified dataset string."""
+ return "projects/{project}/datasets/{dataset}".format(
+ project=project, dataset=dataset,
+ )
+
+ @staticmethod
+ def parse_dataset_path(path: str) -> Dict[str, str]:
+ """Parse a dataset path into its component segments."""
+        m = re.match(r"^projects/(?P<project>.+?)/datasets/(?P<dataset>.+?)$", path)
+ return m.groupdict() if m else {}
+
+ @staticmethod
+ def model_path(project: str, location: str, model: str,) -> str:
+ """Return a fully-qualified model string."""
+ return "projects/{project}/locations/{location}/models/{model}".format(
+ project=project, location=location, model=model,
+ )
+
+ @staticmethod
+ def parse_model_path(path: str) -> Dict[str, str]:
+ """Parse a model path into its component segments."""
+ m = re.match(
+            r"^projects/(?P<project>.+?)/locations/(?P<location>.+?)/models/(?P<model>.+?)$",
+ path,
+ )
+ return m.groupdict() if m else {}
+
+ @staticmethod
+ def model_path(project: str, location: str, model: str,) -> str:
+ """Return a fully-qualified model string."""
+ return "projects/{project}/locations/{location}/models/{model}".format(
+ project=project, location=location, model=model,
+ )
+
+ @staticmethod
+ def parse_model_path(path: str) -> Dict[str, str]:
+ """Parse a model path into its component segments."""
+ m = re.match(
+            r"^projects/(?P<project>.+?)/locations/(?P<location>.+?)/models/(?P<model>.+?)$",
+ path,
+ )
+ return m.groupdict() if m else {}
+
+ @staticmethod
+ def version_path(project: str, model: str, version: str,) -> str:
+ """Return a fully-qualified version string."""
+ return "projects/{project}/models/{model}/versions/{version}".format(
+ project=project, model=model, version=version,
+ )
+
+ @staticmethod
+ def parse_version_path(path: str) -> Dict[str, str]:
+ """Parse a version path into its component segments."""
+ m = re.match(
+            r"^projects/(?P<project>.+?)/models/(?P<model>.+?)/versions/(?P<version>.+?)$",
+ path,
+ )
+ return m.groupdict() if m else {}
+
+ @staticmethod
+ def common_billing_account_path(billing_account: str,) -> str:
+ """Return a fully-qualified billing_account string."""
+ return "billingAccounts/{billing_account}".format(
+ billing_account=billing_account,
+ )
+
+ @staticmethod
+ def parse_common_billing_account_path(path: str) -> Dict[str, str]:
+ """Parse a billing_account path into its component segments."""
+        m = re.match(r"^billingAccounts/(?P<billing_account>.+?)$", path)
+ return m.groupdict() if m else {}
+
+ @staticmethod
+ def common_folder_path(folder: str,) -> str:
+ """Return a fully-qualified folder string."""
+ return "folders/{folder}".format(folder=folder,)
+
+ @staticmethod
+ def parse_common_folder_path(path: str) -> Dict[str, str]:
+ """Parse a folder path into its component segments."""
+        m = re.match(r"^folders/(?P<folder>.+?)$", path)
+ return m.groupdict() if m else {}
+
+ @staticmethod
+ def common_organization_path(organization: str,) -> str:
+ """Return a fully-qualified organization string."""
+ return "organizations/{organization}".format(organization=organization,)
+
+ @staticmethod
+ def parse_common_organization_path(path: str) -> Dict[str, str]:
+        """Parse an organization path into its component segments."""
+        m = re.match(r"^organizations/(?P<organization>.+?)$", path)
+ return m.groupdict() if m else {}
+
+ @staticmethod
+ def common_project_path(project: str,) -> str:
+ """Return a fully-qualified project string."""
+ return "projects/{project}".format(project=project,)
+
+ @staticmethod
+ def parse_common_project_path(path: str) -> Dict[str, str]:
+ """Parse a project path into its component segments."""
+        m = re.match(r"^projects/(?P<project>.+?)$", path)
+ return m.groupdict() if m else {}
+
+ @staticmethod
+ def common_location_path(project: str, location: str,) -> str:
+ """Return a fully-qualified location string."""
+ return "projects/{project}/locations/{location}".format(
+ project=project, location=location,
+ )
+
+ @staticmethod
+ def parse_common_location_path(path: str) -> Dict[str, str]:
+ """Parse a location path into its component segments."""
+        m = re.match(r"^projects/(?P<project>.+?)/locations/(?P<location>.+?)$", path)
+ return m.groupdict() if m else {}
+
+ def __init__(
+ self,
+ *,
+ credentials: Optional[credentials.Credentials] = None,
+ transport: Union[str, MigrationServiceTransport, None] = None,
+ client_options: Optional[client_options_lib.ClientOptions] = None,
+ client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
+ ) -> None:
+ """Instantiate the migration service client.
+
+ Args:
+ credentials (Optional[google.auth.credentials.Credentials]): The
+ authorization credentials to attach to requests. These
+ credentials identify the application to the service; if none
+ are specified, the client will attempt to ascertain the
+ credentials from the environment.
+ transport (Union[str, MigrationServiceTransport]): The
+ transport to use. If set to None, a transport is chosen
+ automatically.
+ client_options (google.api_core.client_options.ClientOptions): Custom options for the
+ client. It won't take effect if a ``transport`` instance is provided.
+ (1) The ``api_endpoint`` property can be used to override the
+ default endpoint provided by the client. GOOGLE_API_USE_MTLS_ENDPOINT
+ environment variable can also be used to override the endpoint:
+ "always" (always use the default mTLS endpoint), "never" (always
+ use the default regular endpoint) and "auto" (auto switch to the
+ default mTLS endpoint if client certificate is present, this is
+ the default value). However, the ``api_endpoint`` property takes
+ precedence if provided.
+ (2) If GOOGLE_API_USE_CLIENT_CERTIFICATE environment variable
+ is "true", then the ``client_cert_source`` property can be used
+ to provide client certificate for mutual TLS transport. If
+ not provided, the default SSL client certificate will be used if
+ present. If GOOGLE_API_USE_CLIENT_CERTIFICATE is "false" or not
+ set, no client certificate will be used.
+ client_info (google.api_core.gapic_v1.client_info.ClientInfo):
+ The client info used to send a user-agent string along with
+ API requests. If ``None``, then default info will be used.
+ Generally, you only need to set this if you're developing
+ your own client library.
+
+ Raises:
+ google.auth.exceptions.MutualTLSChannelError: If mutual TLS transport
+ creation failed for any reason.
+ """
+ if isinstance(client_options, dict):
+ client_options = client_options_lib.from_dict(client_options)
+ if client_options is None:
+ client_options = client_options_lib.ClientOptions()
+
+ # Create SSL credentials for mutual TLS if needed.
+ use_client_cert = bool(
+ util.strtobool(os.getenv("GOOGLE_API_USE_CLIENT_CERTIFICATE", "false"))
+ )
+
+ client_cert_source_func = None
+ is_mtls = False
+ if use_client_cert:
+ if client_options.client_cert_source:
+ is_mtls = True
+ client_cert_source_func = client_options.client_cert_source
+ else:
+ is_mtls = mtls.has_default_client_cert_source()
+ client_cert_source_func = (
+ mtls.default_client_cert_source() if is_mtls else None
+ )
+
+ # Figure out which api endpoint to use.
+ if client_options.api_endpoint is not None:
+ api_endpoint = client_options.api_endpoint
+ else:
+ use_mtls_env = os.getenv("GOOGLE_API_USE_MTLS_ENDPOINT", "auto")
+ if use_mtls_env == "never":
+ api_endpoint = self.DEFAULT_ENDPOINT
+ elif use_mtls_env == "always":
+ api_endpoint = self.DEFAULT_MTLS_ENDPOINT
+ elif use_mtls_env == "auto":
+ api_endpoint = (
+ self.DEFAULT_MTLS_ENDPOINT if is_mtls else self.DEFAULT_ENDPOINT
+ )
+ else:
+ raise MutualTLSChannelError(
+ "Unsupported GOOGLE_API_USE_MTLS_ENDPOINT value. Accepted values: never, auto, always"
+ )
+
+ # Save or instantiate the transport.
+ # Ordinarily, we provide the transport, but allowing a custom transport
+ # instance provides an extensibility point for unusual situations.
+ if isinstance(transport, MigrationServiceTransport):
+ # transport is a MigrationServiceTransport instance.
+ if credentials or client_options.credentials_file:
+ raise ValueError(
+ "When providing a transport instance, "
+ "provide its credentials directly."
+ )
+ if client_options.scopes:
+ raise ValueError(
+ "When providing a transport instance, "
+ "provide its scopes directly."
+ )
+ self._transport = transport
+ else:
+ Transport = type(self).get_transport_class(transport)
+ self._transport = Transport(
+ credentials=credentials,
+ credentials_file=client_options.credentials_file,
+ host=api_endpoint,
+ scopes=client_options.scopes,
+ client_cert_source_for_mtls=client_cert_source_func,
+ quota_project_id=client_options.quota_project_id,
+ client_info=client_info,
+ )
+
+ def search_migratable_resources(
+ self,
+ request: migration_service.SearchMigratableResourcesRequest = None,
+ *,
+ parent: str = None,
+ retry: retries.Retry = gapic_v1.method.DEFAULT,
+ timeout: float = None,
+ metadata: Sequence[Tuple[str, str]] = (),
+ ) -> pagers.SearchMigratableResourcesPager:
+ r"""Searches all of the resources in
+ automl.googleapis.com, datalabeling.googleapis.com and
+ ml.googleapis.com that can be migrated to AI Platform's
+ given location.
+
+ Args:
+ request (google.cloud.aiplatform_v1.types.SearchMigratableResourcesRequest):
+ The request object. Request message for
+ ``MigrationService.SearchMigratableResources``.
+ parent (str):
+ Required. The location that the migratable resources
+ should be searched from. It's the AI Platform location
+ that the resources can be migrated to, not the
+ resources' original location. Format:
+ ``projects/{project}/locations/{location}``
+
+ This corresponds to the ``parent`` field
+ on the ``request`` instance; if ``request`` is provided, this
+ should not be set.
+
+ retry (google.api_core.retry.Retry): Designation of what errors, if any,
+ should be retried.
+ timeout (float): The timeout for this request.
+ metadata (Sequence[Tuple[str, str]]): Strings which should be
+ sent along with the request as metadata.
+
+ Returns:
+ google.cloud.aiplatform_v1.services.migration_service.pagers.SearchMigratableResourcesPager:
+ Response message for
+ ``MigrationService.SearchMigratableResources``.
+
+ Iterating over this object will yield results and
+ resolve additional pages automatically.
+
+ """
+ # Create or coerce a protobuf request object.
+ # Sanity check: If we got a request object, we should *not* have
+ # gotten any keyword arguments that map to the request.
+ has_flattened_params = any([parent])
+ if request is not None and has_flattened_params:
+ raise ValueError(
+ "If the `request` argument is set, then none of "
+ "the individual field arguments should be set."
+ )
+
+ # Minor optimization to avoid making a copy if the user passes
+ # in a migration_service.SearchMigratableResourcesRequest.
+ # There's no risk of modifying the input as we've already verified
+ # there are no flattened fields.
+ if not isinstance(request, migration_service.SearchMigratableResourcesRequest):
+ request = migration_service.SearchMigratableResourcesRequest(request)
+
+ # If we have keyword arguments corresponding to fields on the
+ # request, apply these.
+
+ if parent is not None:
+ request.parent = parent
+
+ # Wrap the RPC method; this adds retry and timeout information,
+ # and friendly error handling.
+ rpc = self._transport._wrapped_methods[
+ self._transport.search_migratable_resources
+ ]
+
+ # Certain fields should be provided within the metadata header;
+ # add these here.
+ metadata = tuple(metadata) + (
+ gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)),
+ )
+
+ # Send the request.
+ response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,)
+
+ # This method is paged; wrap the response in a pager, which provides
+ # an `__iter__` convenience method.
+ response = pagers.SearchMigratableResourcesPager(
+ method=rpc, request=request, response=response, metadata=metadata,
+ )
+
+ # Done; return the response.
+ return response
+
+ def batch_migrate_resources(
+ self,
+ request: migration_service.BatchMigrateResourcesRequest = None,
+ *,
+ parent: str = None,
+ migrate_resource_requests: Sequence[
+ migration_service.MigrateResourceRequest
+ ] = None,
+ retry: retries.Retry = gapic_v1.method.DEFAULT,
+ timeout: float = None,
+ metadata: Sequence[Tuple[str, str]] = (),
+ ) -> operation.Operation:
+ r"""Batch migrates resources from ml.googleapis.com,
+ automl.googleapis.com, and datalabeling.googleapis.com
+ to AI Platform (Unified).
+
+ Args:
+ request (google.cloud.aiplatform_v1.types.BatchMigrateResourcesRequest):
+ The request object. Request message for
+ ``MigrationService.BatchMigrateResources``.
+ parent (str):
+                Required. The location where the migrated
+                resources will live. Format:
+ ``projects/{project}/locations/{location}``
+
+ This corresponds to the ``parent`` field
+ on the ``request`` instance; if ``request`` is provided, this
+ should not be set.
+ migrate_resource_requests (Sequence[google.cloud.aiplatform_v1.types.MigrateResourceRequest]):
+ Required. The request messages
+ specifying the resources to migrate.
+ They must be in the same location as the
+ destination. Up to 50 resources can be
+ migrated in one batch.
+
+ This corresponds to the ``migrate_resource_requests`` field
+ on the ``request`` instance; if ``request`` is provided, this
+ should not be set.
+
+ retry (google.api_core.retry.Retry): Designation of what errors, if any,
+ should be retried.
+ timeout (float): The timeout for this request.
+ metadata (Sequence[Tuple[str, str]]): Strings which should be
+ sent along with the request as metadata.
+
+ Returns:
+ google.api_core.operation.Operation:
+ An object representing a long-running operation.
+
+ The result type for the operation will be
+ :class:`google.cloud.aiplatform_v1.types.BatchMigrateResourcesResponse`
+ Response message for
+ ``MigrationService.BatchMigrateResources``.
+
+ """
+ # Create or coerce a protobuf request object.
+ # Sanity check: If we got a request object, we should *not* have
+ # gotten any keyword arguments that map to the request.
+ has_flattened_params = any([parent, migrate_resource_requests])
+ if request is not None and has_flattened_params:
+ raise ValueError(
+ "If the `request` argument is set, then none of "
+ "the individual field arguments should be set."
+ )
+
+ # Minor optimization to avoid making a copy if the user passes
+ # in a migration_service.BatchMigrateResourcesRequest.
+ # There's no risk of modifying the input as we've already verified
+ # there are no flattened fields.
+ if not isinstance(request, migration_service.BatchMigrateResourcesRequest):
+ request = migration_service.BatchMigrateResourcesRequest(request)
+
+ # If we have keyword arguments corresponding to fields on the
+ # request, apply these.
+
+ if parent is not None:
+ request.parent = parent
+
+ if migrate_resource_requests:
+ request.migrate_resource_requests.extend(migrate_resource_requests)
+
+ # Wrap the RPC method; this adds retry and timeout information,
+ # and friendly error handling.
+ rpc = self._transport._wrapped_methods[self._transport.batch_migrate_resources]
+
+ # Certain fields should be provided within the metadata header;
+ # add these here.
+ metadata = tuple(metadata) + (
+ gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)),
+ )
+
+ # Send the request.
+ response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,)
+
+ # Wrap the response in an operation future.
+ response = operation.from_gapic(
+ response,
+ self._transport.operations_client,
+ migration_service.BatchMigrateResourcesResponse,
+ metadata_type=migration_service.BatchMigrateResourcesOperationMetadata,
+ )
+
+ # Done; return the response.
+ return response
+
+
+try:
+ DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo(
+ gapic_version=pkg_resources.get_distribution(
+ "google-cloud-aiplatform",
+ ).version,
+ )
+except pkg_resources.DistributionNotFound:
+ DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo()
+
+
+__all__ = ("MigrationServiceClient",)
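
A minimal sketch of the synchronous client above, blocking on the long-running operation returned by ``batch_migrate_resources``. The nested ``MigrateMlEngineModelVersionConfig`` fields are assumed from the v1 protos (not shown in this hunk), and all resource names are placeholders:

    from google.cloud import aiplatform_v1

    client = aiplatform_v1.MigrationServiceClient()

    request = aiplatform_v1.BatchMigrateResourcesRequest(
        parent="projects/my-project/locations/us-central1",
        migrate_resource_requests=[
            aiplatform_v1.MigrateResourceRequest(
                migrate_ml_engine_model_version_config=aiplatform_v1.MigrateResourceRequest.MigrateMlEngineModelVersionConfig(
                    endpoint="us-central1-ml.googleapis.com",
                    model_version="projects/my-project/models/my_model/versions/v1",
                    model_display_name="my-migrated-model",
                ),
            ),
        ],
    )

    # batch_migrate_resources returns a google.api_core.operation.Operation;
    # result() blocks until the backend completes the migration.
    operation = client.batch_migrate_resources(request=request)
    response = operation.result()
    print(response)
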
diff --git a/google/cloud/aiplatform_v1/services/migration_service/pagers.py b/google/cloud/aiplatform_v1/services/migration_service/pagers.py
new file mode 100644
index 0000000000..b7d9f4ae44
--- /dev/null
+++ b/google/cloud/aiplatform_v1/services/migration_service/pagers.py
@@ -0,0 +1,153 @@
+# -*- coding: utf-8 -*-
+
+# Copyright 2020 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+from typing import Any, AsyncIterable, Awaitable, Callable, Iterable, Sequence, Tuple
+
+from google.cloud.aiplatform_v1.types import migratable_resource
+from google.cloud.aiplatform_v1.types import migration_service
+
+
+class SearchMigratableResourcesPager:
+ """A pager for iterating through ``search_migratable_resources`` requests.
+
+ This class thinly wraps an initial
+ :class:`google.cloud.aiplatform_v1.types.SearchMigratableResourcesResponse` object, and
+ provides an ``__iter__`` method to iterate through its
+ ``migratable_resources`` field.
+
+ If there are more pages, the ``__iter__`` method will make additional
+ ``SearchMigratableResources`` requests and continue to iterate
+ through the ``migratable_resources`` field on the
+ corresponding responses.
+
+ All the usual :class:`google.cloud.aiplatform_v1.types.SearchMigratableResourcesResponse`
+ attributes are available on the pager. If multiple requests are made, only
+ the most recent response is retained, and thus used for attribute lookup.
+ """
+
+ def __init__(
+ self,
+ method: Callable[..., migration_service.SearchMigratableResourcesResponse],
+ request: migration_service.SearchMigratableResourcesRequest,
+ response: migration_service.SearchMigratableResourcesResponse,
+ *,
+ metadata: Sequence[Tuple[str, str]] = ()
+ ):
+ """Instantiate the pager.
+
+ Args:
+ method (Callable): The method that was originally called, and
+ which instantiated this pager.
+ request (google.cloud.aiplatform_v1.types.SearchMigratableResourcesRequest):
+ The initial request object.
+ response (google.cloud.aiplatform_v1.types.SearchMigratableResourcesResponse):
+ The initial response object.
+ metadata (Sequence[Tuple[str, str]]): Strings which should be
+ sent along with the request as metadata.
+ """
+ self._method = method
+ self._request = migration_service.SearchMigratableResourcesRequest(request)
+ self._response = response
+ self._metadata = metadata
+
+ def __getattr__(self, name: str) -> Any:
+ return getattr(self._response, name)
+
+ @property
+ def pages(self) -> Iterable[migration_service.SearchMigratableResourcesResponse]:
+ yield self._response
+ while self._response.next_page_token:
+ self._request.page_token = self._response.next_page_token
+ self._response = self._method(self._request, metadata=self._metadata)
+ yield self._response
+
+ def __iter__(self) -> Iterable[migratable_resource.MigratableResource]:
+ for page in self.pages:
+ yield from page.migratable_resources
+
+ def __repr__(self) -> str:
+ return "{0}<{1!r}>".format(self.__class__.__name__, self._response)
+
+
+class SearchMigratableResourcesAsyncPager:
+ """A pager for iterating through ``search_migratable_resources`` requests.
+
+ This class thinly wraps an initial
+ :class:`google.cloud.aiplatform_v1.types.SearchMigratableResourcesResponse` object, and
+ provides an ``__aiter__`` method to iterate through its
+ ``migratable_resources`` field.
+
+ If there are more pages, the ``__aiter__`` method will make additional
+ ``SearchMigratableResources`` requests and continue to iterate
+ through the ``migratable_resources`` field on the
+ corresponding responses.
+
+ All the usual :class:`google.cloud.aiplatform_v1.types.SearchMigratableResourcesResponse`
+ attributes are available on the pager. If multiple requests are made, only
+ the most recent response is retained, and thus used for attribute lookup.
+ """
+
+ def __init__(
+ self,
+ method: Callable[
+ ..., Awaitable[migration_service.SearchMigratableResourcesResponse]
+ ],
+ request: migration_service.SearchMigratableResourcesRequest,
+ response: migration_service.SearchMigratableResourcesResponse,
+ *,
+ metadata: Sequence[Tuple[str, str]] = ()
+ ):
+ """Instantiate the pager.
+
+ Args:
+ method (Callable): The method that was originally called, and
+ which instantiated this pager.
+ request (google.cloud.aiplatform_v1.types.SearchMigratableResourcesRequest):
+ The initial request object.
+ response (google.cloud.aiplatform_v1.types.SearchMigratableResourcesResponse):
+ The initial response object.
+ metadata (Sequence[Tuple[str, str]]): Strings which should be
+ sent along with the request as metadata.
+ """
+ self._method = method
+ self._request = migration_service.SearchMigratableResourcesRequest(request)
+ self._response = response
+ self._metadata = metadata
+
+ def __getattr__(self, name: str) -> Any:
+ return getattr(self._response, name)
+
+ @property
+ async def pages(
+ self,
+ ) -> AsyncIterable[migration_service.SearchMigratableResourcesResponse]:
+ yield self._response
+ while self._response.next_page_token:
+ self._request.page_token = self._response.next_page_token
+ self._response = await self._method(self._request, metadata=self._metadata)
+ yield self._response
+
+ def __aiter__(self) -> AsyncIterable[migratable_resource.MigratableResource]:
+ async def async_generator():
+ async for page in self.pages:
+ for response in page.migratable_resources:
+ yield response
+
+ return async_generator()
+
+ def __repr__(self) -> str:
+ return "{0}<{1!r}>".format(self.__class__.__name__, self._response)
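
The two pagers above differ only in the iteration protocol they expose. A small sketch of the synchronous variant, assuming an authenticated ``MigrationServiceClient`` as defined earlier in this change and a placeholder parent:

    from google.cloud import aiplatform_v1

    client = aiplatform_v1.MigrationServiceClient()
    parent = "projects/my-project/locations/us-central1"

    # Item-level iteration: the pager fetches further pages as needed.
    for resource in client.search_migratable_resources(parent=parent):
        print(resource)

    # Page-level iteration: each page is a full
    # SearchMigratableResourcesResponse message.
    for page in client.search_migratable_resources(parent=parent).pages:
        print(len(page.migratable_resources))
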
diff --git a/google/cloud/aiplatform_v1/services/migration_service/transports/__init__.py b/google/cloud/aiplatform_v1/services/migration_service/transports/__init__.py
new file mode 100644
index 0000000000..38c72756f6
--- /dev/null
+++ b/google/cloud/aiplatform_v1/services/migration_service/transports/__init__.py
@@ -0,0 +1,35 @@
+# -*- coding: utf-8 -*-
+
+# Copyright 2020 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+from collections import OrderedDict
+from typing import Dict, Type
+
+from .base import MigrationServiceTransport
+from .grpc import MigrationServiceGrpcTransport
+from .grpc_asyncio import MigrationServiceGrpcAsyncIOTransport
+
+
+# Compile a registry of transports.
+_transport_registry = OrderedDict() # type: Dict[str, Type[MigrationServiceTransport]]
+_transport_registry["grpc"] = MigrationServiceGrpcTransport
+_transport_registry["grpc_asyncio"] = MigrationServiceGrpcAsyncIOTransport
+
+__all__ = (
+ "MigrationServiceTransport",
+ "MigrationServiceGrpcTransport",
+ "MigrationServiceGrpcAsyncIOTransport",
+)
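
The registry above is what the client metaclass consults when ``transport`` is passed as a string. A rough sketch of the two ways a transport can be selected (application default credentials are assumed to be available in the environment):

    from google.cloud import aiplatform_v1
    from google.cloud.aiplatform_v1.services.migration_service.transports import (
        MigrationServiceGrpcTransport,
    )

    # 1. By label: looked up in the transport registry ("grpc" or "grpc_asyncio").
    client = aiplatform_v1.MigrationServiceClient(transport="grpc")

    # 2. By instance: useful for custom channels; credentials are then
    #    resolved by the transport itself rather than by the client.
    transport = MigrationServiceGrpcTransport(host="aiplatform.googleapis.com")
    client = aiplatform_v1.MigrationServiceClient(transport=transport)
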
diff --git a/google/cloud/aiplatform_v1/services/migration_service/transports/base.py b/google/cloud/aiplatform_v1/services/migration_service/transports/base.py
new file mode 100644
index 0000000000..da4cabae63
--- /dev/null
+++ b/google/cloud/aiplatform_v1/services/migration_service/transports/base.py
@@ -0,0 +1,150 @@
+# -*- coding: utf-8 -*-
+
+# Copyright 2020 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+import abc
+import typing
+import pkg_resources
+
+from google import auth # type: ignore
+from google.api_core import exceptions # type: ignore
+from google.api_core import gapic_v1 # type: ignore
+from google.api_core import retry as retries # type: ignore
+from google.api_core import operations_v1 # type: ignore
+from google.auth import credentials # type: ignore
+
+from google.cloud.aiplatform_v1.types import migration_service
+from google.longrunning import operations_pb2 as operations # type: ignore
+
+
+try:
+ DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo(
+ gapic_version=pkg_resources.get_distribution(
+ "google-cloud-aiplatform",
+ ).version,
+ )
+except pkg_resources.DistributionNotFound:
+ DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo()
+
+
+class MigrationServiceTransport(abc.ABC):
+ """Abstract transport class for MigrationService."""
+
+ AUTH_SCOPES = ("https://www.googleapis.com/auth/cloud-platform",)
+
+ def __init__(
+ self,
+ *,
+ host: str = "aiplatform.googleapis.com",
+ credentials: credentials.Credentials = None,
+ credentials_file: typing.Optional[str] = None,
+ scopes: typing.Optional[typing.Sequence[str]] = AUTH_SCOPES,
+ quota_project_id: typing.Optional[str] = None,
+ client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
+ **kwargs,
+ ) -> None:
+ """Instantiate the transport.
+
+ Args:
+ host (Optional[str]): The hostname to connect to.
+ credentials (Optional[google.auth.credentials.Credentials]): The
+ authorization credentials to attach to requests. These
+ credentials identify the application to the service; if none
+ are specified, the client will attempt to ascertain the
+ credentials from the environment.
+ credentials_file (Optional[str]): A file with credentials that can
+ be loaded with :func:`google.auth.load_credentials_from_file`.
+ This argument is mutually exclusive with credentials.
+            scopes (Optional[Sequence[str]]): A list of scopes.
+ quota_project_id (Optional[str]): An optional project to use for billing
+ and quota.
+ client_info (google.api_core.gapic_v1.client_info.ClientInfo):
+ The client info used to send a user-agent string along with
+ API requests. If ``None``, then default info will be used.
+ Generally, you only need to set this if you're developing
+ your own client library.
+ """
+ # Save the hostname. Default to port 443 (HTTPS) if none is specified.
+ if ":" not in host:
+ host += ":443"
+ self._host = host
+
+ # If no credentials are provided, then determine the appropriate
+ # defaults.
+ if credentials and credentials_file:
+ raise exceptions.DuplicateCredentialArgs(
+ "'credentials_file' and 'credentials' are mutually exclusive"
+ )
+
+ if credentials_file is not None:
+ credentials, _ = auth.load_credentials_from_file(
+ credentials_file, scopes=scopes, quota_project_id=quota_project_id
+ )
+
+ elif credentials is None:
+ credentials, _ = auth.default(
+ scopes=scopes, quota_project_id=quota_project_id
+ )
+
+ # Save the credentials.
+ self._credentials = credentials
+
+ # Lifted into its own function so it can be stubbed out during tests.
+ self._prep_wrapped_messages(client_info)
+
+ def _prep_wrapped_messages(self, client_info):
+ # Precompute the wrapped methods.
+ self._wrapped_methods = {
+ self.search_migratable_resources: gapic_v1.method.wrap_method(
+ self.search_migratable_resources,
+ default_timeout=None,
+ client_info=client_info,
+ ),
+ self.batch_migrate_resources: gapic_v1.method.wrap_method(
+ self.batch_migrate_resources,
+ default_timeout=None,
+ client_info=client_info,
+ ),
+ }
+
+ @property
+ def operations_client(self) -> operations_v1.OperationsClient:
+ """Return the client designed to process long-running operations."""
+ raise NotImplementedError()
+
+ @property
+ def search_migratable_resources(
+ self,
+ ) -> typing.Callable[
+ [migration_service.SearchMigratableResourcesRequest],
+ typing.Union[
+ migration_service.SearchMigratableResourcesResponse,
+ typing.Awaitable[migration_service.SearchMigratableResourcesResponse],
+ ],
+ ]:
+ raise NotImplementedError()
+
+ @property
+ def batch_migrate_resources(
+ self,
+ ) -> typing.Callable[
+ [migration_service.BatchMigrateResourcesRequest],
+ typing.Union[operations.Operation, typing.Awaitable[operations.Operation]],
+ ]:
+ raise NotImplementedError()
+
+
+__all__ = ("MigrationServiceTransport",)
diff --git a/google/cloud/aiplatform_v1/services/migration_service/transports/grpc.py b/google/cloud/aiplatform_v1/services/migration_service/transports/grpc.py
new file mode 100644
index 0000000000..820a38a028
--- /dev/null
+++ b/google/cloud/aiplatform_v1/services/migration_service/transports/grpc.py
@@ -0,0 +1,333 @@
+# -*- coding: utf-8 -*-
+
+# Copyright 2020 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+import warnings
+from typing import Callable, Dict, Optional, Sequence, Tuple
+
+from google.api_core import grpc_helpers # type: ignore
+from google.api_core import operations_v1 # type: ignore
+from google.api_core import gapic_v1 # type: ignore
+from google import auth # type: ignore
+from google.auth import credentials # type: ignore
+from google.auth.transport.grpc import SslCredentials # type: ignore
+
+import grpc # type: ignore
+
+from google.cloud.aiplatform_v1.types import migration_service
+from google.longrunning import operations_pb2 as operations # type: ignore
+
+from .base import MigrationServiceTransport, DEFAULT_CLIENT_INFO
+
+
+class MigrationServiceGrpcTransport(MigrationServiceTransport):
+ """gRPC backend transport for MigrationService.
+
+ A service that migrates resources from automl.googleapis.com,
+ datalabeling.googleapis.com and ml.googleapis.com to AI
+ Platform.
+
+ This class defines the same methods as the primary client, so the
+ primary client can load the underlying transport implementation
+ and call it.
+
+ It sends protocol buffers over the wire using gRPC (which is built on
+ top of HTTP/2); the ``grpcio`` package must be installed.
+ """
+
+ _stubs: Dict[str, Callable]
+
+ def __init__(
+ self,
+ *,
+ host: str = "aiplatform.googleapis.com",
+ credentials: credentials.Credentials = None,
+ credentials_file: str = None,
+ scopes: Sequence[str] = None,
+ channel: grpc.Channel = None,
+ api_mtls_endpoint: str = None,
+ client_cert_source: Callable[[], Tuple[bytes, bytes]] = None,
+ ssl_channel_credentials: grpc.ChannelCredentials = None,
+ client_cert_source_for_mtls: Callable[[], Tuple[bytes, bytes]] = None,
+ quota_project_id: Optional[str] = None,
+ client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
+ ) -> None:
+ """Instantiate the transport.
+
+ Args:
+ host (Optional[str]): The hostname to connect to.
+ credentials (Optional[google.auth.credentials.Credentials]): The
+ authorization credentials to attach to requests. These
+ credentials identify the application to the service; if none
+ are specified, the client will attempt to ascertain the
+ credentials from the environment.
+ This argument is ignored if ``channel`` is provided.
+ credentials_file (Optional[str]): A file with credentials that can
+ be loaded with :func:`google.auth.load_credentials_from_file`.
+ This argument is ignored if ``channel`` is provided.
+            scopes (Optional[Sequence[str]]): A list of scopes. This argument is
+ ignored if ``channel`` is provided.
+ channel (Optional[grpc.Channel]): A ``Channel`` instance through
+ which to make calls.
+ api_mtls_endpoint (Optional[str]): Deprecated. The mutual TLS endpoint.
+ If provided, it overrides the ``host`` argument and tries to create
+ a mutual TLS channel with client SSL credentials from
+                ``client_cert_source`` or application default SSL credentials.
+ client_cert_source (Optional[Callable[[], Tuple[bytes, bytes]]]):
+ Deprecated. A callback to provide client SSL certificate bytes and
+ private key bytes, both in PEM format. It is ignored if
+ ``api_mtls_endpoint`` is None.
+ ssl_channel_credentials (grpc.ChannelCredentials): SSL credentials
+ for grpc channel. It is ignored if ``channel`` is provided.
+ client_cert_source_for_mtls (Optional[Callable[[], Tuple[bytes, bytes]]]):
+ A callback to provide client certificate bytes and private key bytes,
+ both in PEM format. It is used to configure mutual TLS channel. It is
+ ignored if ``channel`` or ``ssl_channel_credentials`` is provided.
+ quota_project_id (Optional[str]): An optional project to use for billing
+ and quota.
+ client_info (google.api_core.gapic_v1.client_info.ClientInfo):
+ The client info used to send a user-agent string along with
+ API requests. If ``None``, then default info will be used.
+ Generally, you only need to set this if you're developing
+ your own client library.
+
+ Raises:
+ google.auth.exceptions.MutualTLSChannelError: If mutual TLS transport
+ creation failed for any reason.
+ google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials``
+ and ``credentials_file`` are passed.
+ """
+ self._ssl_channel_credentials = ssl_channel_credentials
+
+ if api_mtls_endpoint:
+ warnings.warn("api_mtls_endpoint is deprecated", DeprecationWarning)
+ if client_cert_source:
+ warnings.warn("client_cert_source is deprecated", DeprecationWarning)
+
+ if channel:
+ # Sanity check: Ensure that channel and credentials are not both
+ # provided.
+ credentials = False
+
+ # If a channel was explicitly provided, set it.
+ self._grpc_channel = channel
+ self._ssl_channel_credentials = None
+ elif api_mtls_endpoint:
+ host = (
+ api_mtls_endpoint
+ if ":" in api_mtls_endpoint
+ else api_mtls_endpoint + ":443"
+ )
+
+ if credentials is None:
+ credentials, _ = auth.default(
+ scopes=self.AUTH_SCOPES, quota_project_id=quota_project_id
+ )
+
+ # Create SSL credentials with client_cert_source or application
+ # default SSL credentials.
+ if client_cert_source:
+ cert, key = client_cert_source()
+ ssl_credentials = grpc.ssl_channel_credentials(
+ certificate_chain=cert, private_key=key
+ )
+ else:
+ ssl_credentials = SslCredentials().ssl_credentials
+
+ # create a new channel. The provided one is ignored.
+ self._grpc_channel = type(self).create_channel(
+ host,
+ credentials=credentials,
+ credentials_file=credentials_file,
+ ssl_credentials=ssl_credentials,
+ scopes=scopes or self.AUTH_SCOPES,
+ quota_project_id=quota_project_id,
+ options=[
+ ("grpc.max_send_message_length", -1),
+ ("grpc.max_receive_message_length", -1),
+ ],
+ )
+ self._ssl_channel_credentials = ssl_credentials
+ else:
+ host = host if ":" in host else host + ":443"
+
+ if credentials is None:
+ credentials, _ = auth.default(
+ scopes=self.AUTH_SCOPES, quota_project_id=quota_project_id
+ )
+
+ if client_cert_source_for_mtls and not ssl_channel_credentials:
+ cert, key = client_cert_source_for_mtls()
+ self._ssl_channel_credentials = grpc.ssl_channel_credentials(
+ certificate_chain=cert, private_key=key
+ )
+
+ # create a new channel. The provided one is ignored.
+ self._grpc_channel = type(self).create_channel(
+ host,
+ credentials=credentials,
+ credentials_file=credentials_file,
+ ssl_credentials=self._ssl_channel_credentials,
+ scopes=scopes or self.AUTH_SCOPES,
+ quota_project_id=quota_project_id,
+ options=[
+ ("grpc.max_send_message_length", -1),
+ ("grpc.max_receive_message_length", -1),
+ ],
+ )
+
+ self._stubs = {} # type: Dict[str, Callable]
+ self._operations_client = None
+
+ # Run the base constructor.
+ super().__init__(
+ host=host,
+ credentials=credentials,
+ credentials_file=credentials_file,
+ scopes=scopes or self.AUTH_SCOPES,
+ quota_project_id=quota_project_id,
+ client_info=client_info,
+ )
+
+ @classmethod
+ def create_channel(
+ cls,
+ host: str = "aiplatform.googleapis.com",
+ credentials: credentials.Credentials = None,
+ credentials_file: str = None,
+ scopes: Optional[Sequence[str]] = None,
+ quota_project_id: Optional[str] = None,
+ **kwargs,
+ ) -> grpc.Channel:
+ """Create and return a gRPC channel object.
+ Args:
+            host (Optional[str]): The host for the channel to use.
+ credentials (Optional[~.Credentials]): The
+ authorization credentials to attach to requests. These
+ credentials identify this application to the service. If
+ none are specified, the client will attempt to ascertain
+ the credentials from the environment.
+ credentials_file (Optional[str]): A file with credentials that can
+ be loaded with :func:`google.auth.load_credentials_from_file`.
+ This argument is mutually exclusive with credentials.
+            scopes (Optional[Sequence[str]]): An optional list of scopes needed for this
+ service. These are only used when credentials are not specified and
+ are passed to :func:`google.auth.default`.
+ quota_project_id (Optional[str]): An optional project to use for billing
+ and quota.
+ kwargs (Optional[dict]): Keyword arguments, which are passed to the
+ channel creation.
+ Returns:
+ grpc.Channel: A gRPC channel object.
+
+ Raises:
+ google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials``
+ and ``credentials_file`` are passed.
+ """
+ scopes = scopes or cls.AUTH_SCOPES
+ return grpc_helpers.create_channel(
+ host,
+ credentials=credentials,
+ credentials_file=credentials_file,
+ scopes=scopes,
+ quota_project_id=quota_project_id,
+ **kwargs,
+ )
+
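+    # Illustrative usage of ``create_channel`` above (a sketch, not part of the
+    # generated surface): a channel built this way can be handed to the
+    # constructor via ``channel=``, in which case the credential arguments are
+    # ignored.
+    #
+    #     channel = MigrationServiceGrpcTransport.create_channel(
+    #         "aiplatform.googleapis.com"
+    #     )
+    #     transport = MigrationServiceGrpcTransport(channel=channel)
+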
+ @property
+ def grpc_channel(self) -> grpc.Channel:
+ """Return the channel designed to connect to this service.
+ """
+ return self._grpc_channel
+
+ @property
+ def operations_client(self) -> operations_v1.OperationsClient:
+ """Create the client designed to process long-running operations.
+
+ This property caches on the instance; repeated calls return the same
+ client.
+ """
+ # Sanity check: Only create a new client if we do not already have one.
+ if self._operations_client is None:
+ self._operations_client = operations_v1.OperationsClient(self.grpc_channel)
+
+ # Return the client from cache.
+ return self._operations_client
+
+ @property
+ def search_migratable_resources(
+ self,
+ ) -> Callable[
+ [migration_service.SearchMigratableResourcesRequest],
+ migration_service.SearchMigratableResourcesResponse,
+ ]:
+ r"""Return a callable for the search migratable resources method over gRPC.
+
+ Searches all of the resources in
+ automl.googleapis.com, datalabeling.googleapis.com and
+ ml.googleapis.com that can be migrated to AI Platform's
+ given location.
+
+ Returns:
+ Callable[[~.SearchMigratableResourcesRequest],
+ ~.SearchMigratableResourcesResponse]:
+ A function that, when called, will call the underlying RPC
+ on the server.
+ """
+ # Generate a "stub function" on-the-fly which will actually make
+ # the request.
+ # gRPC handles serialization and deserialization, so we just need
+ # to pass in the functions for each.
+ if "search_migratable_resources" not in self._stubs:
+ self._stubs["search_migratable_resources"] = self.grpc_channel.unary_unary(
+ "/google.cloud.aiplatform.v1.MigrationService/SearchMigratableResources",
+ request_serializer=migration_service.SearchMigratableResourcesRequest.serialize,
+ response_deserializer=migration_service.SearchMigratableResourcesResponse.deserialize,
+ )
+ return self._stubs["search_migratable_resources"]
+
+ @property
+ def batch_migrate_resources(
+ self,
+ ) -> Callable[
+ [migration_service.BatchMigrateResourcesRequest], operations.Operation
+ ]:
+ r"""Return a callable for the batch migrate resources method over gRPC.
+
+ Batch migrates resources from ml.googleapis.com,
+ automl.googleapis.com, and datalabeling.googleapis.com
+ to AI Platform (Unified).
+
+ Returns:
+ Callable[[~.BatchMigrateResourcesRequest],
+ ~.Operation]:
+ A function that, when called, will call the underlying RPC
+ on the server.
+ """
+ # Generate a "stub function" on-the-fly which will actually make
+ # the request.
+ # gRPC handles serialization and deserialization, so we just need
+ # to pass in the functions for each.
+ if "batch_migrate_resources" not in self._stubs:
+ self._stubs["batch_migrate_resources"] = self.grpc_channel.unary_unary(
+ "/google.cloud.aiplatform.v1.MigrationService/BatchMigrateResources",
+ request_serializer=migration_service.BatchMigrateResourcesRequest.serialize,
+ response_deserializer=operations.Operation.FromString,
+ )
+ return self._stubs["batch_migrate_resources"]
+
+
+__all__ = ("MigrationServiceGrpcTransport",)
diff --git a/google/cloud/aiplatform_v1/services/migration_service/transports/grpc_asyncio.py b/google/cloud/aiplatform_v1/services/migration_service/transports/grpc_asyncio.py
new file mode 100644
index 0000000000..dbdddf31e5
--- /dev/null
+++ b/google/cloud/aiplatform_v1/services/migration_service/transports/grpc_asyncio.py
@@ -0,0 +1,340 @@
+# -*- coding: utf-8 -*-
+
+# Copyright 2020 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+import warnings
+from typing import Awaitable, Callable, Dict, Optional, Sequence, Tuple
+
+from google.api_core import gapic_v1 # type: ignore
+from google.api_core import grpc_helpers_async # type: ignore
+from google.api_core import operations_v1 # type: ignore
+from google import auth # type: ignore
+from google.auth import credentials # type: ignore
+from google.auth.transport.grpc import SslCredentials # type: ignore
+
+import grpc # type: ignore
+from grpc.experimental import aio # type: ignore
+
+from google.cloud.aiplatform_v1.types import migration_service
+from google.longrunning import operations_pb2 as operations # type: ignore
+
+from .base import MigrationServiceTransport, DEFAULT_CLIENT_INFO
+from .grpc import MigrationServiceGrpcTransport
+
+
+class MigrationServiceGrpcAsyncIOTransport(MigrationServiceTransport):
+ """gRPC AsyncIO backend transport for MigrationService.
+
+ A service that migrates resources from automl.googleapis.com,
+ datalabeling.googleapis.com and ml.googleapis.com to AI
+ Platform.
+
+ This class defines the same methods as the primary client, so the
+ primary client can load the underlying transport implementation
+ and call it.
+
+ It sends protocol buffers over the wire using gRPC (which is built on
+ top of HTTP/2); the ``grpcio`` package must be installed.
+ """
+
+ _grpc_channel: aio.Channel
+ _stubs: Dict[str, Callable] = {}
+
+ @classmethod
+ def create_channel(
+ cls,
+ host: str = "aiplatform.googleapis.com",
+ credentials: credentials.Credentials = None,
+ credentials_file: Optional[str] = None,
+ scopes: Optional[Sequence[str]] = None,
+ quota_project_id: Optional[str] = None,
+ **kwargs,
+ ) -> aio.Channel:
+ """Create and return a gRPC AsyncIO channel object.
+ Args:
+            host (Optional[str]): The host for the channel to use.
+ credentials (Optional[~.Credentials]): The
+ authorization credentials to attach to requests. These
+ credentials identify this application to the service. If
+ none are specified, the client will attempt to ascertain
+ the credentials from the environment.
+ credentials_file (Optional[str]): A file with credentials that can
+ be loaded with :func:`google.auth.load_credentials_from_file`.
+                This argument is mutually exclusive with credentials.
+            scopes (Optional[Sequence[str]]): An optional list of scopes needed for this
+ service. These are only used when credentials are not specified and
+ are passed to :func:`google.auth.default`.
+ quota_project_id (Optional[str]): An optional project to use for billing
+ and quota.
+ kwargs (Optional[dict]): Keyword arguments, which are passed to the
+ channel creation.
+ Returns:
+ aio.Channel: A gRPC AsyncIO channel object.
+ """
+ scopes = scopes or cls.AUTH_SCOPES
+ return grpc_helpers_async.create_channel(
+ host,
+ credentials=credentials,
+ credentials_file=credentials_file,
+ scopes=scopes,
+ quota_project_id=quota_project_id,
+ **kwargs,
+ )
+
+ def __init__(
+ self,
+ *,
+ host: str = "aiplatform.googleapis.com",
+ credentials: credentials.Credentials = None,
+ credentials_file: Optional[str] = None,
+ scopes: Optional[Sequence[str]] = None,
+ channel: aio.Channel = None,
+ api_mtls_endpoint: str = None,
+ client_cert_source: Callable[[], Tuple[bytes, bytes]] = None,
+ ssl_channel_credentials: grpc.ChannelCredentials = None,
+ client_cert_source_for_mtls: Callable[[], Tuple[bytes, bytes]] = None,
+ quota_project_id=None,
+ client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
+ ) -> None:
+ """Instantiate the transport.
+
+ Args:
+ host (Optional[str]): The hostname to connect to.
+ credentials (Optional[google.auth.credentials.Credentials]): The
+ authorization credentials to attach to requests. These
+ credentials identify the application to the service; if none
+ are specified, the client will attempt to ascertain the
+ credentials from the environment.
+ This argument is ignored if ``channel`` is provided.
+ credentials_file (Optional[str]): A file with credentials that can
+ be loaded with :func:`google.auth.load_credentials_from_file`.
+ This argument is ignored if ``channel`` is provided.
+            scopes (Optional[Sequence[str]]): An optional list of scopes needed for this
+ service. These are only used when credentials are not specified and
+ are passed to :func:`google.auth.default`.
+ channel (Optional[aio.Channel]): A ``Channel`` instance through
+ which to make calls.
+ api_mtls_endpoint (Optional[str]): Deprecated. The mutual TLS endpoint.
+ If provided, it overrides the ``host`` argument and tries to create
+ a mutual TLS channel with client SSL credentials from
+                ``client_cert_source`` or application default SSL credentials.
+ client_cert_source (Optional[Callable[[], Tuple[bytes, bytes]]]):
+ Deprecated. A callback to provide client SSL certificate bytes and
+ private key bytes, both in PEM format. It is ignored if
+ ``api_mtls_endpoint`` is None.
+ ssl_channel_credentials (grpc.ChannelCredentials): SSL credentials
+ for grpc channel. It is ignored if ``channel`` is provided.
+ client_cert_source_for_mtls (Optional[Callable[[], Tuple[bytes, bytes]]]):
+ A callback to provide client certificate bytes and private key bytes,
+ both in PEM format. It is used to configure mutual TLS channel. It is
+ ignored if ``channel`` or ``ssl_channel_credentials`` is provided.
+ quota_project_id (Optional[str]): An optional project to use for billing
+ and quota.
+ client_info (google.api_core.gapic_v1.client_info.ClientInfo):
+ The client info used to send a user-agent string along with
+ API requests. If ``None``, then default info will be used.
+ Generally, you only need to set this if you're developing
+ your own client library.
+
+ Raises:
+            google.auth.exceptions.MutualTLSChannelError: If mutual TLS transport
+ creation failed for any reason.
+ google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials``
+ and ``credentials_file`` are passed.
+ """
+ self._ssl_channel_credentials = ssl_channel_credentials
+
+ if api_mtls_endpoint:
+ warnings.warn("api_mtls_endpoint is deprecated", DeprecationWarning)
+ if client_cert_source:
+ warnings.warn("client_cert_source is deprecated", DeprecationWarning)
+
+ if channel:
+ # Sanity check: Ensure that channel and credentials are not both
+ # provided.
+ credentials = False
+
+ # If a channel was explicitly provided, set it.
+ self._grpc_channel = channel
+ self._ssl_channel_credentials = None
+ elif api_mtls_endpoint:
+ host = (
+ api_mtls_endpoint
+ if ":" in api_mtls_endpoint
+ else api_mtls_endpoint + ":443"
+ )
+
+ if credentials is None:
+ credentials, _ = auth.default(
+ scopes=self.AUTH_SCOPES, quota_project_id=quota_project_id
+ )
+
+ # Create SSL credentials with client_cert_source or application
+ # default SSL credentials.
+ if client_cert_source:
+ cert, key = client_cert_source()
+ ssl_credentials = grpc.ssl_channel_credentials(
+ certificate_chain=cert, private_key=key
+ )
+ else:
+ ssl_credentials = SslCredentials().ssl_credentials
+
+ # create a new channel. The provided one is ignored.
+ self._grpc_channel = type(self).create_channel(
+ host,
+ credentials=credentials,
+ credentials_file=credentials_file,
+ ssl_credentials=ssl_credentials,
+ scopes=scopes or self.AUTH_SCOPES,
+ quota_project_id=quota_project_id,
+ options=[
+ ("grpc.max_send_message_length", -1),
+ ("grpc.max_receive_message_length", -1),
+ ],
+ )
+ self._ssl_channel_credentials = ssl_credentials
+ else:
+ host = host if ":" in host else host + ":443"
+
+ if credentials is None:
+ credentials, _ = auth.default(
+ scopes=self.AUTH_SCOPES, quota_project_id=quota_project_id
+ )
+
+ if client_cert_source_for_mtls and not ssl_channel_credentials:
+ cert, key = client_cert_source_for_mtls()
+ self._ssl_channel_credentials = grpc.ssl_channel_credentials(
+ certificate_chain=cert, private_key=key
+ )
+
+ # create a new channel. The provided one is ignored.
+ self._grpc_channel = type(self).create_channel(
+ host,
+ credentials=credentials,
+ credentials_file=credentials_file,
+ ssl_credentials=self._ssl_channel_credentials,
+ scopes=scopes or self.AUTH_SCOPES,
+ quota_project_id=quota_project_id,
+ options=[
+ ("grpc.max_send_message_length", -1),
+ ("grpc.max_receive_message_length", -1),
+ ],
+ )
+
+ # Run the base constructor.
+ super().__init__(
+ host=host,
+ credentials=credentials,
+ credentials_file=credentials_file,
+ scopes=scopes or self.AUTH_SCOPES,
+ quota_project_id=quota_project_id,
+ client_info=client_info,
+ )
+
+ self._stubs = {}
+ self._operations_client = None
+
+ @property
+ def grpc_channel(self) -> aio.Channel:
+ """Create the channel designed to connect to this service.
+
+ This property caches on the instance; repeated calls return
+ the same channel.
+ """
+ # Return the channel from cache.
+ return self._grpc_channel
+
+ @property
+ def operations_client(self) -> operations_v1.OperationsAsyncClient:
+ """Create the client designed to process long-running operations.
+
+ This property caches on the instance; repeated calls return the same
+ client.
+ """
+ # Sanity check: Only create a new client if we do not already have one.
+ if self._operations_client is None:
+ self._operations_client = operations_v1.OperationsAsyncClient(
+ self.grpc_channel
+ )
+
+ # Return the client from cache.
+ return self._operations_client
+
+ @property
+ def search_migratable_resources(
+ self,
+ ) -> Callable[
+ [migration_service.SearchMigratableResourcesRequest],
+ Awaitable[migration_service.SearchMigratableResourcesResponse],
+ ]:
+ r"""Return a callable for the search migratable resources method over gRPC.
+
+ Searches all of the resources in
+ automl.googleapis.com, datalabeling.googleapis.com and
+ ml.googleapis.com that can be migrated to AI Platform's
+ given location.
+
+ Returns:
+ Callable[[~.SearchMigratableResourcesRequest],
+ Awaitable[~.SearchMigratableResourcesResponse]]:
+ A function that, when called, will call the underlying RPC
+ on the server.
+ """
+ # Generate a "stub function" on-the-fly which will actually make
+ # the request.
+ # gRPC handles serialization and deserialization, so we just need
+ # to pass in the functions for each.
+ if "search_migratable_resources" not in self._stubs:
+ self._stubs["search_migratable_resources"] = self.grpc_channel.unary_unary(
+ "/google.cloud.aiplatform.v1.MigrationService/SearchMigratableResources",
+ request_serializer=migration_service.SearchMigratableResourcesRequest.serialize,
+ response_deserializer=migration_service.SearchMigratableResourcesResponse.deserialize,
+ )
+ return self._stubs["search_migratable_resources"]
+
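+    # Illustrative call of the property above (a sketch; the request values are
+    # placeholders). The returned multicallable is awaitable on the aio channel:
+    #
+    #     response = await transport.search_migratable_resources(
+    #         migration_service.SearchMigratableResourcesRequest(
+    #             parent="projects/my-project/locations/us-central1"
+    #         )
+    #     )
+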
+ @property
+ def batch_migrate_resources(
+ self,
+ ) -> Callable[
+ [migration_service.BatchMigrateResourcesRequest],
+ Awaitable[operations.Operation],
+ ]:
+ r"""Return a callable for the batch migrate resources method over gRPC.
+
+ Batch migrates resources from ml.googleapis.com,
+ automl.googleapis.com, and datalabeling.googleapis.com
+ to AI Platform (Unified).
+
+ Returns:
+ Callable[[~.BatchMigrateResourcesRequest],
+ Awaitable[~.Operation]]:
+ A function that, when called, will call the underlying RPC
+ on the server.
+ """
+ # Generate a "stub function" on-the-fly which will actually make
+ # the request.
+ # gRPC handles serialization and deserialization, so we just need
+ # to pass in the functions for each.
+ if "batch_migrate_resources" not in self._stubs:
+ self._stubs["batch_migrate_resources"] = self.grpc_channel.unary_unary(
+ "/google.cloud.aiplatform.v1.MigrationService/BatchMigrateResources",
+ request_serializer=migration_service.BatchMigrateResourcesRequest.serialize,
+ response_deserializer=operations.Operation.FromString,
+ )
+ return self._stubs["batch_migrate_resources"]
+
+
+__all__ = ("MigrationServiceGrpcAsyncIOTransport",)
diff --git a/google/cloud/aiplatform_v1/services/model_service/__init__.py b/google/cloud/aiplatform_v1/services/model_service/__init__.py
new file mode 100644
index 0000000000..b39295ebfe
--- /dev/null
+++ b/google/cloud/aiplatform_v1/services/model_service/__init__.py
@@ -0,0 +1,24 @@
+# -*- coding: utf-8 -*-
+
+# Copyright 2020 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+from .client import ModelServiceClient
+from .async_client import ModelServiceAsyncClient
+
+__all__ = (
+ "ModelServiceClient",
+ "ModelServiceAsyncClient",
+)
diff --git a/google/cloud/aiplatform_v1/services/model_service/async_client.py b/google/cloud/aiplatform_v1/services/model_service/async_client.py
new file mode 100644
index 0000000000..123b922019
--- /dev/null
+++ b/google/cloud/aiplatform_v1/services/model_service/async_client.py
@@ -0,0 +1,1031 @@
+# -*- coding: utf-8 -*-
+
+# Copyright 2020 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+from collections import OrderedDict
+import functools
+import re
+from typing import Dict, Sequence, Tuple, Type, Union
+import pkg_resources
+
+import google.api_core.client_options as ClientOptions # type: ignore
+from google.api_core import exceptions # type: ignore
+from google.api_core import gapic_v1 # type: ignore
+from google.api_core import retry as retries # type: ignore
+from google.auth import credentials # type: ignore
+from google.oauth2 import service_account # type: ignore
+
+from google.api_core import operation as ga_operation # type: ignore
+from google.api_core import operation_async # type: ignore
+from google.cloud.aiplatform_v1.services.model_service import pagers
+from google.cloud.aiplatform_v1.types import deployed_model_ref
+from google.cloud.aiplatform_v1.types import encryption_spec
+from google.cloud.aiplatform_v1.types import model
+from google.cloud.aiplatform_v1.types import model as gca_model
+from google.cloud.aiplatform_v1.types import model_evaluation
+from google.cloud.aiplatform_v1.types import model_evaluation_slice
+from google.cloud.aiplatform_v1.types import model_service
+from google.cloud.aiplatform_v1.types import operation as gca_operation
+from google.protobuf import empty_pb2 as empty # type: ignore
+from google.protobuf import field_mask_pb2 as field_mask # type: ignore
+from google.protobuf import struct_pb2 as struct # type: ignore
+from google.protobuf import timestamp_pb2 as timestamp # type: ignore
+
+from .transports.base import ModelServiceTransport, DEFAULT_CLIENT_INFO
+from .transports.grpc_asyncio import ModelServiceGrpcAsyncIOTransport
+from .client import ModelServiceClient
+
+
+class ModelServiceAsyncClient:
+ """A service for managing AI Platform's machine learning Models."""
+
+ _client: ModelServiceClient
+
+ DEFAULT_ENDPOINT = ModelServiceClient.DEFAULT_ENDPOINT
+ DEFAULT_MTLS_ENDPOINT = ModelServiceClient.DEFAULT_MTLS_ENDPOINT
+
+ endpoint_path = staticmethod(ModelServiceClient.endpoint_path)
+ parse_endpoint_path = staticmethod(ModelServiceClient.parse_endpoint_path)
+ model_path = staticmethod(ModelServiceClient.model_path)
+ parse_model_path = staticmethod(ModelServiceClient.parse_model_path)
+ model_evaluation_path = staticmethod(ModelServiceClient.model_evaluation_path)
+ parse_model_evaluation_path = staticmethod(
+ ModelServiceClient.parse_model_evaluation_path
+ )
+ model_evaluation_slice_path = staticmethod(
+ ModelServiceClient.model_evaluation_slice_path
+ )
+ parse_model_evaluation_slice_path = staticmethod(
+ ModelServiceClient.parse_model_evaluation_slice_path
+ )
+ training_pipeline_path = staticmethod(ModelServiceClient.training_pipeline_path)
+ parse_training_pipeline_path = staticmethod(
+ ModelServiceClient.parse_training_pipeline_path
+ )
+
+ common_billing_account_path = staticmethod(
+ ModelServiceClient.common_billing_account_path
+ )
+ parse_common_billing_account_path = staticmethod(
+ ModelServiceClient.parse_common_billing_account_path
+ )
+
+ common_folder_path = staticmethod(ModelServiceClient.common_folder_path)
+ parse_common_folder_path = staticmethod(ModelServiceClient.parse_common_folder_path)
+
+ common_organization_path = staticmethod(ModelServiceClient.common_organization_path)
+ parse_common_organization_path = staticmethod(
+ ModelServiceClient.parse_common_organization_path
+ )
+
+ common_project_path = staticmethod(ModelServiceClient.common_project_path)
+ parse_common_project_path = staticmethod(
+ ModelServiceClient.parse_common_project_path
+ )
+
+ common_location_path = staticmethod(ModelServiceClient.common_location_path)
+ parse_common_location_path = staticmethod(
+ ModelServiceClient.parse_common_location_path
+ )
+
+ from_service_account_info = ModelServiceClient.from_service_account_info
+ from_service_account_file = ModelServiceClient.from_service_account_file
+ from_service_account_json = from_service_account_file
+
+ @property
+ def transport(self) -> ModelServiceTransport:
+ """Return the transport used by the client instance.
+
+ Returns:
+ ModelServiceTransport: The transport used by the client instance.
+ """
+ return self._client.transport
+
+ get_transport_class = functools.partial(
+ type(ModelServiceClient).get_transport_class, type(ModelServiceClient)
+ )
+
+ def __init__(
+ self,
+ *,
+ credentials: credentials.Credentials = None,
+ transport: Union[str, ModelServiceTransport] = "grpc_asyncio",
+ client_options: ClientOptions = None,
+ client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
+ ) -> None:
+ """Instantiate the model service client.
+
+ Args:
+ credentials (Optional[google.auth.credentials.Credentials]): The
+ authorization credentials to attach to requests. These
+ credentials identify the application to the service; if none
+ are specified, the client will attempt to ascertain the
+ credentials from the environment.
+ transport (Union[str, ~.ModelServiceTransport]): The
+ transport to use. If set to None, a transport is chosen
+ automatically.
+ client_options (ClientOptions): Custom options for the client. It
+ won't take effect if a ``transport`` instance is provided.
+ (1) The ``api_endpoint`` property can be used to override the
+                default endpoint provided by the client. The GOOGLE_API_USE_MTLS_ENDPOINT
+ environment variable can also be used to override the endpoint:
+ "always" (always use the default mTLS endpoint), "never" (always
+ use the default regular endpoint) and "auto" (auto switch to the
+ default mTLS endpoint if client certificate is present, this is
+ the default value). However, the ``api_endpoint`` property takes
+ precedence if provided.
+                (2) If the GOOGLE_API_USE_CLIENT_CERTIFICATE environment variable
+ is "true", then the ``client_cert_source`` property can be used
+ to provide client certificate for mutual TLS transport. If
+ not provided, the default SSL client certificate will be used if
+ present. If GOOGLE_API_USE_CLIENT_CERTIFICATE is "false" or not
+ set, no client certificate will be used.
+
+ Raises:
+            google.auth.exceptions.MutualTLSChannelError: If mutual TLS transport
+ creation failed for any reason.
+ """
+
+ self._client = ModelServiceClient(
+ credentials=credentials,
+ transport=transport,
+ client_options=client_options,
+ client_info=client_info,
+ )
+
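+    # Instantiation sketch (illustrative; the regional endpoint below is an
+    # assumed example, not a default):
+    #
+    #     client = ModelServiceAsyncClient(
+    #         client_options={"api_endpoint": "us-central1-aiplatform.googleapis.com"}
+    #     )
+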
+ async def upload_model(
+ self,
+ request: model_service.UploadModelRequest = None,
+ *,
+ parent: str = None,
+ model: gca_model.Model = None,
+ retry: retries.Retry = gapic_v1.method.DEFAULT,
+ timeout: float = None,
+ metadata: Sequence[Tuple[str, str]] = (),
+ ) -> operation_async.AsyncOperation:
+ r"""Uploads a Model artifact into AI Platform.
+
+ Args:
+ request (:class:`google.cloud.aiplatform_v1.types.UploadModelRequest`):
+ The request object. Request message for
+ ``ModelService.UploadModel``.
+ parent (:class:`str`):
+ Required. The resource name of the Location into which
+ to upload the Model. Format:
+ ``projects/{project}/locations/{location}``
+
+ This corresponds to the ``parent`` field
+ on the ``request`` instance; if ``request`` is provided, this
+ should not be set.
+ model (:class:`google.cloud.aiplatform_v1.types.Model`):
+ Required. The Model to create.
+ This corresponds to the ``model`` field
+ on the ``request`` instance; if ``request`` is provided, this
+ should not be set.
+
+ retry (google.api_core.retry.Retry): Designation of what errors, if any,
+ should be retried.
+ timeout (float): The timeout for this request.
+ metadata (Sequence[Tuple[str, str]]): Strings which should be
+ sent along with the request as metadata.
+
+ Returns:
+ google.api_core.operation_async.AsyncOperation:
+ An object representing a long-running operation.
+
+ The result type for the operation will be
+ :class:`google.cloud.aiplatform_v1.types.UploadModelResponse`
+ Response message of
+ ``ModelService.UploadModel``
+ operation.
+
+ """
+ # Create or coerce a protobuf request object.
+ # Sanity check: If we got a request object, we should *not* have
+ # gotten any keyword arguments that map to the request.
+ has_flattened_params = any([parent, model])
+ if request is not None and has_flattened_params:
+ raise ValueError(
+ "If the `request` argument is set, then none of "
+ "the individual field arguments should be set."
+ )
+
+ request = model_service.UploadModelRequest(request)
+
+ # If we have keyword arguments corresponding to fields on the
+ # request, apply these.
+
+ if parent is not None:
+ request.parent = parent
+ if model is not None:
+ request.model = model
+
+ # Wrap the RPC method; this adds retry and timeout information,
+ # and friendly error handling.
+ rpc = gapic_v1.method_async.wrap_method(
+ self._client._transport.upload_model,
+ default_timeout=None,
+ client_info=DEFAULT_CLIENT_INFO,
+ )
+
+ # Certain fields should be provided within the metadata header;
+ # add these here.
+ metadata = tuple(metadata) + (
+ gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)),
+ )
+
+ # Send the request.
+ response = await rpc(request, retry=retry, timeout=timeout, metadata=metadata,)
+
+ # Wrap the response in an operation future.
+ response = operation_async.from_gapic(
+ response,
+ self._client._transport.operations_client,
+ model_service.UploadModelResponse,
+ metadata_type=model_service.UploadModelOperationMetadata,
+ )
+
+ # Done; return the response.
+ return response
+
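+    # Sketch of driving the long-running ``upload_model`` call above
+    # (illustrative; the resource names and Model payload are placeholders):
+    #
+    #     op = await client.upload_model(
+    #         parent="projects/my-project/locations/us-central1",
+    #         model=gca_model.Model(display_name="my-model"),
+    #     )
+    #     upload_result = await op.result()
+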
+ async def get_model(
+ self,
+ request: model_service.GetModelRequest = None,
+ *,
+ name: str = None,
+ retry: retries.Retry = gapic_v1.method.DEFAULT,
+ timeout: float = None,
+ metadata: Sequence[Tuple[str, str]] = (),
+ ) -> model.Model:
+ r"""Gets a Model.
+
+ Args:
+ request (:class:`google.cloud.aiplatform_v1.types.GetModelRequest`):
+ The request object. Request message for
+ ``ModelService.GetModel``.
+ name (:class:`str`):
+ Required. The name of the Model resource. Format:
+ ``projects/{project}/locations/{location}/models/{model}``
+
+ This corresponds to the ``name`` field
+ on the ``request`` instance; if ``request`` is provided, this
+ should not be set.
+
+ retry (google.api_core.retry.Retry): Designation of what errors, if any,
+ should be retried.
+ timeout (float): The timeout for this request.
+ metadata (Sequence[Tuple[str, str]]): Strings which should be
+ sent along with the request as metadata.
+
+ Returns:
+ google.cloud.aiplatform_v1.types.Model:
+ A trained machine learning Model.
+ """
+ # Create or coerce a protobuf request object.
+ # Sanity check: If we got a request object, we should *not* have
+ # gotten any keyword arguments that map to the request.
+ has_flattened_params = any([name])
+ if request is not None and has_flattened_params:
+ raise ValueError(
+ "If the `request` argument is set, then none of "
+ "the individual field arguments should be set."
+ )
+
+ request = model_service.GetModelRequest(request)
+
+ # If we have keyword arguments corresponding to fields on the
+ # request, apply these.
+
+ if name is not None:
+ request.name = name
+
+ # Wrap the RPC method; this adds retry and timeout information,
+ # and friendly error handling.
+ rpc = gapic_v1.method_async.wrap_method(
+ self._client._transport.get_model,
+ default_timeout=None,
+ client_info=DEFAULT_CLIENT_INFO,
+ )
+
+ # Certain fields should be provided within the metadata header;
+ # add these here.
+ metadata = tuple(metadata) + (
+ gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)),
+ )
+
+ # Send the request.
+ response = await rpc(request, retry=retry, timeout=timeout, metadata=metadata,)
+
+ # Done; return the response.
+ return response
+
+ async def list_models(
+ self,
+ request: model_service.ListModelsRequest = None,
+ *,
+ parent: str = None,
+ retry: retries.Retry = gapic_v1.method.DEFAULT,
+ timeout: float = None,
+ metadata: Sequence[Tuple[str, str]] = (),
+ ) -> pagers.ListModelsAsyncPager:
+ r"""Lists Models in a Location.
+
+ Args:
+ request (:class:`google.cloud.aiplatform_v1.types.ListModelsRequest`):
+ The request object. Request message for
+ ``ModelService.ListModels``.
+ parent (:class:`str`):
+ Required. The resource name of the Location to list the
+ Models from. Format:
+ ``projects/{project}/locations/{location}``
+
+ This corresponds to the ``parent`` field
+ on the ``request`` instance; if ``request`` is provided, this
+ should not be set.
+
+ retry (google.api_core.retry.Retry): Designation of what errors, if any,
+ should be retried.
+ timeout (float): The timeout for this request.
+ metadata (Sequence[Tuple[str, str]]): Strings which should be
+ sent along with the request as metadata.
+
+ Returns:
+ google.cloud.aiplatform_v1.services.model_service.pagers.ListModelsAsyncPager:
+ Response message for
+ ``ModelService.ListModels``
+
+ Iterating over this object will yield results and
+ resolve additional pages automatically.
+
+ """
+ # Create or coerce a protobuf request object.
+ # Sanity check: If we got a request object, we should *not* have
+ # gotten any keyword arguments that map to the request.
+ has_flattened_params = any([parent])
+ if request is not None and has_flattened_params:
+ raise ValueError(
+ "If the `request` argument is set, then none of "
+ "the individual field arguments should be set."
+ )
+
+ request = model_service.ListModelsRequest(request)
+
+ # If we have keyword arguments corresponding to fields on the
+ # request, apply these.
+
+ if parent is not None:
+ request.parent = parent
+
+ # Wrap the RPC method; this adds retry and timeout information,
+ # and friendly error handling.
+ rpc = gapic_v1.method_async.wrap_method(
+ self._client._transport.list_models,
+ default_timeout=None,
+ client_info=DEFAULT_CLIENT_INFO,
+ )
+
+ # Certain fields should be provided within the metadata header;
+ # add these here.
+ metadata = tuple(metadata) + (
+ gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)),
+ )
+
+ # Send the request.
+ response = await rpc(request, retry=retry, timeout=timeout, metadata=metadata,)
+
+ # This method is paged; wrap the response in a pager, which provides
+ # an `__aiter__` convenience method.
+ response = pagers.ListModelsAsyncPager(
+ method=rpc, request=request, response=response, metadata=metadata,
+ )
+
+ # Done; return the response.
+ return response
+
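+    # Paging sketch for ``list_models`` above (illustrative; the parent value is
+    # a placeholder). The returned pager resolves additional pages lazily:
+    #
+    #     pager = await client.list_models(
+    #         parent="projects/my-project/locations/us-central1"
+    #     )
+    #     async for m in pager:
+    #         print(m.display_name)
+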
+ async def update_model(
+ self,
+ request: model_service.UpdateModelRequest = None,
+ *,
+ model: gca_model.Model = None,
+ update_mask: field_mask.FieldMask = None,
+ retry: retries.Retry = gapic_v1.method.DEFAULT,
+ timeout: float = None,
+ metadata: Sequence[Tuple[str, str]] = (),
+ ) -> gca_model.Model:
+ r"""Updates a Model.
+
+ Args:
+ request (:class:`google.cloud.aiplatform_v1.types.UpdateModelRequest`):
+ The request object. Request message for
+ ``ModelService.UpdateModel``.
+ model (:class:`google.cloud.aiplatform_v1.types.Model`):
+ Required. The Model which replaces
+ the resource on the server.
+
+ This corresponds to the ``model`` field
+ on the ``request`` instance; if ``request`` is provided, this
+ should not be set.
+ update_mask (:class:`google.protobuf.field_mask_pb2.FieldMask`):
+ Required. The update mask applies to the resource. For
+ the ``FieldMask`` definition, see
+                `FieldMask <https://developers.google.com/protocol-buffers/docs/reference/google.protobuf#fieldmask>`__.
+
+ This corresponds to the ``update_mask`` field
+ on the ``request`` instance; if ``request`` is provided, this
+ should not be set.
+
+ retry (google.api_core.retry.Retry): Designation of what errors, if any,
+ should be retried.
+ timeout (float): The timeout for this request.
+ metadata (Sequence[Tuple[str, str]]): Strings which should be
+ sent along with the request as metadata.
+
+ Returns:
+ google.cloud.aiplatform_v1.types.Model:
+ A trained machine learning Model.
+ """
+ # Create or coerce a protobuf request object.
+ # Sanity check: If we got a request object, we should *not* have
+ # gotten any keyword arguments that map to the request.
+ has_flattened_params = any([model, update_mask])
+ if request is not None and has_flattened_params:
+ raise ValueError(
+ "If the `request` argument is set, then none of "
+ "the individual field arguments should be set."
+ )
+
+ request = model_service.UpdateModelRequest(request)
+
+ # If we have keyword arguments corresponding to fields on the
+ # request, apply these.
+
+ if model is not None:
+ request.model = model
+ if update_mask is not None:
+ request.update_mask = update_mask
+
+ # Wrap the RPC method; this adds retry and timeout information,
+ # and friendly error handling.
+ rpc = gapic_v1.method_async.wrap_method(
+ self._client._transport.update_model,
+ default_timeout=None,
+ client_info=DEFAULT_CLIENT_INFO,
+ )
+
+ # Certain fields should be provided within the metadata header;
+ # add these here.
+ metadata = tuple(metadata) + (
+ gapic_v1.routing_header.to_grpc_metadata(
+ (("model.name", request.model.name),)
+ ),
+ )
+
+ # Send the request.
+ response = await rpc(request, retry=retry, timeout=timeout, metadata=metadata,)
+
+ # Done; return the response.
+ return response
+
+ async def delete_model(
+ self,
+ request: model_service.DeleteModelRequest = None,
+ *,
+ name: str = None,
+ retry: retries.Retry = gapic_v1.method.DEFAULT,
+ timeout: float = None,
+ metadata: Sequence[Tuple[str, str]] = (),
+ ) -> operation_async.AsyncOperation:
+ r"""Deletes a Model.
+ Note: Model can only be deleted if there are no
+ DeployedModels created from it.
+
+ Args:
+ request (:class:`google.cloud.aiplatform_v1.types.DeleteModelRequest`):
+ The request object. Request message for
+ ``ModelService.DeleteModel``.
+ name (:class:`str`):
+ Required. The name of the Model resource to be deleted.
+ Format:
+ ``projects/{project}/locations/{location}/models/{model}``
+
+ This corresponds to the ``name`` field
+ on the ``request`` instance; if ``request`` is provided, this
+ should not be set.
+
+ retry (google.api_core.retry.Retry): Designation of what errors, if any,
+ should be retried.
+ timeout (float): The timeout for this request.
+ metadata (Sequence[Tuple[str, str]]): Strings which should be
+ sent along with the request as metadata.
+
+ Returns:
+ google.api_core.operation_async.AsyncOperation:
+ An object representing a long-running operation.
+
+ The result type for the operation will be :class:`google.protobuf.empty_pb2.Empty` A generic empty message that you can re-use to avoid defining duplicated
+ empty messages in your APIs. A typical example is to
+ use it as the request or the response type of an API
+ method. For instance:
+
+ service Foo {
+ rpc Bar(google.protobuf.Empty) returns
+ (google.protobuf.Empty);
+
+ }
+
+                The JSON representation for Empty is an empty JSON
+                object ``{}``.
+
+ """
+ # Create or coerce a protobuf request object.
+ # Sanity check: If we got a request object, we should *not* have
+ # gotten any keyword arguments that map to the request.
+ has_flattened_params = any([name])
+ if request is not None and has_flattened_params:
+ raise ValueError(
+ "If the `request` argument is set, then none of "
+ "the individual field arguments should be set."
+ )
+
+ request = model_service.DeleteModelRequest(request)
+
+ # If we have keyword arguments corresponding to fields on the
+ # request, apply these.
+
+ if name is not None:
+ request.name = name
+
+ # Wrap the RPC method; this adds retry and timeout information,
+ # and friendly error handling.
+ rpc = gapic_v1.method_async.wrap_method(
+ self._client._transport.delete_model,
+ default_timeout=None,
+ client_info=DEFAULT_CLIENT_INFO,
+ )
+
+ # Certain fields should be provided within the metadata header;
+ # add these here.
+ metadata = tuple(metadata) + (
+ gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)),
+ )
+
+ # Send the request.
+ response = await rpc(request, retry=retry, timeout=timeout, metadata=metadata,)
+
+ # Wrap the response in an operation future.
+ response = operation_async.from_gapic(
+ response,
+ self._client._transport.operations_client,
+ empty.Empty,
+ metadata_type=gca_operation.DeleteOperationMetadata,
+ )
+
+ # Done; return the response.
+ return response
+
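+    # Sketch of awaiting the ``delete_model`` operation above (illustrative; the
+    # model name is a placeholder). The operation resolves to ``Empty``:
+    #
+    #     op = await client.delete_model(
+    #         name="projects/my-project/locations/us-central1/models/123"
+    #     )
+    #     await op.result()
+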
+ async def export_model(
+ self,
+ request: model_service.ExportModelRequest = None,
+ *,
+ name: str = None,
+ output_config: model_service.ExportModelRequest.OutputConfig = None,
+ retry: retries.Retry = gapic_v1.method.DEFAULT,
+ timeout: float = None,
+ metadata: Sequence[Tuple[str, str]] = (),
+ ) -> operation_async.AsyncOperation:
+        r"""Exports a trained, exportable Model to a location specified by
+ the user. A Model is considered to be exportable if it has at
+ least one [supported export
+ format][google.cloud.aiplatform.v1.Model.supported_export_formats].
+
+ Args:
+ request (:class:`google.cloud.aiplatform_v1.types.ExportModelRequest`):
+ The request object. Request message for
+ ``ModelService.ExportModel``.
+ name (:class:`str`):
+ Required. The resource name of the Model to export.
+ Format:
+ ``projects/{project}/locations/{location}/models/{model}``
+
+ This corresponds to the ``name`` field
+ on the ``request`` instance; if ``request`` is provided, this
+ should not be set.
+ output_config (:class:`google.cloud.aiplatform_v1.types.ExportModelRequest.OutputConfig`):
+ Required. The desired output location
+ and configuration.
+
+ This corresponds to the ``output_config`` field
+ on the ``request`` instance; if ``request`` is provided, this
+ should not be set.
+
+ retry (google.api_core.retry.Retry): Designation of what errors, if any,
+ should be retried.
+ timeout (float): The timeout for this request.
+ metadata (Sequence[Tuple[str, str]]): Strings which should be
+ sent along with the request as metadata.
+
+ Returns:
+ google.api_core.operation_async.AsyncOperation:
+ An object representing a long-running operation.
+
+ The result type for the operation will be
+ :class:`google.cloud.aiplatform_v1.types.ExportModelResponse`
+ Response message of
+ ``ModelService.ExportModel``
+ operation.
+
+ """
+ # Create or coerce a protobuf request object.
+ # Sanity check: If we got a request object, we should *not* have
+ # gotten any keyword arguments that map to the request.
+ has_flattened_params = any([name, output_config])
+ if request is not None and has_flattened_params:
+ raise ValueError(
+ "If the `request` argument is set, then none of "
+ "the individual field arguments should be set."
+ )
+
+ request = model_service.ExportModelRequest(request)
+
+ # If we have keyword arguments corresponding to fields on the
+ # request, apply these.
+
+ if name is not None:
+ request.name = name
+ if output_config is not None:
+ request.output_config = output_config
+
+ # Wrap the RPC method; this adds retry and timeout information,
+ # and friendly error handling.
+ rpc = gapic_v1.method_async.wrap_method(
+ self._client._transport.export_model,
+ default_timeout=None,
+ client_info=DEFAULT_CLIENT_INFO,
+ )
+
+ # Certain fields should be provided within the metadata header;
+ # add these here.
+ metadata = tuple(metadata) + (
+ gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)),
+ )
+
+ # Send the request.
+ response = await rpc(request, retry=retry, timeout=timeout, metadata=metadata,)
+
+ # Wrap the response in an operation future.
+ response = operation_async.from_gapic(
+ response,
+ self._client._transport.operations_client,
+ model_service.ExportModelResponse,
+ metadata_type=model_service.ExportModelOperationMetadata,
+ )
+
+ # Done; return the response.
+ return response
+
+ async def get_model_evaluation(
+ self,
+ request: model_service.GetModelEvaluationRequest = None,
+ *,
+ name: str = None,
+ retry: retries.Retry = gapic_v1.method.DEFAULT,
+ timeout: float = None,
+ metadata: Sequence[Tuple[str, str]] = (),
+ ) -> model_evaluation.ModelEvaluation:
+ r"""Gets a ModelEvaluation.
+
+ Args:
+ request (:class:`google.cloud.aiplatform_v1.types.GetModelEvaluationRequest`):
+ The request object. Request message for
+ ``ModelService.GetModelEvaluation``.
+ name (:class:`str`):
+ Required. The name of the ModelEvaluation resource.
+ Format:
+
+ ``projects/{project}/locations/{location}/models/{model}/evaluations/{evaluation}``
+
+ This corresponds to the ``name`` field
+ on the ``request`` instance; if ``request`` is provided, this
+ should not be set.
+
+ retry (google.api_core.retry.Retry): Designation of what errors, if any,
+ should be retried.
+ timeout (float): The timeout for this request.
+ metadata (Sequence[Tuple[str, str]]): Strings which should be
+ sent along with the request as metadata.
+
+ Returns:
+ google.cloud.aiplatform_v1.types.ModelEvaluation:
+ A collection of metrics calculated by
+                comparing the Model's predictions on all of
+ the test data against annotations from
+ the test data.
+
+ """
+ # Create or coerce a protobuf request object.
+ # Sanity check: If we got a request object, we should *not* have
+ # gotten any keyword arguments that map to the request.
+ has_flattened_params = any([name])
+ if request is not None and has_flattened_params:
+ raise ValueError(
+ "If the `request` argument is set, then none of "
+ "the individual field arguments should be set."
+ )
+
+ request = model_service.GetModelEvaluationRequest(request)
+
+ # If we have keyword arguments corresponding to fields on the
+ # request, apply these.
+
+ if name is not None:
+ request.name = name
+
+ # Wrap the RPC method; this adds retry and timeout information,
+ # and friendly error handling.
+ rpc = gapic_v1.method_async.wrap_method(
+ self._client._transport.get_model_evaluation,
+ default_timeout=None,
+ client_info=DEFAULT_CLIENT_INFO,
+ )
+
+ # Certain fields should be provided within the metadata header;
+ # add these here.
+ metadata = tuple(metadata) + (
+ gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)),
+ )
+
+ # Send the request.
+ response = await rpc(request, retry=retry, timeout=timeout, metadata=metadata,)
+
+ # Done; return the response.
+ return response
+
+ async def list_model_evaluations(
+ self,
+ request: model_service.ListModelEvaluationsRequest = None,
+ *,
+ parent: str = None,
+ retry: retries.Retry = gapic_v1.method.DEFAULT,
+ timeout: float = None,
+ metadata: Sequence[Tuple[str, str]] = (),
+ ) -> pagers.ListModelEvaluationsAsyncPager:
+ r"""Lists ModelEvaluations in a Model.
+
+ Args:
+ request (:class:`google.cloud.aiplatform_v1.types.ListModelEvaluationsRequest`):
+ The request object. Request message for
+ ``ModelService.ListModelEvaluations``.
+ parent (:class:`str`):
+ Required. The resource name of the Model to list the
+ ModelEvaluations from. Format:
+ ``projects/{project}/locations/{location}/models/{model}``
+
+ This corresponds to the ``parent`` field
+ on the ``request`` instance; if ``request`` is provided, this
+ should not be set.
+
+ retry (google.api_core.retry.Retry): Designation of what errors, if any,
+ should be retried.
+ timeout (float): The timeout for this request.
+ metadata (Sequence[Tuple[str, str]]): Strings which should be
+ sent along with the request as metadata.
+
+ Returns:
+ google.cloud.aiplatform_v1.services.model_service.pagers.ListModelEvaluationsAsyncPager:
+ Response message for
+ ``ModelService.ListModelEvaluations``.
+
+ Iterating over this object will yield results and
+ resolve additional pages automatically.
+
+ """
+ # Create or coerce a protobuf request object.
+ # Sanity check: If we got a request object, we should *not* have
+ # gotten any keyword arguments that map to the request.
+ has_flattened_params = any([parent])
+ if request is not None and has_flattened_params:
+ raise ValueError(
+ "If the `request` argument is set, then none of "
+ "the individual field arguments should be set."
+ )
+
+ request = model_service.ListModelEvaluationsRequest(request)
+
+ # If we have keyword arguments corresponding to fields on the
+ # request, apply these.
+
+ if parent is not None:
+ request.parent = parent
+
+ # Wrap the RPC method; this adds retry and timeout information,
+ # and friendly error handling.
+ rpc = gapic_v1.method_async.wrap_method(
+ self._client._transport.list_model_evaluations,
+ default_timeout=None,
+ client_info=DEFAULT_CLIENT_INFO,
+ )
+
+ # Certain fields should be provided within the metadata header;
+ # add these here.
+ metadata = tuple(metadata) + (
+ gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)),
+ )
+
+ # Send the request.
+ response = await rpc(request, retry=retry, timeout=timeout, metadata=metadata,)
+
+ # This method is paged; wrap the response in a pager, which provides
+ # an `__aiter__` convenience method.
+ response = pagers.ListModelEvaluationsAsyncPager(
+ method=rpc, request=request, response=response, metadata=metadata,
+ )
+
+ # Done; return the response.
+ return response
+
+ async def get_model_evaluation_slice(
+ self,
+ request: model_service.GetModelEvaluationSliceRequest = None,
+ *,
+ name: str = None,
+ retry: retries.Retry = gapic_v1.method.DEFAULT,
+ timeout: float = None,
+ metadata: Sequence[Tuple[str, str]] = (),
+ ) -> model_evaluation_slice.ModelEvaluationSlice:
+ r"""Gets a ModelEvaluationSlice.
+
+ Args:
+ request (:class:`google.cloud.aiplatform_v1.types.GetModelEvaluationSliceRequest`):
+ The request object. Request message for
+ ``ModelService.GetModelEvaluationSlice``.
+ name (:class:`str`):
+ Required. The name of the ModelEvaluationSlice resource.
+ Format:
+
+ ``projects/{project}/locations/{location}/models/{model}/evaluations/{evaluation}/slices/{slice}``
+
+ This corresponds to the ``name`` field
+ on the ``request`` instance; if ``request`` is provided, this
+ should not be set.
+
+ retry (google.api_core.retry.Retry): Designation of what errors, if any,
+ should be retried.
+ timeout (float): The timeout for this request.
+ metadata (Sequence[Tuple[str, str]]): Strings which should be
+ sent along with the request as metadata.
+
+ Returns:
+ google.cloud.aiplatform_v1.types.ModelEvaluationSlice:
+ A collection of metrics calculated by
+                comparing the Model's predictions on a slice
+ of the test data against ground truth
+ annotations.
+
+ """
+ # Create or coerce a protobuf request object.
+ # Sanity check: If we got a request object, we should *not* have
+ # gotten any keyword arguments that map to the request.
+ has_flattened_params = any([name])
+ if request is not None and has_flattened_params:
+ raise ValueError(
+ "If the `request` argument is set, then none of "
+ "the individual field arguments should be set."
+ )
+
+ request = model_service.GetModelEvaluationSliceRequest(request)
+
+ # If we have keyword arguments corresponding to fields on the
+ # request, apply these.
+
+ if name is not None:
+ request.name = name
+
+ # Wrap the RPC method; this adds retry and timeout information,
+ # and friendly error handling.
+ rpc = gapic_v1.method_async.wrap_method(
+ self._client._transport.get_model_evaluation_slice,
+ default_timeout=None,
+ client_info=DEFAULT_CLIENT_INFO,
+ )
+
+ # Certain fields should be provided within the metadata header;
+ # add these here.
+ metadata = tuple(metadata) + (
+ gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)),
+ )
+
+ # Send the request.
+ response = await rpc(request, retry=retry, timeout=timeout, metadata=metadata,)
+
+ # Done; return the response.
+ return response
+
+ async def list_model_evaluation_slices(
+ self,
+ request: model_service.ListModelEvaluationSlicesRequest = None,
+ *,
+ parent: str = None,
+ retry: retries.Retry = gapic_v1.method.DEFAULT,
+ timeout: float = None,
+ metadata: Sequence[Tuple[str, str]] = (),
+ ) -> pagers.ListModelEvaluationSlicesAsyncPager:
+ r"""Lists ModelEvaluationSlices in a ModelEvaluation.
+
+ Args:
+ request (:class:`google.cloud.aiplatform_v1.types.ListModelEvaluationSlicesRequest`):
+ The request object. Request message for
+ ``ModelService.ListModelEvaluationSlices``.
+ parent (:class:`str`):
+ Required. The resource name of the ModelEvaluation to
+ list the ModelEvaluationSlices from. Format:
+
+ ``projects/{project}/locations/{location}/models/{model}/evaluations/{evaluation}``
+
+ This corresponds to the ``parent`` field
+ on the ``request`` instance; if ``request`` is provided, this
+ should not be set.
+
+ retry (google.api_core.retry.Retry): Designation of what errors, if any,
+ should be retried.
+ timeout (float): The timeout for this request.
+ metadata (Sequence[Tuple[str, str]]): Strings which should be
+ sent along with the request as metadata.
+
+ Returns:
+ google.cloud.aiplatform_v1.services.model_service.pagers.ListModelEvaluationSlicesAsyncPager:
+ Response message for
+ ``ModelService.ListModelEvaluationSlices``.
+
+ Iterating over this object will yield results and
+ resolve additional pages automatically.
+
+ """
+ # Create or coerce a protobuf request object.
+ # Sanity check: If we got a request object, we should *not* have
+ # gotten any keyword arguments that map to the request.
+ has_flattened_params = any([parent])
+ if request is not None and has_flattened_params:
+ raise ValueError(
+ "If the `request` argument is set, then none of "
+ "the individual field arguments should be set."
+ )
+
+ request = model_service.ListModelEvaluationSlicesRequest(request)
+
+ # If we have keyword arguments corresponding to fields on the
+ # request, apply these.
+
+ if parent is not None:
+ request.parent = parent
+
+ # Wrap the RPC method; this adds retry and timeout information,
+ # and friendly error handling.
+ rpc = gapic_v1.method_async.wrap_method(
+ self._client._transport.list_model_evaluation_slices,
+ default_timeout=None,
+ client_info=DEFAULT_CLIENT_INFO,
+ )
+
+ # Certain fields should be provided within the metadata header;
+ # add these here.
+ metadata = tuple(metadata) + (
+ gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)),
+ )
+
+ # Send the request.
+ response = await rpc(request, retry=retry, timeout=timeout, metadata=metadata,)
+
+ # This method is paged; wrap the response in a pager, which provides
+ # an `__aiter__` convenience method.
+ response = pagers.ListModelEvaluationSlicesAsyncPager(
+ method=rpc, request=request, response=response, metadata=metadata,
+ )
+
+ # Done; return the response.
+ return response
+
+
+try:
+ DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo(
+ gapic_version=pkg_resources.get_distribution(
+ "google-cloud-aiplatform",
+ ).version,
+ )
+except pkg_resources.DistributionNotFound:
+ DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo()
+
+
+__all__ = ("ModelServiceAsyncClient",)
diff --git a/google/cloud/aiplatform_v1/services/model_service/client.py b/google/cloud/aiplatform_v1/services/model_service/client.py
new file mode 100644
index 0000000000..fa75f3c22b
--- /dev/null
+++ b/google/cloud/aiplatform_v1/services/model_service/client.py
@@ -0,0 +1,1307 @@
+# -*- coding: utf-8 -*-
+
+# Copyright 2020 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+from collections import OrderedDict
+from distutils import util
+import os
+import re
+from typing import Callable, Dict, Optional, Sequence, Tuple, Type, Union
+import pkg_resources
+
+from google.api_core import client_options as client_options_lib # type: ignore
+from google.api_core import exceptions # type: ignore
+from google.api_core import gapic_v1 # type: ignore
+from google.api_core import retry as retries # type: ignore
+from google.auth import credentials # type: ignore
+from google.auth.transport import mtls # type: ignore
+from google.auth.transport.grpc import SslCredentials # type: ignore
+from google.auth.exceptions import MutualTLSChannelError # type: ignore
+from google.oauth2 import service_account # type: ignore
+
+from google.api_core import operation as ga_operation # type: ignore
+from google.api_core import operation_async # type: ignore
+from google.cloud.aiplatform_v1.services.model_service import pagers
+from google.cloud.aiplatform_v1.types import deployed_model_ref
+from google.cloud.aiplatform_v1.types import encryption_spec
+from google.cloud.aiplatform_v1.types import model
+from google.cloud.aiplatform_v1.types import model as gca_model
+from google.cloud.aiplatform_v1.types import model_evaluation
+from google.cloud.aiplatform_v1.types import model_evaluation_slice
+from google.cloud.aiplatform_v1.types import model_service
+from google.cloud.aiplatform_v1.types import operation as gca_operation
+from google.protobuf import empty_pb2 as empty # type: ignore
+from google.protobuf import field_mask_pb2 as field_mask # type: ignore
+from google.protobuf import struct_pb2 as struct # type: ignore
+from google.protobuf import timestamp_pb2 as timestamp # type: ignore
+
+from .transports.base import ModelServiceTransport, DEFAULT_CLIENT_INFO
+from .transports.grpc import ModelServiceGrpcTransport
+from .transports.grpc_asyncio import ModelServiceGrpcAsyncIOTransport
+
+
+class ModelServiceClientMeta(type):
+ """Metaclass for the ModelService client.
+
+ This provides class-level methods for building and retrieving
+ support objects (e.g. transport) without polluting the client instance
+ objects.
+ """
+
+ _transport_registry = OrderedDict() # type: Dict[str, Type[ModelServiceTransport]]
+ _transport_registry["grpc"] = ModelServiceGrpcTransport
+ _transport_registry["grpc_asyncio"] = ModelServiceGrpcAsyncIOTransport
+
+ def get_transport_class(cls, label: str = None,) -> Type[ModelServiceTransport]:
+ """Return an appropriate transport class.
+
+ Args:
+ label: The name of the desired transport. If none is
+ provided, then the first transport in the registry is used.
+
+ Returns:
+ The transport class to use.
+ """
+ # If a specific transport is requested, return that one.
+ if label:
+ return cls._transport_registry[label]
+
+ # No transport is requested; return the default (that is, the first one
+ # in the dictionary).
+ return next(iter(cls._transport_registry.values()))
+
+
+class ModelServiceClient(metaclass=ModelServiceClientMeta):
+ """A service for managing AI Platform's machine learning Models."""
+
+ @staticmethod
+ def _get_default_mtls_endpoint(api_endpoint):
+ """Convert api endpoint to mTLS endpoint.
+ Convert "*.sandbox.googleapis.com" and "*.googleapis.com" to
+ "*.mtls.sandbox.googleapis.com" and "*.mtls.googleapis.com" respectively.
+ Args:
+ api_endpoint (Optional[str]): the api endpoint to convert.
+ Returns:
+ str: converted mTLS api endpoint.
+ """
+ if not api_endpoint:
+ return api_endpoint
+
+ mtls_endpoint_re = re.compile(
+ r"(?P[^.]+)(?P\.mtls)?(?P\.sandbox)?(?P\.googleapis\.com)?"
+ )
+
+ m = mtls_endpoint_re.match(api_endpoint)
+ name, mtls, sandbox, googledomain = m.groups()
+ if mtls or not googledomain:
+ return api_endpoint
+
+ if sandbox:
+ return api_endpoint.replace(
+ "sandbox.googleapis.com", "mtls.sandbox.googleapis.com"
+ )
+
+ return api_endpoint.replace(".googleapis.com", ".mtls.googleapis.com")
+
+ DEFAULT_ENDPOINT = "aiplatform.googleapis.com"
+ DEFAULT_MTLS_ENDPOINT = _get_default_mtls_endpoint.__func__( # type: ignore
+ DEFAULT_ENDPOINT
+ )
+
+ @classmethod
+ def from_service_account_info(cls, info: dict, *args, **kwargs):
+ """Creates an instance of this client using the provided credentials info.
+
+ Args:
+ info (dict): The service account private key info.
+ args: Additional arguments to pass to the constructor.
+ kwargs: Additional arguments to pass to the constructor.
+
+ Returns:
+ ModelServiceClient: The constructed client.
+ """
+ credentials = service_account.Credentials.from_service_account_info(info)
+ kwargs["credentials"] = credentials
+ return cls(*args, **kwargs)
+
+ @classmethod
+ def from_service_account_file(cls, filename: str, *args, **kwargs):
+ """Creates an instance of this client using the provided credentials
+ file.
+
+ Args:
+ filename (str): The path to the service account private key json
+ file.
+ args: Additional arguments to pass to the constructor.
+ kwargs: Additional arguments to pass to the constructor.
+
+ Returns:
+ ModelServiceClient: The constructed client.
+ """
+ credentials = service_account.Credentials.from_service_account_file(filename)
+ kwargs["credentials"] = credentials
+ return cls(*args, **kwargs)
+
+ from_service_account_json = from_service_account_file
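+
+    # Illustrative construction from a key file (a sketch; the filename is a
+    # placeholder):
+    #
+    #     client = ModelServiceClient.from_service_account_file("service-account.json")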
+
+ @property
+ def transport(self) -> ModelServiceTransport:
+ """Return the transport used by the client instance.
+
+ Returns:
+ ModelServiceTransport: The transport used by the client instance.
+ """
+ return self._transport
+
+ @staticmethod
+ def endpoint_path(project: str, location: str, endpoint: str,) -> str:
+ """Return a fully-qualified endpoint string."""
+ return "projects/{project}/locations/{location}/endpoints/{endpoint}".format(
+ project=project, location=location, endpoint=endpoint,
+ )
+
+ @staticmethod
+ def parse_endpoint_path(path: str) -> Dict[str, str]:
+ """Parse a endpoint path into its component segments."""
+ m = re.match(
+ r"^projects/(?P.+?)/locations/(?P.+?)/endpoints/(?P.+?)$",
+ path,
+ )
+ return m.groupdict() if m else {}
+
+ @staticmethod
+ def model_path(project: str, location: str, model: str,) -> str:
+ """Return a fully-qualified model string."""
+ return "projects/{project}/locations/{location}/models/{model}".format(
+ project=project, location=location, model=model,
+ )
+
+ @staticmethod
+ def parse_model_path(path: str) -> Dict[str, str]:
+ """Parse a model path into its component segments."""
+ m = re.match(
+ r"^projects/(?P.+?)/locations/(?P.+?)/models/(?P.+?)$",
+ path,
+ )
+ return m.groupdict() if m else {}
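+
+    # For example (placeholder IDs), ``model_path`` and ``parse_model_path`` are
+    # inverses of each other:
+    #
+    #     ModelServiceClient.model_path("my-project", "us-central1", "123")
+    #         -> "projects/my-project/locations/us-central1/models/123"
+    #     ModelServiceClient.parse_model_path(
+    #         "projects/my-project/locations/us-central1/models/123"
+    #     ) -> {"project": "my-project", "location": "us-central1", "model": "123"}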
+
+ @staticmethod
+ def model_evaluation_path(
+ project: str, location: str, model: str, evaluation: str,
+ ) -> str:
+ """Return a fully-qualified model_evaluation string."""
+ return "projects/{project}/locations/{location}/models/{model}/evaluations/{evaluation}".format(
+ project=project, location=location, model=model, evaluation=evaluation,
+ )
+
+ @staticmethod
+ def parse_model_evaluation_path(path: str) -> Dict[str, str]:
+ """Parse a model_evaluation path into its component segments."""
+ m = re.match(
+ r"^projects/(?P.+?)/locations/(?P.+?)/models/(?P.+?)/evaluations/(?P.+?)$",
+ path,
+ )
+ return m.groupdict() if m else {}
+
+ @staticmethod
+ def model_evaluation_slice_path(
+ project: str, location: str, model: str, evaluation: str, slice: str,
+ ) -> str:
+ """Return a fully-qualified model_evaluation_slice string."""
+ return "projects/{project}/locations/{location}/models/{model}/evaluations/{evaluation}/slices/{slice}".format(
+ project=project,
+ location=location,
+ model=model,
+ evaluation=evaluation,
+ slice=slice,
+ )
+
+ @staticmethod
+ def parse_model_evaluation_slice_path(path: str) -> Dict[str, str]:
+ """Parse a model_evaluation_slice path into its component segments."""
+ m = re.match(
+ r"^projects/(?P.+?)/locations/(?P.+?)/models/(?P.+?)/evaluations/(?P.+?)/slices/(?P.+?)$",
+ path,
+ )
+ return m.groupdict() if m else {}
+
+ @staticmethod
+ def training_pipeline_path(
+ project: str, location: str, training_pipeline: str,
+ ) -> str:
+ """Return a fully-qualified training_pipeline string."""
+ return "projects/{project}/locations/{location}/trainingPipelines/{training_pipeline}".format(
+ project=project, location=location, training_pipeline=training_pipeline,
+ )
+
+ @staticmethod
+ def parse_training_pipeline_path(path: str) -> Dict[str, str]:
+ """Parse a training_pipeline path into its component segments."""
+ m = re.match(
+ r"^projects/(?P.+?)/locations/(?P.+?)/trainingPipelines/(?P.+?)$",
+ path,
+ )
+ return m.groupdict() if m else {}
+
+ @staticmethod
+ def common_billing_account_path(billing_account: str,) -> str:
+ """Return a fully-qualified billing_account string."""
+ return "billingAccounts/{billing_account}".format(
+ billing_account=billing_account,
+ )
+
+ @staticmethod
+ def parse_common_billing_account_path(path: str) -> Dict[str, str]:
+ """Parse a billing_account path into its component segments."""
+ m = re.match(r"^billingAccounts/(?P.+?)$", path)
+ return m.groupdict() if m else {}
+
+ @staticmethod
+ def common_folder_path(folder: str,) -> str:
+ """Return a fully-qualified folder string."""
+ return "folders/{folder}".format(folder=folder,)
+
+ @staticmethod
+ def parse_common_folder_path(path: str) -> Dict[str, str]:
+ """Parse a folder path into its component segments."""
+ m = re.match(r"^folders/(?P.+?)$", path)
+ return m.groupdict() if m else {}
+
+ @staticmethod
+ def common_organization_path(organization: str,) -> str:
+ """Return a fully-qualified organization string."""
+ return "organizations/{organization}".format(organization=organization,)
+
+ @staticmethod
+ def parse_common_organization_path(path: str) -> Dict[str, str]:
+ """Parse a organization path into its component segments."""
+ m = re.match(r"^organizations/(?P.+?)$", path)
+ return m.groupdict() if m else {}
+
+ @staticmethod
+ def common_project_path(project: str,) -> str:
+ """Return a fully-qualified project string."""
+ return "projects/{project}".format(project=project,)
+
+ @staticmethod
+ def parse_common_project_path(path: str) -> Dict[str, str]:
+ """Parse a project path into its component segments."""
+ m = re.match(r"^projects/(?P.+?)$", path)
+ return m.groupdict() if m else {}
+
+ @staticmethod
+ def common_location_path(project: str, location: str,) -> str:
+ """Return a fully-qualified location string."""
+ return "projects/{project}/locations/{location}".format(
+ project=project, location=location,
+ )
+
+ @staticmethod
+ def parse_common_location_path(path: str) -> Dict[str, str]:
+ """Parse a location path into its component segments."""
+ m = re.match(r"^projects/(?P.+?)/locations/(?P.+?)$", path)
+ return m.groupdict() if m else {}
+
+ def __init__(
+ self,
+ *,
+ credentials: Optional[credentials.Credentials] = None,
+ transport: Union[str, ModelServiceTransport, None] = None,
+ client_options: Optional[client_options_lib.ClientOptions] = None,
+ client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
+ ) -> None:
+ """Instantiate the model service client.
+
+ Args:
+ credentials (Optional[google.auth.credentials.Credentials]): The
+ authorization credentials to attach to requests. These
+ credentials identify the application to the service; if none
+ are specified, the client will attempt to ascertain the
+ credentials from the environment.
+ transport (Union[str, ModelServiceTransport]): The
+ transport to use. If set to None, a transport is chosen
+ automatically.
+ client_options (google.api_core.client_options.ClientOptions): Custom options for the
+ client. It won't take effect if a ``transport`` instance is provided.
+ (1) The ``api_endpoint`` property can be used to override the
+ default endpoint provided by the client. GOOGLE_API_USE_MTLS_ENDPOINT
+ environment variable can also be used to override the endpoint:
+ "always" (always use the default mTLS endpoint), "never" (always
+ use the default regular endpoint) and "auto" (auto switch to the
+ default mTLS endpoint if client certificate is present, this is
+ the default value). However, the ``api_endpoint`` property takes
+ precedence if provided.
+ (2) If GOOGLE_API_USE_CLIENT_CERTIFICATE environment variable
+ is "true", then the ``client_cert_source`` property can be used
+ to provide client certificate for mutual TLS transport. If
+ not provided, the default SSL client certificate will be used if
+ present. If GOOGLE_API_USE_CLIENT_CERTIFICATE is "false" or not
+ set, no client certificate will be used.
+ client_info (google.api_core.gapic_v1.client_info.ClientInfo):
+ The client info used to send a user-agent string along with
+ API requests. If ``None``, then default info will be used.
+ Generally, you only need to set this if you're developing
+ your own client library.
+
+ Raises:
+ google.auth.exceptions.MutualTLSChannelError: If mutual TLS transport
+ creation failed for any reason.
+ """
+ if isinstance(client_options, dict):
+ client_options = client_options_lib.from_dict(client_options)
+ if client_options is None:
+ client_options = client_options_lib.ClientOptions()
+
+ # Create SSL credentials for mutual TLS if needed.
+ use_client_cert = bool(
+ util.strtobool(os.getenv("GOOGLE_API_USE_CLIENT_CERTIFICATE", "false"))
+ )
+
+ client_cert_source_func = None
+ is_mtls = False
+ if use_client_cert:
+ if client_options.client_cert_source:
+ is_mtls = True
+ client_cert_source_func = client_options.client_cert_source
+ else:
+ is_mtls = mtls.has_default_client_cert_source()
+ client_cert_source_func = (
+ mtls.default_client_cert_source() if is_mtls else None
+ )
+
+ # Figure out which api endpoint to use.
+ if client_options.api_endpoint is not None:
+ api_endpoint = client_options.api_endpoint
+ else:
+ use_mtls_env = os.getenv("GOOGLE_API_USE_MTLS_ENDPOINT", "auto")
+ if use_mtls_env == "never":
+ api_endpoint = self.DEFAULT_ENDPOINT
+ elif use_mtls_env == "always":
+ api_endpoint = self.DEFAULT_MTLS_ENDPOINT
+ elif use_mtls_env == "auto":
+ api_endpoint = (
+ self.DEFAULT_MTLS_ENDPOINT if is_mtls else self.DEFAULT_ENDPOINT
+ )
+ else:
+ raise MutualTLSChannelError(
+ "Unsupported GOOGLE_API_USE_MTLS_ENDPOINT value. Accepted values: never, auto, always"
+ )
+
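+        # For example (illustrative): with neither GOOGLE_API_USE_MTLS_ENDPOINT nor
+        # GOOGLE_API_USE_CLIENT_CERTIFICATE set, ``api_endpoint`` resolves to
+        # DEFAULT_ENDPOINT, i.e. "aiplatform.googleapis.com".
+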
+ # Save or instantiate the transport.
+ # Ordinarily, we provide the transport, but allowing a custom transport
+ # instance provides an extensibility point for unusual situations.
+ if isinstance(transport, ModelServiceTransport):
+ # transport is a ModelServiceTransport instance.
+ if credentials or client_options.credentials_file:
+ raise ValueError(
+ "When providing a transport instance, "
+ "provide its credentials directly."
+ )
+ if client_options.scopes:
+ raise ValueError(
+ "When providing a transport instance, "
+ "provide its scopes directly."
+ )
+ self._transport = transport
+ else:
+ Transport = type(self).get_transport_class(transport)
+ self._transport = Transport(
+ credentials=credentials,
+ credentials_file=client_options.credentials_file,
+ host=api_endpoint,
+ scopes=client_options.scopes,
+ client_cert_source_for_mtls=client_cert_source_func,
+ quota_project_id=client_options.quota_project_id,
+ client_info=client_info,
+ )
+
+ def upload_model(
+ self,
+ request: model_service.UploadModelRequest = None,
+ *,
+ parent: str = None,
+ model: gca_model.Model = None,
+ retry: retries.Retry = gapic_v1.method.DEFAULT,
+ timeout: float = None,
+ metadata: Sequence[Tuple[str, str]] = (),
+ ) -> ga_operation.Operation:
+ r"""Uploads a Model artifact into AI Platform.
+
+ Args:
+ request (google.cloud.aiplatform_v1.types.UploadModelRequest):
+ The request object. Request message for
+ ``ModelService.UploadModel``.
+ parent (str):
+ Required. The resource name of the Location into which
+ to upload the Model. Format:
+ ``projects/{project}/locations/{location}``
+
+ This corresponds to the ``parent`` field
+ on the ``request`` instance; if ``request`` is provided, this
+ should not be set.
+ model (google.cloud.aiplatform_v1.types.Model):
+ Required. The Model to create.
+ This corresponds to the ``model`` field
+ on the ``request`` instance; if ``request`` is provided, this
+ should not be set.
+
+ retry (google.api_core.retry.Retry): Designation of what errors, if any,
+ should be retried.
+ timeout (float): The timeout for this request.
+ metadata (Sequence[Tuple[str, str]]): Strings which should be
+ sent along with the request as metadata.
+
+ Returns:
+ google.api_core.operation.Operation:
+ An object representing a long-running operation.
+
+ The result type for the operation will be
+ :class:`google.cloud.aiplatform_v1.types.UploadModelResponse`
+ Response message of
+ ``ModelService.UploadModel``
+ operation.
+
+ """
+ # Create or coerce a protobuf request object.
+ # Sanity check: If we got a request object, we should *not* have
+ # gotten any keyword arguments that map to the request.
+ has_flattened_params = any([parent, model])
+ if request is not None and has_flattened_params:
+ raise ValueError(
+ "If the `request` argument is set, then none of "
+ "the individual field arguments should be set."
+ )
+
+ # Minor optimization to avoid making a copy if the user passes
+ # in a model_service.UploadModelRequest.
+ # There's no risk of modifying the input as we've already verified
+ # there are no flattened fields.
+ if not isinstance(request, model_service.UploadModelRequest):
+ request = model_service.UploadModelRequest(request)
+
+ # If we have keyword arguments corresponding to fields on the
+ # request, apply these.
+
+ if parent is not None:
+ request.parent = parent
+ if model is not None:
+ request.model = model
+
+ # Wrap the RPC method; this adds retry and timeout information,
+ # and friendly error handling.
+ rpc = self._transport._wrapped_methods[self._transport.upload_model]
+
+ # Certain fields should be provided within the metadata header;
+ # add these here.
+ metadata = tuple(metadata) + (
+ gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)),
+ )
+
+ # Send the request.
+ response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,)
+
+ # Wrap the response in an operation future.
+ response = ga_operation.from_gapic(
+ response,
+ self._transport.operations_client,
+ model_service.UploadModelResponse,
+ metadata_type=model_service.UploadModelOperationMetadata,
+ )
+
+ # Done; return the response.
+ return response
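+
+    # Illustrative call to the ``upload_model`` method above (a sketch; resource
+    # names are placeholders, and ``result()`` blocks until the LRO completes):
+    #
+    #     operation = client.upload_model(
+    #         parent="projects/my-project/locations/us-central1",
+    #         model=gca_model.Model(display_name="my-model"),
+    #     )
+    #     uploaded = operation.result()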
+
+ def get_model(
+ self,
+ request: model_service.GetModelRequest = None,
+ *,
+ name: str = None,
+ retry: retries.Retry = gapic_v1.method.DEFAULT,
+ timeout: float = None,
+ metadata: Sequence[Tuple[str, str]] = (),
+ ) -> model.Model:
+ r"""Gets a Model.
+
+ Args:
+ request (google.cloud.aiplatform_v1.types.GetModelRequest):
+ The request object. Request message for
+ ``ModelService.GetModel``.
+ name (str):
+ Required. The name of the Model resource. Format:
+ ``projects/{project}/locations/{location}/models/{model}``
+
+ This corresponds to the ``name`` field
+ on the ``request`` instance; if ``request`` is provided, this
+ should not be set.
+
+ retry (google.api_core.retry.Retry): Designation of what errors, if any,
+ should be retried.
+ timeout (float): The timeout for this request.
+ metadata (Sequence[Tuple[str, str]]): Strings which should be
+ sent along with the request as metadata.
+
+ Returns:
+ google.cloud.aiplatform_v1.types.Model:
+ A trained machine learning Model.
+ """
+ # Create or coerce a protobuf request object.
+ # Sanity check: If we got a request object, we should *not* have
+ # gotten any keyword arguments that map to the request.
+ has_flattened_params = any([name])
+ if request is not None and has_flattened_params:
+ raise ValueError(
+ "If the `request` argument is set, then none of "
+ "the individual field arguments should be set."
+ )
+
+ # Minor optimization to avoid making a copy if the user passes
+ # in a model_service.GetModelRequest.
+ # There's no risk of modifying the input as we've already verified
+ # there are no flattened fields.
+ if not isinstance(request, model_service.GetModelRequest):
+ request = model_service.GetModelRequest(request)
+
+ # If we have keyword arguments corresponding to fields on the
+ # request, apply these.
+
+ if name is not None:
+ request.name = name
+
+ # Wrap the RPC method; this adds retry and timeout information,
+ # and friendly error handling.
+ rpc = self._transport._wrapped_methods[self._transport.get_model]
+
+ # Certain fields should be provided within the metadata header;
+ # add these here.
+ metadata = tuple(metadata) + (
+ gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)),
+ )
+
+ # Send the request.
+ response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,)
+
+ # Done; return the response.
+ return response
+
+ def list_models(
+ self,
+ request: model_service.ListModelsRequest = None,
+ *,
+ parent: str = None,
+ retry: retries.Retry = gapic_v1.method.DEFAULT,
+ timeout: float = None,
+ metadata: Sequence[Tuple[str, str]] = (),
+ ) -> pagers.ListModelsPager:
+ r"""Lists Models in a Location.
+
+ Args:
+ request (google.cloud.aiplatform_v1.types.ListModelsRequest):
+ The request object. Request message for
+ ``ModelService.ListModels``.
+ parent (str):
+ Required. The resource name of the Location to list the
+ Models from. Format:
+ ``projects/{project}/locations/{location}``
+
+ This corresponds to the ``parent`` field
+ on the ``request`` instance; if ``request`` is provided, this
+ should not be set.
+
+ retry (google.api_core.retry.Retry): Designation of what errors, if any,
+ should be retried.
+ timeout (float): The timeout for this request.
+ metadata (Sequence[Tuple[str, str]]): Strings which should be
+ sent along with the request as metadata.
+
+ Returns:
+ google.cloud.aiplatform_v1.services.model_service.pagers.ListModelsPager:
+ Response message for
+ ``ModelService.ListModels``
+
+ Iterating over this object will yield results and
+ resolve additional pages automatically.
+
+ """
+ # Create or coerce a protobuf request object.
+ # Sanity check: If we got a request object, we should *not* have
+ # gotten any keyword arguments that map to the request.
+ has_flattened_params = any([parent])
+ if request is not None and has_flattened_params:
+ raise ValueError(
+ "If the `request` argument is set, then none of "
+ "the individual field arguments should be set."
+ )
+
+ # Minor optimization to avoid making a copy if the user passes
+ # in a model_service.ListModelsRequest.
+ # There's no risk of modifying the input as we've already verified
+ # there are no flattened fields.
+ if not isinstance(request, model_service.ListModelsRequest):
+ request = model_service.ListModelsRequest(request)
+
+ # If we have keyword arguments corresponding to fields on the
+ # request, apply these.
+
+ if parent is not None:
+ request.parent = parent
+
+ # Wrap the RPC method; this adds retry and timeout information,
+ # and friendly error handling.
+ rpc = self._transport._wrapped_methods[self._transport.list_models]
+
+ # Certain fields should be provided within the metadata header;
+ # add these here.
+ metadata = tuple(metadata) + (
+ gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)),
+ )
+
+ # Send the request.
+ response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,)
+
+ # This method is paged; wrap the response in a pager, which provides
+ # an `__iter__` convenience method.
+ response = pagers.ListModelsPager(
+ method=rpc, request=request, response=response, metadata=metadata,
+ )
+
+ # Done; return the response.
+ return response
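+
+    # Illustrative iteration over the ``list_models`` pager above (the parent is a
+    # placeholder):
+    #
+    #     for model in client.list_models(
+    #         parent="projects/my-project/locations/us-central1"
+    #     ):
+    #         print(model.display_name)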
+
+ def update_model(
+ self,
+ request: model_service.UpdateModelRequest = None,
+ *,
+ model: gca_model.Model = None,
+ update_mask: field_mask.FieldMask = None,
+ retry: retries.Retry = gapic_v1.method.DEFAULT,
+ timeout: float = None,
+ metadata: Sequence[Tuple[str, str]] = (),
+ ) -> gca_model.Model:
+ r"""Updates a Model.
+
+ Args:
+ request (google.cloud.aiplatform_v1.types.UpdateModelRequest):
+ The request object. Request message for
+ ``ModelService.UpdateModel``.
+ model (google.cloud.aiplatform_v1.types.Model):
+ Required. The Model which replaces
+ the resource on the server.
+
+ This corresponds to the ``model`` field
+ on the ``request`` instance; if ``request`` is provided, this
+ should not be set.
+ update_mask (google.protobuf.field_mask_pb2.FieldMask):
+ Required. The update mask applies to the resource. For
+ the ``FieldMask`` definition, see
+                `FieldMask <https://developers.google.com/protocol-buffers/docs/reference/google.protobuf#fieldmask>`__.
+
+ This corresponds to the ``update_mask`` field
+ on the ``request`` instance; if ``request`` is provided, this
+ should not be set.
+
+ retry (google.api_core.retry.Retry): Designation of what errors, if any,
+ should be retried.
+ timeout (float): The timeout for this request.
+ metadata (Sequence[Tuple[str, str]]): Strings which should be
+ sent along with the request as metadata.
+
+ Returns:
+ google.cloud.aiplatform_v1.types.Model:
+ A trained machine learning Model.
+ """
+ # Create or coerce a protobuf request object.
+ # Sanity check: If we got a request object, we should *not* have
+ # gotten any keyword arguments that map to the request.
+ has_flattened_params = any([model, update_mask])
+ if request is not None and has_flattened_params:
+ raise ValueError(
+ "If the `request` argument is set, then none of "
+ "the individual field arguments should be set."
+ )
+
+ # Minor optimization to avoid making a copy if the user passes
+ # in a model_service.UpdateModelRequest.
+ # There's no risk of modifying the input as we've already verified
+ # there are no flattened fields.
+ if not isinstance(request, model_service.UpdateModelRequest):
+ request = model_service.UpdateModelRequest(request)
+
+ # If we have keyword arguments corresponding to fields on the
+ # request, apply these.
+
+ if model is not None:
+ request.model = model
+ if update_mask is not None:
+ request.update_mask = update_mask
+
+ # Wrap the RPC method; this adds retry and timeout information,
+ # and friendly error handling.
+ rpc = self._transport._wrapped_methods[self._transport.update_model]
+
+ # Certain fields should be provided within the metadata header;
+ # add these here.
+ metadata = tuple(metadata) + (
+ gapic_v1.routing_header.to_grpc_metadata(
+ (("model.name", request.model.name),)
+ ),
+ )
+
+ # Send the request.
+ response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,)
+
+ # Done; return the response.
+ return response
+
+ def delete_model(
+ self,
+ request: model_service.DeleteModelRequest = None,
+ *,
+ name: str = None,
+ retry: retries.Retry = gapic_v1.method.DEFAULT,
+ timeout: float = None,
+ metadata: Sequence[Tuple[str, str]] = (),
+ ) -> ga_operation.Operation:
+ r"""Deletes a Model.
+ Note: Model can only be deleted if there are no
+ DeployedModels created from it.
+
+ Args:
+ request (google.cloud.aiplatform_v1.types.DeleteModelRequest):
+ The request object. Request message for
+ ``ModelService.DeleteModel``.
+ name (str):
+ Required. The name of the Model resource to be deleted.
+ Format:
+ ``projects/{project}/locations/{location}/models/{model}``
+
+ This corresponds to the ``name`` field
+ on the ``request`` instance; if ``request`` is provided, this
+ should not be set.
+
+ retry (google.api_core.retry.Retry): Designation of what errors, if any,
+ should be retried.
+ timeout (float): The timeout for this request.
+ metadata (Sequence[Tuple[str, str]]): Strings which should be
+ sent along with the request as metadata.
+
+ Returns:
+ google.api_core.operation.Operation:
+ An object representing a long-running operation.
+
+ The result type for the operation will be :class:`google.protobuf.empty_pb2.Empty` A generic empty message that you can re-use to avoid defining duplicated
+ empty messages in your APIs. A typical example is to
+ use it as the request or the response type of an API
+ method. For instance:
+
+ service Foo {
+ rpc Bar(google.protobuf.Empty) returns
+ (google.protobuf.Empty);
+
+ }
+
+ The JSON representation for Empty is empty JSON
+ object {}.
+
+ """
+ # Create or coerce a protobuf request object.
+ # Sanity check: If we got a request object, we should *not* have
+ # gotten any keyword arguments that map to the request.
+ has_flattened_params = any([name])
+ if request is not None and has_flattened_params:
+ raise ValueError(
+ "If the `request` argument is set, then none of "
+ "the individual field arguments should be set."
+ )
+
+ # Minor optimization to avoid making a copy if the user passes
+ # in a model_service.DeleteModelRequest.
+ # There's no risk of modifying the input as we've already verified
+ # there are no flattened fields.
+ if not isinstance(request, model_service.DeleteModelRequest):
+ request = model_service.DeleteModelRequest(request)
+
+ # If we have keyword arguments corresponding to fields on the
+ # request, apply these.
+
+ if name is not None:
+ request.name = name
+
+ # Wrap the RPC method; this adds retry and timeout information,
+ # and friendly error handling.
+ rpc = self._transport._wrapped_methods[self._transport.delete_model]
+
+ # Certain fields should be provided within the metadata header;
+ # add these here.
+ metadata = tuple(metadata) + (
+ gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)),
+ )
+
+ # Send the request.
+ response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,)
+
+ # Wrap the response in an operation future.
+ response = ga_operation.from_gapic(
+ response,
+ self._transport.operations_client,
+ empty.Empty,
+ metadata_type=gca_operation.DeleteOperationMetadata,
+ )
+
+ # Done; return the response.
+ return response
+
+ def export_model(
+ self,
+ request: model_service.ExportModelRequest = None,
+ *,
+ name: str = None,
+ output_config: model_service.ExportModelRequest.OutputConfig = None,
+ retry: retries.Retry = gapic_v1.method.DEFAULT,
+ timeout: float = None,
+ metadata: Sequence[Tuple[str, str]] = (),
+ ) -> ga_operation.Operation:
+ r"""Exports a trained, exportable, Model to a location specified by
+ the user. A Model is considered to be exportable if it has at
+ least one [supported export
+ format][google.cloud.aiplatform.v1.Model.supported_export_formats].
+
+ Args:
+ request (google.cloud.aiplatform_v1.types.ExportModelRequest):
+ The request object. Request message for
+ ``ModelService.ExportModel``.
+ name (str):
+ Required. The resource name of the Model to export.
+ Format:
+ ``projects/{project}/locations/{location}/models/{model}``
+
+ This corresponds to the ``name`` field
+ on the ``request`` instance; if ``request`` is provided, this
+ should not be set.
+ output_config (google.cloud.aiplatform_v1.types.ExportModelRequest.OutputConfig):
+ Required. The desired output location
+ and configuration.
+
+ This corresponds to the ``output_config`` field
+ on the ``request`` instance; if ``request`` is provided, this
+ should not be set.
+
+ retry (google.api_core.retry.Retry): Designation of what errors, if any,
+ should be retried.
+ timeout (float): The timeout for this request.
+ metadata (Sequence[Tuple[str, str]]): Strings which should be
+ sent along with the request as metadata.
+
+ Returns:
+ google.api_core.operation.Operation:
+ An object representing a long-running operation.
+
+ The result type for the operation will be
+ :class:`google.cloud.aiplatform_v1.types.ExportModelResponse`
+ Response message of
+ ``ModelService.ExportModel``
+ operation.
+
+ """
+ # Create or coerce a protobuf request object.
+ # Sanity check: If we got a request object, we should *not* have
+ # gotten any keyword arguments that map to the request.
+ has_flattened_params = any([name, output_config])
+ if request is not None and has_flattened_params:
+ raise ValueError(
+ "If the `request` argument is set, then none of "
+ "the individual field arguments should be set."
+ )
+
+ # Minor optimization to avoid making a copy if the user passes
+ # in a model_service.ExportModelRequest.
+ # There's no risk of modifying the input as we've already verified
+ # there are no flattened fields.
+ if not isinstance(request, model_service.ExportModelRequest):
+ request = model_service.ExportModelRequest(request)
+
+ # If we have keyword arguments corresponding to fields on the
+ # request, apply these.
+
+ if name is not None:
+ request.name = name
+ if output_config is not None:
+ request.output_config = output_config
+
+ # Wrap the RPC method; this adds retry and timeout information,
+ # and friendly error handling.
+ rpc = self._transport._wrapped_methods[self._transport.export_model]
+
+ # Certain fields should be provided within the metadata header;
+ # add these here.
+ metadata = tuple(metadata) + (
+ gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)),
+ )
+
+ # Send the request.
+ response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,)
+
+ # Wrap the response in an operation future.
+ response = ga_operation.from_gapic(
+ response,
+ self._transport.operations_client,
+ model_service.ExportModelResponse,
+ metadata_type=model_service.ExportModelOperationMetadata,
+ )
+
+ # Done; return the response.
+ return response
+
+ def get_model_evaluation(
+ self,
+ request: model_service.GetModelEvaluationRequest = None,
+ *,
+ name: str = None,
+ retry: retries.Retry = gapic_v1.method.DEFAULT,
+ timeout: float = None,
+ metadata: Sequence[Tuple[str, str]] = (),
+ ) -> model_evaluation.ModelEvaluation:
+ r"""Gets a ModelEvaluation.
+
+ Args:
+ request (google.cloud.aiplatform_v1.types.GetModelEvaluationRequest):
+ The request object. Request message for
+ ``ModelService.GetModelEvaluation``.
+ name (str):
+ Required. The name of the ModelEvaluation resource.
+ Format:
+
+ ``projects/{project}/locations/{location}/models/{model}/evaluations/{evaluation}``
+
+ This corresponds to the ``name`` field
+ on the ``request`` instance; if ``request`` is provided, this
+ should not be set.
+
+ retry (google.api_core.retry.Retry): Designation of what errors, if any,
+ should be retried.
+ timeout (float): The timeout for this request.
+ metadata (Sequence[Tuple[str, str]]): Strings which should be
+ sent along with the request as metadata.
+
+ Returns:
+ google.cloud.aiplatform_v1.types.ModelEvaluation:
+ A collection of metrics calculated by
+ comparing Model's predictions on all of
+ the test data against annotations from
+ the test data.
+
+ """
+ # Create or coerce a protobuf request object.
+ # Sanity check: If we got a request object, we should *not* have
+ # gotten any keyword arguments that map to the request.
+ has_flattened_params = any([name])
+ if request is not None and has_flattened_params:
+ raise ValueError(
+ "If the `request` argument is set, then none of "
+ "the individual field arguments should be set."
+ )
+
+ # Minor optimization to avoid making a copy if the user passes
+ # in a model_service.GetModelEvaluationRequest.
+ # There's no risk of modifying the input as we've already verified
+ # there are no flattened fields.
+ if not isinstance(request, model_service.GetModelEvaluationRequest):
+ request = model_service.GetModelEvaluationRequest(request)
+
+ # If we have keyword arguments corresponding to fields on the
+ # request, apply these.
+
+ if name is not None:
+ request.name = name
+
+ # Wrap the RPC method; this adds retry and timeout information,
+ # and friendly error handling.
+ rpc = self._transport._wrapped_methods[self._transport.get_model_evaluation]
+
+ # Certain fields should be provided within the metadata header;
+ # add these here.
+ metadata = tuple(metadata) + (
+ gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)),
+ )
+
+ # Send the request.
+ response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,)
+
+ # Done; return the response.
+ return response
+
+ def list_model_evaluations(
+ self,
+ request: model_service.ListModelEvaluationsRequest = None,
+ *,
+ parent: str = None,
+ retry: retries.Retry = gapic_v1.method.DEFAULT,
+ timeout: float = None,
+ metadata: Sequence[Tuple[str, str]] = (),
+ ) -> pagers.ListModelEvaluationsPager:
+ r"""Lists ModelEvaluations in a Model.
+
+ Args:
+ request (google.cloud.aiplatform_v1.types.ListModelEvaluationsRequest):
+ The request object. Request message for
+ ``ModelService.ListModelEvaluations``.
+ parent (str):
+ Required. The resource name of the Model to list the
+ ModelEvaluations from. Format:
+ ``projects/{project}/locations/{location}/models/{model}``
+
+ This corresponds to the ``parent`` field
+ on the ``request`` instance; if ``request`` is provided, this
+ should not be set.
+
+ retry (google.api_core.retry.Retry): Designation of what errors, if any,
+ should be retried.
+ timeout (float): The timeout for this request.
+ metadata (Sequence[Tuple[str, str]]): Strings which should be
+ sent along with the request as metadata.
+
+ Returns:
+ google.cloud.aiplatform_v1.services.model_service.pagers.ListModelEvaluationsPager:
+ Response message for
+ ``ModelService.ListModelEvaluations``.
+
+ Iterating over this object will yield results and
+ resolve additional pages automatically.
+
+ """
+ # Create or coerce a protobuf request object.
+ # Sanity check: If we got a request object, we should *not* have
+ # gotten any keyword arguments that map to the request.
+ has_flattened_params = any([parent])
+ if request is not None and has_flattened_params:
+ raise ValueError(
+ "If the `request` argument is set, then none of "
+ "the individual field arguments should be set."
+ )
+
+ # Minor optimization to avoid making a copy if the user passes
+ # in a model_service.ListModelEvaluationsRequest.
+ # There's no risk of modifying the input as we've already verified
+ # there are no flattened fields.
+ if not isinstance(request, model_service.ListModelEvaluationsRequest):
+ request = model_service.ListModelEvaluationsRequest(request)
+
+ # If we have keyword arguments corresponding to fields on the
+ # request, apply these.
+
+ if parent is not None:
+ request.parent = parent
+
+ # Wrap the RPC method; this adds retry and timeout information,
+ # and friendly error handling.
+ rpc = self._transport._wrapped_methods[self._transport.list_model_evaluations]
+
+ # Certain fields should be provided within the metadata header;
+ # add these here.
+ metadata = tuple(metadata) + (
+ gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)),
+ )
+
+ # Send the request.
+ response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,)
+
+ # This method is paged; wrap the response in a pager, which provides
+ # an `__iter__` convenience method.
+ response = pagers.ListModelEvaluationsPager(
+ method=rpc, request=request, response=response, metadata=metadata,
+ )
+
+ # Done; return the response.
+ return response
+
+ def get_model_evaluation_slice(
+ self,
+ request: model_service.GetModelEvaluationSliceRequest = None,
+ *,
+ name: str = None,
+ retry: retries.Retry = gapic_v1.method.DEFAULT,
+ timeout: float = None,
+ metadata: Sequence[Tuple[str, str]] = (),
+ ) -> model_evaluation_slice.ModelEvaluationSlice:
+ r"""Gets a ModelEvaluationSlice.
+
+ Args:
+ request (google.cloud.aiplatform_v1.types.GetModelEvaluationSliceRequest):
+ The request object. Request message for
+ ``ModelService.GetModelEvaluationSlice``.
+ name (str):
+ Required. The name of the ModelEvaluationSlice resource.
+ Format:
+
+ ``projects/{project}/locations/{location}/models/{model}/evaluations/{evaluation}/slices/{slice}``
+
+ This corresponds to the ``name`` field
+ on the ``request`` instance; if ``request`` is provided, this
+ should not be set.
+
+ retry (google.api_core.retry.Retry): Designation of what errors, if any,
+ should be retried.
+ timeout (float): The timeout for this request.
+ metadata (Sequence[Tuple[str, str]]): Strings which should be
+ sent along with the request as metadata.
+
+ Returns:
+ google.cloud.aiplatform_v1.types.ModelEvaluationSlice:
+ A collection of metrics calculated by
+ comparing Model's predictions on a slice
+ of the test data against ground truth
+ annotations.
+
+ """
+ # Create or coerce a protobuf request object.
+ # Sanity check: If we got a request object, we should *not* have
+ # gotten any keyword arguments that map to the request.
+ has_flattened_params = any([name])
+ if request is not None and has_flattened_params:
+ raise ValueError(
+ "If the `request` argument is set, then none of "
+ "the individual field arguments should be set."
+ )
+
+ # Minor optimization to avoid making a copy if the user passes
+ # in a model_service.GetModelEvaluationSliceRequest.
+ # There's no risk of modifying the input as we've already verified
+ # there are no flattened fields.
+ if not isinstance(request, model_service.GetModelEvaluationSliceRequest):
+ request = model_service.GetModelEvaluationSliceRequest(request)
+
+ # If we have keyword arguments corresponding to fields on the
+ # request, apply these.
+
+ if name is not None:
+ request.name = name
+
+ # Wrap the RPC method; this adds retry and timeout information,
+ # and friendly error handling.
+ rpc = self._transport._wrapped_methods[
+ self._transport.get_model_evaluation_slice
+ ]
+
+ # Certain fields should be provided within the metadata header;
+ # add these here.
+ metadata = tuple(metadata) + (
+ gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)),
+ )
+
+ # Send the request.
+ response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,)
+
+ # Done; return the response.
+ return response
+
+ def list_model_evaluation_slices(
+ self,
+ request: model_service.ListModelEvaluationSlicesRequest = None,
+ *,
+ parent: str = None,
+ retry: retries.Retry = gapic_v1.method.DEFAULT,
+ timeout: float = None,
+ metadata: Sequence[Tuple[str, str]] = (),
+ ) -> pagers.ListModelEvaluationSlicesPager:
+ r"""Lists ModelEvaluationSlices in a ModelEvaluation.
+
+ Args:
+ request (google.cloud.aiplatform_v1.types.ListModelEvaluationSlicesRequest):
+ The request object. Request message for
+ ``ModelService.ListModelEvaluationSlices``.
+ parent (str):
+ Required. The resource name of the ModelEvaluation to
+ list the ModelEvaluationSlices from. Format:
+
+ ``projects/{project}/locations/{location}/models/{model}/evaluations/{evaluation}``
+
+ This corresponds to the ``parent`` field
+ on the ``request`` instance; if ``request`` is provided, this
+ should not be set.
+
+ retry (google.api_core.retry.Retry): Designation of what errors, if any,
+ should be retried.
+ timeout (float): The timeout for this request.
+ metadata (Sequence[Tuple[str, str]]): Strings which should be
+ sent along with the request as metadata.
+
+ Returns:
+ google.cloud.aiplatform_v1.services.model_service.pagers.ListModelEvaluationSlicesPager:
+ Response message for
+ ``ModelService.ListModelEvaluationSlices``.
+
+ Iterating over this object will yield results and
+ resolve additional pages automatically.
+
+ """
+ # Create or coerce a protobuf request object.
+ # Sanity check: If we got a request object, we should *not* have
+ # gotten any keyword arguments that map to the request.
+ has_flattened_params = any([parent])
+ if request is not None and has_flattened_params:
+ raise ValueError(
+ "If the `request` argument is set, then none of "
+ "the individual field arguments should be set."
+ )
+
+ # Minor optimization to avoid making a copy if the user passes
+ # in a model_service.ListModelEvaluationSlicesRequest.
+ # There's no risk of modifying the input as we've already verified
+ # there are no flattened fields.
+ if not isinstance(request, model_service.ListModelEvaluationSlicesRequest):
+ request = model_service.ListModelEvaluationSlicesRequest(request)
+
+ # If we have keyword arguments corresponding to fields on the
+ # request, apply these.
+
+ if parent is not None:
+ request.parent = parent
+
+ # Wrap the RPC method; this adds retry and timeout information,
+ # and friendly error handling.
+ rpc = self._transport._wrapped_methods[
+ self._transport.list_model_evaluation_slices
+ ]
+
+ # Certain fields should be provided within the metadata header;
+ # add these here.
+ metadata = tuple(metadata) + (
+ gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)),
+ )
+
+ # Send the request.
+ response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,)
+
+ # This method is paged; wrap the response in a pager, which provides
+ # an `__iter__` convenience method.
+ response = pagers.ListModelEvaluationSlicesPager(
+ method=rpc, request=request, response=response, metadata=metadata,
+ )
+
+ # Done; return the response.
+ return response
+
+
+try:
+ DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo(
+ gapic_version=pkg_resources.get_distribution(
+ "google-cloud-aiplatform",
+ ).version,
+ )
+except pkg_resources.DistributionNotFound:
+ DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo()
+
+
+__all__ = ("ModelServiceClient",)
diff --git a/google/cloud/aiplatform_v1/services/model_service/pagers.py b/google/cloud/aiplatform_v1/services/model_service/pagers.py
new file mode 100644
index 0000000000..be652f745f
--- /dev/null
+++ b/google/cloud/aiplatform_v1/services/model_service/pagers.py
@@ -0,0 +1,411 @@
+# -*- coding: utf-8 -*-
+
+# Copyright 2020 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+from typing import Any, AsyncIterable, Awaitable, Callable, Iterable, Sequence, Tuple
+
+from google.cloud.aiplatform_v1.types import model
+from google.cloud.aiplatform_v1.types import model_evaluation
+from google.cloud.aiplatform_v1.types import model_evaluation_slice
+from google.cloud.aiplatform_v1.types import model_service
+
+
+class ListModelsPager:
+ """A pager for iterating through ``list_models`` requests.
+
+ This class thinly wraps an initial
+ :class:`google.cloud.aiplatform_v1.types.ListModelsResponse` object, and
+ provides an ``__iter__`` method to iterate through its
+ ``models`` field.
+
+ If there are more pages, the ``__iter__`` method will make additional
+ ``ListModels`` requests and continue to iterate
+ through the ``models`` field on the
+ corresponding responses.
+
+ All the usual :class:`google.cloud.aiplatform_v1.types.ListModelsResponse`
+ attributes are available on the pager. If multiple requests are made, only
+ the most recent response is retained, and thus used for attribute lookup.
+ """
+
+ def __init__(
+ self,
+ method: Callable[..., model_service.ListModelsResponse],
+ request: model_service.ListModelsRequest,
+ response: model_service.ListModelsResponse,
+ *,
+ metadata: Sequence[Tuple[str, str]] = ()
+ ):
+ """Instantiate the pager.
+
+ Args:
+ method (Callable): The method that was originally called, and
+ which instantiated this pager.
+ request (google.cloud.aiplatform_v1.types.ListModelsRequest):
+ The initial request object.
+ response (google.cloud.aiplatform_v1.types.ListModelsResponse):
+ The initial response object.
+ metadata (Sequence[Tuple[str, str]]): Strings which should be
+ sent along with the request as metadata.
+ """
+ self._method = method
+ self._request = model_service.ListModelsRequest(request)
+ self._response = response
+ self._metadata = metadata
+
+ def __getattr__(self, name: str) -> Any:
+ return getattr(self._response, name)
+
+ @property
+ def pages(self) -> Iterable[model_service.ListModelsResponse]:
+ yield self._response
+ while self._response.next_page_token:
+ self._request.page_token = self._response.next_page_token
+ self._response = self._method(self._request, metadata=self._metadata)
+ yield self._response
+
+ def __iter__(self) -> Iterable[model.Model]:
+ for page in self.pages:
+ yield from page.models
+
+ def __repr__(self) -> str:
+ return "{0}<{1!r}>".format(self.__class__.__name__, self._response)
+
+
+class ListModelsAsyncPager:
+ """A pager for iterating through ``list_models`` requests.
+
+ This class thinly wraps an initial
+ :class:`google.cloud.aiplatform_v1.types.ListModelsResponse` object, and
+ provides an ``__aiter__`` method to iterate through its
+ ``models`` field.
+
+ If there are more pages, the ``__aiter__`` method will make additional
+ ``ListModels`` requests and continue to iterate
+ through the ``models`` field on the
+ corresponding responses.
+
+ All the usual :class:`google.cloud.aiplatform_v1.types.ListModelsResponse`
+ attributes are available on the pager. If multiple requests are made, only
+ the most recent response is retained, and thus used for attribute lookup.
+ """
+
+ def __init__(
+ self,
+ method: Callable[..., Awaitable[model_service.ListModelsResponse]],
+ request: model_service.ListModelsRequest,
+ response: model_service.ListModelsResponse,
+ *,
+ metadata: Sequence[Tuple[str, str]] = ()
+ ):
+ """Instantiate the pager.
+
+ Args:
+ method (Callable): The method that was originally called, and
+ which instantiated this pager.
+ request (google.cloud.aiplatform_v1.types.ListModelsRequest):
+ The initial request object.
+ response (google.cloud.aiplatform_v1.types.ListModelsResponse):
+ The initial response object.
+ metadata (Sequence[Tuple[str, str]]): Strings which should be
+ sent along with the request as metadata.
+ """
+ self._method = method
+ self._request = model_service.ListModelsRequest(request)
+ self._response = response
+ self._metadata = metadata
+
+ def __getattr__(self, name: str) -> Any:
+ return getattr(self._response, name)
+
+ @property
+ async def pages(self) -> AsyncIterable[model_service.ListModelsResponse]:
+ yield self._response
+ while self._response.next_page_token:
+ self._request.page_token = self._response.next_page_token
+ self._response = await self._method(self._request, metadata=self._metadata)
+ yield self._response
+
+ def __aiter__(self) -> AsyncIterable[model.Model]:
+ async def async_generator():
+ async for page in self.pages:
+ for response in page.models:
+ yield response
+
+ return async_generator()
+
+ def __repr__(self) -> str:
+ return "{0}<{1!r}>".format(self.__class__.__name__, self._response)
+
+
+class ListModelEvaluationsPager:
+ """A pager for iterating through ``list_model_evaluations`` requests.
+
+ This class thinly wraps an initial
+ :class:`google.cloud.aiplatform_v1.types.ListModelEvaluationsResponse` object, and
+ provides an ``__iter__`` method to iterate through its
+ ``model_evaluations`` field.
+
+ If there are more pages, the ``__iter__`` method will make additional
+ ``ListModelEvaluations`` requests and continue to iterate
+ through the ``model_evaluations`` field on the
+ corresponding responses.
+
+ All the usual :class:`google.cloud.aiplatform_v1.types.ListModelEvaluationsResponse`
+ attributes are available on the pager. If multiple requests are made, only
+ the most recent response is retained, and thus used for attribute lookup.
+ """
+
+ def __init__(
+ self,
+ method: Callable[..., model_service.ListModelEvaluationsResponse],
+ request: model_service.ListModelEvaluationsRequest,
+ response: model_service.ListModelEvaluationsResponse,
+ *,
+ metadata: Sequence[Tuple[str, str]] = ()
+ ):
+ """Instantiate the pager.
+
+ Args:
+ method (Callable): The method that was originally called, and
+ which instantiated this pager.
+ request (google.cloud.aiplatform_v1.types.ListModelEvaluationsRequest):
+ The initial request object.
+ response (google.cloud.aiplatform_v1.types.ListModelEvaluationsResponse):
+ The initial response object.
+ metadata (Sequence[Tuple[str, str]]): Strings which should be
+ sent along with the request as metadata.
+ """
+ self._method = method
+ self._request = model_service.ListModelEvaluationsRequest(request)
+ self._response = response
+ self._metadata = metadata
+
+ def __getattr__(self, name: str) -> Any:
+ return getattr(self._response, name)
+
+ @property
+ def pages(self) -> Iterable[model_service.ListModelEvaluationsResponse]:
+ yield self._response
+ while self._response.next_page_token:
+ self._request.page_token = self._response.next_page_token
+ self._response = self._method(self._request, metadata=self._metadata)
+ yield self._response
+
+ def __iter__(self) -> Iterable[model_evaluation.ModelEvaluation]:
+ for page in self.pages:
+ yield from page.model_evaluations
+
+ def __repr__(self) -> str:
+ return "{0}<{1!r}>".format(self.__class__.__name__, self._response)
+
+
+class ListModelEvaluationsAsyncPager:
+ """A pager for iterating through ``list_model_evaluations`` requests.
+
+ This class thinly wraps an initial
+ :class:`google.cloud.aiplatform_v1.types.ListModelEvaluationsResponse` object, and
+ provides an ``__aiter__`` method to iterate through its
+ ``model_evaluations`` field.
+
+ If there are more pages, the ``__aiter__`` method will make additional
+ ``ListModelEvaluations`` requests and continue to iterate
+ through the ``model_evaluations`` field on the
+ corresponding responses.
+
+ All the usual :class:`google.cloud.aiplatform_v1.types.ListModelEvaluationsResponse`
+ attributes are available on the pager. If multiple requests are made, only
+ the most recent response is retained, and thus used for attribute lookup.
+ """
+
+ def __init__(
+ self,
+ method: Callable[..., Awaitable[model_service.ListModelEvaluationsResponse]],
+ request: model_service.ListModelEvaluationsRequest,
+ response: model_service.ListModelEvaluationsResponse,
+ *,
+ metadata: Sequence[Tuple[str, str]] = ()
+ ):
+ """Instantiate the pager.
+
+ Args:
+ method (Callable): The method that was originally called, and
+ which instantiated this pager.
+ request (google.cloud.aiplatform_v1.types.ListModelEvaluationsRequest):
+ The initial request object.
+ response (google.cloud.aiplatform_v1.types.ListModelEvaluationsResponse):
+ The initial response object.
+ metadata (Sequence[Tuple[str, str]]): Strings which should be
+ sent along with the request as metadata.
+ """
+ self._method = method
+ self._request = model_service.ListModelEvaluationsRequest(request)
+ self._response = response
+ self._metadata = metadata
+
+ def __getattr__(self, name: str) -> Any:
+ return getattr(self._response, name)
+
+ @property
+ async def pages(self) -> AsyncIterable[model_service.ListModelEvaluationsResponse]:
+ yield self._response
+ while self._response.next_page_token:
+ self._request.page_token = self._response.next_page_token
+ self._response = await self._method(self._request, metadata=self._metadata)
+ yield self._response
+
+ def __aiter__(self) -> AsyncIterable[model_evaluation.ModelEvaluation]:
+ async def async_generator():
+ async for page in self.pages:
+ for response in page.model_evaluations:
+ yield response
+
+ return async_generator()
+
+ def __repr__(self) -> str:
+ return "{0}<{1!r}>".format(self.__class__.__name__, self._response)
+
+
+class ListModelEvaluationSlicesPager:
+ """A pager for iterating through ``list_model_evaluation_slices`` requests.
+
+ This class thinly wraps an initial
+ :class:`google.cloud.aiplatform_v1.types.ListModelEvaluationSlicesResponse` object, and
+ provides an ``__iter__`` method to iterate through its
+ ``model_evaluation_slices`` field.
+
+ If there are more pages, the ``__iter__`` method will make additional
+ ``ListModelEvaluationSlices`` requests and continue to iterate
+ through the ``model_evaluation_slices`` field on the
+ corresponding responses.
+
+ All the usual :class:`google.cloud.aiplatform_v1.types.ListModelEvaluationSlicesResponse`
+ attributes are available on the pager. If multiple requests are made, only
+ the most recent response is retained, and thus used for attribute lookup.
+ """
+
+ def __init__(
+ self,
+ method: Callable[..., model_service.ListModelEvaluationSlicesResponse],
+ request: model_service.ListModelEvaluationSlicesRequest,
+ response: model_service.ListModelEvaluationSlicesResponse,
+ *,
+ metadata: Sequence[Tuple[str, str]] = ()
+ ):
+ """Instantiate the pager.
+
+ Args:
+ method (Callable): The method that was originally called, and
+ which instantiated this pager.
+ request (google.cloud.aiplatform_v1.types.ListModelEvaluationSlicesRequest):
+ The initial request object.
+ response (google.cloud.aiplatform_v1.types.ListModelEvaluationSlicesResponse):
+ The initial response object.
+ metadata (Sequence[Tuple[str, str]]): Strings which should be
+ sent along with the request as metadata.
+ """
+ self._method = method
+ self._request = model_service.ListModelEvaluationSlicesRequest(request)
+ self._response = response
+ self._metadata = metadata
+
+ def __getattr__(self, name: str) -> Any:
+ return getattr(self._response, name)
+
+ @property
+ def pages(self) -> Iterable[model_service.ListModelEvaluationSlicesResponse]:
+ yield self._response
+ while self._response.next_page_token:
+ self._request.page_token = self._response.next_page_token
+ self._response = self._method(self._request, metadata=self._metadata)
+ yield self._response
+
+ def __iter__(self) -> Iterable[model_evaluation_slice.ModelEvaluationSlice]:
+ for page in self.pages:
+ yield from page.model_evaluation_slices
+
+ def __repr__(self) -> str:
+ return "{0}<{1!r}>".format(self.__class__.__name__, self._response)
+
+
+class ListModelEvaluationSlicesAsyncPager:
+ """A pager for iterating through ``list_model_evaluation_slices`` requests.
+
+ This class thinly wraps an initial
+ :class:`google.cloud.aiplatform_v1.types.ListModelEvaluationSlicesResponse` object, and
+ provides an ``__aiter__`` method to iterate through its
+ ``model_evaluation_slices`` field.
+
+ If there are more pages, the ``__aiter__`` method will make additional
+ ``ListModelEvaluationSlices`` requests and continue to iterate
+ through the ``model_evaluation_slices`` field on the
+ corresponding responses.
+
+ All the usual :class:`google.cloud.aiplatform_v1.types.ListModelEvaluationSlicesResponse`
+ attributes are available on the pager. If multiple requests are made, only
+ the most recent response is retained, and thus used for attribute lookup.
+ """
+
+ def __init__(
+ self,
+ method: Callable[
+ ..., Awaitable[model_service.ListModelEvaluationSlicesResponse]
+ ],
+ request: model_service.ListModelEvaluationSlicesRequest,
+ response: model_service.ListModelEvaluationSlicesResponse,
+ *,
+ metadata: Sequence[Tuple[str, str]] = ()
+ ):
+ """Instantiate the pager.
+
+ Args:
+ method (Callable): The method that was originally called, and
+ which instantiated this pager.
+ request (google.cloud.aiplatform_v1.types.ListModelEvaluationSlicesRequest):
+ The initial request object.
+ response (google.cloud.aiplatform_v1.types.ListModelEvaluationSlicesResponse):
+ The initial response object.
+ metadata (Sequence[Tuple[str, str]]): Strings which should be
+ sent along with the request as metadata.
+ """
+ self._method = method
+ self._request = model_service.ListModelEvaluationSlicesRequest(request)
+ self._response = response
+ self._metadata = metadata
+
+ def __getattr__(self, name: str) -> Any:
+ return getattr(self._response, name)
+
+ @property
+ async def pages(
+ self,
+ ) -> AsyncIterable[model_service.ListModelEvaluationSlicesResponse]:
+ yield self._response
+ while self._response.next_page_token:
+ self._request.page_token = self._response.next_page_token
+ self._response = await self._method(self._request, metadata=self._metadata)
+ yield self._response
+
+ def __aiter__(self) -> AsyncIterable[model_evaluation_slice.ModelEvaluationSlice]:
+ async def async_generator():
+ async for page in self.pages:
+ for response in page.model_evaluation_slices:
+ yield response
+
+ return async_generator()
+
+ def __repr__(self) -> str:
+ return "{0}<{1!r}>".format(self.__class__.__name__, self._response)
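These pager classes are not constructed directly; they come back from the corresponding ``ModelServiceClient`` methods. A minimal usage sketch, assuming default credentials from the environment and placeholder project, model, and evaluation IDs:

    from google.cloud import aiplatform_v1

    client = aiplatform_v1.ModelServiceClient()
    # Parent of ListModelEvaluationSlices is a ModelEvaluation resource name;
    # every ID below is a placeholder.
    parent = "projects/my-project/locations/us-central1/models/123/evaluations/456"
    for evaluation_slice in client.list_model_evaluation_slices(parent=parent):
        # Iterating the pager fetches further pages transparently.
        print(evaluation_slice.name)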
diff --git a/google/cloud/aiplatform_v1/services/model_service/transports/__init__.py b/google/cloud/aiplatform_v1/services/model_service/transports/__init__.py
new file mode 100644
index 0000000000..5d1cb51abc
--- /dev/null
+++ b/google/cloud/aiplatform_v1/services/model_service/transports/__init__.py
@@ -0,0 +1,35 @@
+# -*- coding: utf-8 -*-
+
+# Copyright 2020 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+from collections import OrderedDict
+from typing import Dict, Type
+
+from .base import ModelServiceTransport
+from .grpc import ModelServiceGrpcTransport
+from .grpc_asyncio import ModelServiceGrpcAsyncIOTransport
+
+
+# Compile a registry of transports.
+_transport_registry = OrderedDict() # type: Dict[str, Type[ModelServiceTransport]]
+_transport_registry["grpc"] = ModelServiceGrpcTransport
+_transport_registry["grpc_asyncio"] = ModelServiceGrpcAsyncIOTransport
+
+__all__ = (
+ "ModelServiceTransport",
+ "ModelServiceGrpcTransport",
+ "ModelServiceGrpcAsyncIOTransport",
+)
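The registry above is what lets a client resolve a transport class from its string name. A small sketch of the mapping (``_transport_registry`` is an internal attribute, inspected here only for illustration):

    from google.cloud.aiplatform_v1.services.model_service import transports

    assert transports._transport_registry["grpc"] is transports.ModelServiceGrpcTransport
    assert (
        transports._transport_registry["grpc_asyncio"]
        is transports.ModelServiceGrpcAsyncIOTransport
    )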
diff --git a/google/cloud/aiplatform_v1/services/model_service/transports/base.py b/google/cloud/aiplatform_v1/services/model_service/transports/base.py
new file mode 100644
index 0000000000..d937f09a61
--- /dev/null
+++ b/google/cloud/aiplatform_v1/services/model_service/transports/base.py
@@ -0,0 +1,266 @@
+# -*- coding: utf-8 -*-
+
+# Copyright 2020 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+import abc
+import typing
+import pkg_resources
+
+from google import auth # type: ignore
+from google.api_core import exceptions # type: ignore
+from google.api_core import gapic_v1 # type: ignore
+from google.api_core import retry as retries # type: ignore
+from google.api_core import operations_v1 # type: ignore
+from google.auth import credentials # type: ignore
+
+from google.cloud.aiplatform_v1.types import model
+from google.cloud.aiplatform_v1.types import model as gca_model
+from google.cloud.aiplatform_v1.types import model_evaluation
+from google.cloud.aiplatform_v1.types import model_evaluation_slice
+from google.cloud.aiplatform_v1.types import model_service
+from google.longrunning import operations_pb2 as operations # type: ignore
+
+
+try:
+ DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo(
+ gapic_version=pkg_resources.get_distribution(
+ "google-cloud-aiplatform",
+ ).version,
+ )
+except pkg_resources.DistributionNotFound:
+ DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo()
+
+
+class ModelServiceTransport(abc.ABC):
+ """Abstract transport class for ModelService."""
+
+ AUTH_SCOPES = ("https://www.googleapis.com/auth/cloud-platform",)
+
+ def __init__(
+ self,
+ *,
+ host: str = "aiplatform.googleapis.com",
+ credentials: credentials.Credentials = None,
+ credentials_file: typing.Optional[str] = None,
+ scopes: typing.Optional[typing.Sequence[str]] = AUTH_SCOPES,
+ quota_project_id: typing.Optional[str] = None,
+ client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
+ **kwargs,
+ ) -> None:
+ """Instantiate the transport.
+
+ Args:
+ host (Optional[str]): The hostname to connect to.
+ credentials (Optional[google.auth.credentials.Credentials]): The
+ authorization credentials to attach to requests. These
+ credentials identify the application to the service; if none
+ are specified, the client will attempt to ascertain the
+ credentials from the environment.
+ credentials_file (Optional[str]): A file with credentials that can
+ be loaded with :func:`google.auth.load_credentials_from_file`.
+ This argument is mutually exclusive with credentials.
+            scopes (Optional[Sequence[str]]): A list of scopes.
+ quota_project_id (Optional[str]): An optional project to use for billing
+ and quota.
+ client_info (google.api_core.gapic_v1.client_info.ClientInfo):
+ The client info used to send a user-agent string along with
+ API requests. If ``None``, then default info will be used.
+ Generally, you only need to set this if you're developing
+ your own client library.
+ """
+ # Save the hostname. Default to port 443 (HTTPS) if none is specified.
+ if ":" not in host:
+ host += ":443"
+ self._host = host
+
+ # If no credentials are provided, then determine the appropriate
+ # defaults.
+ if credentials and credentials_file:
+ raise exceptions.DuplicateCredentialArgs(
+ "'credentials_file' and 'credentials' are mutually exclusive"
+ )
+
+ if credentials_file is not None:
+ credentials, _ = auth.load_credentials_from_file(
+ credentials_file, scopes=scopes, quota_project_id=quota_project_id
+ )
+
+ elif credentials is None:
+ credentials, _ = auth.default(
+ scopes=scopes, quota_project_id=quota_project_id
+ )
+
+ # Save the credentials.
+ self._credentials = credentials
+
+ # Lifted into its own function so it can be stubbed out during tests.
+ self._prep_wrapped_messages(client_info)
+
+ def _prep_wrapped_messages(self, client_info):
+ # Precompute the wrapped methods.
+ self._wrapped_methods = {
+ self.upload_model: gapic_v1.method.wrap_method(
+ self.upload_model, default_timeout=None, client_info=client_info,
+ ),
+ self.get_model: gapic_v1.method.wrap_method(
+ self.get_model, default_timeout=None, client_info=client_info,
+ ),
+ self.list_models: gapic_v1.method.wrap_method(
+ self.list_models, default_timeout=None, client_info=client_info,
+ ),
+ self.update_model: gapic_v1.method.wrap_method(
+ self.update_model, default_timeout=None, client_info=client_info,
+ ),
+ self.delete_model: gapic_v1.method.wrap_method(
+ self.delete_model, default_timeout=None, client_info=client_info,
+ ),
+ self.export_model: gapic_v1.method.wrap_method(
+ self.export_model, default_timeout=None, client_info=client_info,
+ ),
+ self.get_model_evaluation: gapic_v1.method.wrap_method(
+ self.get_model_evaluation,
+ default_timeout=None,
+ client_info=client_info,
+ ),
+ self.list_model_evaluations: gapic_v1.method.wrap_method(
+ self.list_model_evaluations,
+ default_timeout=None,
+ client_info=client_info,
+ ),
+ self.get_model_evaluation_slice: gapic_v1.method.wrap_method(
+ self.get_model_evaluation_slice,
+ default_timeout=None,
+ client_info=client_info,
+ ),
+ self.list_model_evaluation_slices: gapic_v1.method.wrap_method(
+ self.list_model_evaluation_slices,
+ default_timeout=None,
+ client_info=client_info,
+ ),
+ }
+
+ @property
+ def operations_client(self) -> operations_v1.OperationsClient:
+ """Return the client designed to process long-running operations."""
+ raise NotImplementedError()
+
+ @property
+ def upload_model(
+ self,
+ ) -> typing.Callable[
+ [model_service.UploadModelRequest],
+ typing.Union[operations.Operation, typing.Awaitable[operations.Operation]],
+ ]:
+ raise NotImplementedError()
+
+ @property
+ def get_model(
+ self,
+ ) -> typing.Callable[
+ [model_service.GetModelRequest],
+ typing.Union[model.Model, typing.Awaitable[model.Model]],
+ ]:
+ raise NotImplementedError()
+
+ @property
+ def list_models(
+ self,
+ ) -> typing.Callable[
+ [model_service.ListModelsRequest],
+ typing.Union[
+ model_service.ListModelsResponse,
+ typing.Awaitable[model_service.ListModelsResponse],
+ ],
+ ]:
+ raise NotImplementedError()
+
+ @property
+ def update_model(
+ self,
+ ) -> typing.Callable[
+ [model_service.UpdateModelRequest],
+ typing.Union[gca_model.Model, typing.Awaitable[gca_model.Model]],
+ ]:
+ raise NotImplementedError()
+
+ @property
+ def delete_model(
+ self,
+ ) -> typing.Callable[
+ [model_service.DeleteModelRequest],
+ typing.Union[operations.Operation, typing.Awaitable[operations.Operation]],
+ ]:
+ raise NotImplementedError()
+
+ @property
+ def export_model(
+ self,
+ ) -> typing.Callable[
+ [model_service.ExportModelRequest],
+ typing.Union[operations.Operation, typing.Awaitable[operations.Operation]],
+ ]:
+ raise NotImplementedError()
+
+ @property
+ def get_model_evaluation(
+ self,
+ ) -> typing.Callable[
+ [model_service.GetModelEvaluationRequest],
+ typing.Union[
+ model_evaluation.ModelEvaluation,
+ typing.Awaitable[model_evaluation.ModelEvaluation],
+ ],
+ ]:
+ raise NotImplementedError()
+
+ @property
+ def list_model_evaluations(
+ self,
+ ) -> typing.Callable[
+ [model_service.ListModelEvaluationsRequest],
+ typing.Union[
+ model_service.ListModelEvaluationsResponse,
+ typing.Awaitable[model_service.ListModelEvaluationsResponse],
+ ],
+ ]:
+ raise NotImplementedError()
+
+ @property
+ def get_model_evaluation_slice(
+ self,
+ ) -> typing.Callable[
+ [model_service.GetModelEvaluationSliceRequest],
+ typing.Union[
+ model_evaluation_slice.ModelEvaluationSlice,
+ typing.Awaitable[model_evaluation_slice.ModelEvaluationSlice],
+ ],
+ ]:
+ raise NotImplementedError()
+
+ @property
+ def list_model_evaluation_slices(
+ self,
+ ) -> typing.Callable[
+ [model_service.ListModelEvaluationSlicesRequest],
+ typing.Union[
+ model_service.ListModelEvaluationSlicesResponse,
+ typing.Awaitable[model_service.ListModelEvaluationSlicesResponse],
+ ],
+ ]:
+ raise NotImplementedError()
+
+
+__all__ = ("ModelServiceTransport",)
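The ``DEFAULT_CLIENT_INFO`` block at the top of this module is the usual pattern for stamping the installed library version onto the user-agent, with a fallback for source checkouts that were never pip-installed. A standalone sketch of the same fallback:

    import pkg_resources
    from google.api_core import gapic_v1

    try:
        _version = pkg_resources.get_distribution("google-cloud-aiplatform").version
        client_info = gapic_v1.client_info.ClientInfo(gapic_version=_version)
    except pkg_resources.DistributionNotFound:
        # Running from source without installing the distribution:
        # fall back to a ClientInfo with no gapic_version.
        client_info = gapic_v1.client_info.ClientInfo()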
diff --git a/google/cloud/aiplatform_v1/services/model_service/transports/grpc.py b/google/cloud/aiplatform_v1/services/model_service/transports/grpc.py
new file mode 100644
index 0000000000..90dcfd008d
--- /dev/null
+++ b/google/cloud/aiplatform_v1/services/model_service/transports/grpc.py
@@ -0,0 +1,547 @@
+# -*- coding: utf-8 -*-
+
+# Copyright 2020 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+import warnings
+from typing import Callable, Dict, Optional, Sequence, Tuple
+
+from google.api_core import grpc_helpers # type: ignore
+from google.api_core import operations_v1 # type: ignore
+from google.api_core import gapic_v1 # type: ignore
+from google import auth # type: ignore
+from google.auth import credentials # type: ignore
+from google.auth.transport.grpc import SslCredentials # type: ignore
+
+import grpc # type: ignore
+
+from google.cloud.aiplatform_v1.types import model
+from google.cloud.aiplatform_v1.types import model as gca_model
+from google.cloud.aiplatform_v1.types import model_evaluation
+from google.cloud.aiplatform_v1.types import model_evaluation_slice
+from google.cloud.aiplatform_v1.types import model_service
+from google.longrunning import operations_pb2 as operations # type: ignore
+
+from .base import ModelServiceTransport, DEFAULT_CLIENT_INFO
+
+
+class ModelServiceGrpcTransport(ModelServiceTransport):
+ """gRPC backend transport for ModelService.
+
+ A service for managing AI Platform's machine learning Models.
+
+ This class defines the same methods as the primary client, so the
+ primary client can load the underlying transport implementation
+ and call it.
+
+ It sends protocol buffers over the wire using gRPC (which is built on
+ top of HTTP/2); the ``grpcio`` package must be installed.
+ """
+
+ _stubs: Dict[str, Callable]
+
+ def __init__(
+ self,
+ *,
+ host: str = "aiplatform.googleapis.com",
+ credentials: credentials.Credentials = None,
+ credentials_file: str = None,
+ scopes: Sequence[str] = None,
+ channel: grpc.Channel = None,
+ api_mtls_endpoint: str = None,
+ client_cert_source: Callable[[], Tuple[bytes, bytes]] = None,
+ ssl_channel_credentials: grpc.ChannelCredentials = None,
+ client_cert_source_for_mtls: Callable[[], Tuple[bytes, bytes]] = None,
+ quota_project_id: Optional[str] = None,
+ client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
+ ) -> None:
+ """Instantiate the transport.
+
+ Args:
+ host (Optional[str]): The hostname to connect to.
+ credentials (Optional[google.auth.credentials.Credentials]): The
+ authorization credentials to attach to requests. These
+ credentials identify the application to the service; if none
+ are specified, the client will attempt to ascertain the
+ credentials from the environment.
+ This argument is ignored if ``channel`` is provided.
+ credentials_file (Optional[str]): A file with credentials that can
+ be loaded with :func:`google.auth.load_credentials_from_file`.
+ This argument is ignored if ``channel`` is provided.
+            scopes (Optional[Sequence[str]]): A list of scopes. This argument is
+ ignored if ``channel`` is provided.
+ channel (Optional[grpc.Channel]): A ``Channel`` instance through
+ which to make calls.
+ api_mtls_endpoint (Optional[str]): Deprecated. The mutual TLS endpoint.
+ If provided, it overrides the ``host`` argument and tries to create
+ a mutual TLS channel with client SSL credentials from
+                ``client_cert_source`` or application default SSL credentials.
+ client_cert_source (Optional[Callable[[], Tuple[bytes, bytes]]]):
+ Deprecated. A callback to provide client SSL certificate bytes and
+ private key bytes, both in PEM format. It is ignored if
+ ``api_mtls_endpoint`` is None.
+ ssl_channel_credentials (grpc.ChannelCredentials): SSL credentials
+ for grpc channel. It is ignored if ``channel`` is provided.
+ client_cert_source_for_mtls (Optional[Callable[[], Tuple[bytes, bytes]]]):
+ A callback to provide client certificate bytes and private key bytes,
+ both in PEM format. It is used to configure mutual TLS channel. It is
+ ignored if ``channel`` or ``ssl_channel_credentials`` is provided.
+ quota_project_id (Optional[str]): An optional project to use for billing
+ and quota.
+ client_info (google.api_core.gapic_v1.client_info.ClientInfo):
+ The client info used to send a user-agent string along with
+ API requests. If ``None``, then default info will be used.
+ Generally, you only need to set this if you're developing
+ your own client library.
+
+ Raises:
+ google.auth.exceptions.MutualTLSChannelError: If mutual TLS transport
+ creation failed for any reason.
+ google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials``
+ and ``credentials_file`` are passed.
+ """
+ self._ssl_channel_credentials = ssl_channel_credentials
+
+ if api_mtls_endpoint:
+ warnings.warn("api_mtls_endpoint is deprecated", DeprecationWarning)
+ if client_cert_source:
+ warnings.warn("client_cert_source is deprecated", DeprecationWarning)
+
+ if channel:
+ # Sanity check: Ensure that channel and credentials are not both
+ # provided.
+ credentials = False
+
+ # If a channel was explicitly provided, set it.
+ self._grpc_channel = channel
+ self._ssl_channel_credentials = None
+ elif api_mtls_endpoint:
+ host = (
+ api_mtls_endpoint
+ if ":" in api_mtls_endpoint
+ else api_mtls_endpoint + ":443"
+ )
+
+ if credentials is None:
+ credentials, _ = auth.default(
+ scopes=self.AUTH_SCOPES, quota_project_id=quota_project_id
+ )
+
+ # Create SSL credentials with client_cert_source or application
+ # default SSL credentials.
+ if client_cert_source:
+ cert, key = client_cert_source()
+ ssl_credentials = grpc.ssl_channel_credentials(
+ certificate_chain=cert, private_key=key
+ )
+ else:
+ ssl_credentials = SslCredentials().ssl_credentials
+
+ # create a new channel. The provided one is ignored.
+ self._grpc_channel = type(self).create_channel(
+ host,
+ credentials=credentials,
+ credentials_file=credentials_file,
+ ssl_credentials=ssl_credentials,
+ scopes=scopes or self.AUTH_SCOPES,
+ quota_project_id=quota_project_id,
+ options=[
+ ("grpc.max_send_message_length", -1),
+ ("grpc.max_receive_message_length", -1),
+ ],
+ )
+ self._ssl_channel_credentials = ssl_credentials
+ else:
+ host = host if ":" in host else host + ":443"
+
+ if credentials is None:
+ credentials, _ = auth.default(
+ scopes=self.AUTH_SCOPES, quota_project_id=quota_project_id
+ )
+
+ if client_cert_source_for_mtls and not ssl_channel_credentials:
+ cert, key = client_cert_source_for_mtls()
+ self._ssl_channel_credentials = grpc.ssl_channel_credentials(
+ certificate_chain=cert, private_key=key
+ )
+
+ # create a new channel. The provided one is ignored.
+ self._grpc_channel = type(self).create_channel(
+ host,
+ credentials=credentials,
+ credentials_file=credentials_file,
+ ssl_credentials=self._ssl_channel_credentials,
+ scopes=scopes or self.AUTH_SCOPES,
+ quota_project_id=quota_project_id,
+ options=[
+ ("grpc.max_send_message_length", -1),
+ ("grpc.max_receive_message_length", -1),
+ ],
+ )
+
+ self._stubs = {} # type: Dict[str, Callable]
+ self._operations_client = None
+
+ # Run the base constructor.
+ super().__init__(
+ host=host,
+ credentials=credentials,
+ credentials_file=credentials_file,
+ scopes=scopes or self.AUTH_SCOPES,
+ quota_project_id=quota_project_id,
+ client_info=client_info,
+ )
+
+ @classmethod
+ def create_channel(
+ cls,
+ host: str = "aiplatform.googleapis.com",
+ credentials: credentials.Credentials = None,
+ credentials_file: str = None,
+ scopes: Optional[Sequence[str]] = None,
+ quota_project_id: Optional[str] = None,
+ **kwargs,
+ ) -> grpc.Channel:
+ """Create and return a gRPC channel object.
+ Args:
+            host (Optional[str]): The host for the channel to use.
+ credentials (Optional[~.Credentials]): The
+ authorization credentials to attach to requests. These
+ credentials identify this application to the service. If
+ none are specified, the client will attempt to ascertain
+ the credentials from the environment.
+ credentials_file (Optional[str]): A file with credentials that can
+ be loaded with :func:`google.auth.load_credentials_from_file`.
+ This argument is mutually exclusive with credentials.
+            scopes (Optional[Sequence[str]]): An optional list of scopes needed for this
+ service. These are only used when credentials are not specified and
+ are passed to :func:`google.auth.default`.
+ quota_project_id (Optional[str]): An optional project to use for billing
+ and quota.
+ kwargs (Optional[dict]): Keyword arguments, which are passed to the
+ channel creation.
+ Returns:
+ grpc.Channel: A gRPC channel object.
+
+ Raises:
+ google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials``
+ and ``credentials_file`` are passed.
+ """
+ scopes = scopes or cls.AUTH_SCOPES
+ return grpc_helpers.create_channel(
+ host,
+ credentials=credentials,
+ credentials_file=credentials_file,
+ scopes=scopes,
+ quota_project_id=quota_project_id,
+ **kwargs,
+ )
+
+ @property
+ def grpc_channel(self) -> grpc.Channel:
+ """Return the channel designed to connect to this service.
+ """
+ return self._grpc_channel
+
+ @property
+ def operations_client(self) -> operations_v1.OperationsClient:
+ """Create the client designed to process long-running operations.
+
+ This property caches on the instance; repeated calls return the same
+ client.
+ """
+ # Sanity check: Only create a new client if we do not already have one.
+ if self._operations_client is None:
+ self._operations_client = operations_v1.OperationsClient(self.grpc_channel)
+
+ # Return the client from cache.
+ return self._operations_client
+
+ @property
+ def upload_model(
+ self,
+ ) -> Callable[[model_service.UploadModelRequest], operations.Operation]:
+ r"""Return a callable for the upload model method over gRPC.
+
+ Uploads a Model artifact into AI Platform.
+
+ Returns:
+ Callable[[~.UploadModelRequest],
+ ~.Operation]:
+ A function that, when called, will call the underlying RPC
+ on the server.
+ """
+ # Generate a "stub function" on-the-fly which will actually make
+ # the request.
+ # gRPC handles serialization and deserialization, so we just need
+ # to pass in the functions for each.
+ if "upload_model" not in self._stubs:
+ self._stubs["upload_model"] = self.grpc_channel.unary_unary(
+ "/google.cloud.aiplatform.v1.ModelService/UploadModel",
+ request_serializer=model_service.UploadModelRequest.serialize,
+ response_deserializer=operations.Operation.FromString,
+ )
+ return self._stubs["upload_model"]
+
+ @property
+ def get_model(self) -> Callable[[model_service.GetModelRequest], model.Model]:
+ r"""Return a callable for the get model method over gRPC.
+
+ Gets a Model.
+
+ Returns:
+ Callable[[~.GetModelRequest],
+ ~.Model]:
+ A function that, when called, will call the underlying RPC
+ on the server.
+ """
+ # Generate a "stub function" on-the-fly which will actually make
+ # the request.
+ # gRPC handles serialization and deserialization, so we just need
+ # to pass in the functions for each.
+ if "get_model" not in self._stubs:
+ self._stubs["get_model"] = self.grpc_channel.unary_unary(
+ "/google.cloud.aiplatform.v1.ModelService/GetModel",
+ request_serializer=model_service.GetModelRequest.serialize,
+ response_deserializer=model.Model.deserialize,
+ )
+ return self._stubs["get_model"]
+
+ @property
+ def list_models(
+ self,
+ ) -> Callable[[model_service.ListModelsRequest], model_service.ListModelsResponse]:
+ r"""Return a callable for the list models method over gRPC.
+
+ Lists Models in a Location.
+
+ Returns:
+ Callable[[~.ListModelsRequest],
+ ~.ListModelsResponse]:
+ A function that, when called, will call the underlying RPC
+ on the server.
+ """
+ # Generate a "stub function" on-the-fly which will actually make
+ # the request.
+ # gRPC handles serialization and deserialization, so we just need
+ # to pass in the functions for each.
+ if "list_models" not in self._stubs:
+ self._stubs["list_models"] = self.grpc_channel.unary_unary(
+ "/google.cloud.aiplatform.v1.ModelService/ListModels",
+ request_serializer=model_service.ListModelsRequest.serialize,
+ response_deserializer=model_service.ListModelsResponse.deserialize,
+ )
+ return self._stubs["list_models"]
+
+ @property
+ def update_model(
+ self,
+ ) -> Callable[[model_service.UpdateModelRequest], gca_model.Model]:
+ r"""Return a callable for the update model method over gRPC.
+
+ Updates a Model.
+
+ Returns:
+ Callable[[~.UpdateModelRequest],
+ ~.Model]:
+ A function that, when called, will call the underlying RPC
+ on the server.
+ """
+ # Generate a "stub function" on-the-fly which will actually make
+ # the request.
+ # gRPC handles serialization and deserialization, so we just need
+ # to pass in the functions for each.
+ if "update_model" not in self._stubs:
+ self._stubs["update_model"] = self.grpc_channel.unary_unary(
+ "/google.cloud.aiplatform.v1.ModelService/UpdateModel",
+ request_serializer=model_service.UpdateModelRequest.serialize,
+ response_deserializer=gca_model.Model.deserialize,
+ )
+ return self._stubs["update_model"]
+
+ @property
+ def delete_model(
+ self,
+ ) -> Callable[[model_service.DeleteModelRequest], operations.Operation]:
+ r"""Return a callable for the delete model method over gRPC.
+
+ Deletes a Model.
+ Note: Model can only be deleted if there are no
+ DeployedModels created from it.
+
+ Returns:
+ Callable[[~.DeleteModelRequest],
+ ~.Operation]:
+ A function that, when called, will call the underlying RPC
+ on the server.
+ """
+ # Generate a "stub function" on-the-fly which will actually make
+ # the request.
+ # gRPC handles serialization and deserialization, so we just need
+ # to pass in the functions for each.
+ if "delete_model" not in self._stubs:
+ self._stubs["delete_model"] = self.grpc_channel.unary_unary(
+ "/google.cloud.aiplatform.v1.ModelService/DeleteModel",
+ request_serializer=model_service.DeleteModelRequest.serialize,
+ response_deserializer=operations.Operation.FromString,
+ )
+ return self._stubs["delete_model"]
+
+ @property
+ def export_model(
+ self,
+ ) -> Callable[[model_service.ExportModelRequest], operations.Operation]:
+ r"""Return a callable for the export model method over gRPC.
+
+ Exports a trained, exportable, Model to a location specified by
+ the user. A Model is considered to be exportable if it has at
+ least one [supported export
+ format][google.cloud.aiplatform.v1.Model.supported_export_formats].
+
+ Returns:
+ Callable[[~.ExportModelRequest],
+ ~.Operation]:
+ A function that, when called, will call the underlying RPC
+ on the server.
+ """
+ # Generate a "stub function" on-the-fly which will actually make
+ # the request.
+ # gRPC handles serialization and deserialization, so we just need
+ # to pass in the functions for each.
+ if "export_model" not in self._stubs:
+ self._stubs["export_model"] = self.grpc_channel.unary_unary(
+ "/google.cloud.aiplatform.v1.ModelService/ExportModel",
+ request_serializer=model_service.ExportModelRequest.serialize,
+ response_deserializer=operations.Operation.FromString,
+ )
+ return self._stubs["export_model"]
+
+ @property
+ def get_model_evaluation(
+ self,
+ ) -> Callable[
+ [model_service.GetModelEvaluationRequest], model_evaluation.ModelEvaluation
+ ]:
+ r"""Return a callable for the get model evaluation method over gRPC.
+
+ Gets a ModelEvaluation.
+
+ Returns:
+ Callable[[~.GetModelEvaluationRequest],
+ ~.ModelEvaluation]:
+ A function that, when called, will call the underlying RPC
+ on the server.
+ """
+ # Generate a "stub function" on-the-fly which will actually make
+ # the request.
+ # gRPC handles serialization and deserialization, so we just need
+ # to pass in the functions for each.
+ if "get_model_evaluation" not in self._stubs:
+ self._stubs["get_model_evaluation"] = self.grpc_channel.unary_unary(
+ "/google.cloud.aiplatform.v1.ModelService/GetModelEvaluation",
+ request_serializer=model_service.GetModelEvaluationRequest.serialize,
+ response_deserializer=model_evaluation.ModelEvaluation.deserialize,
+ )
+ return self._stubs["get_model_evaluation"]
+
+ @property
+ def list_model_evaluations(
+ self,
+ ) -> Callable[
+ [model_service.ListModelEvaluationsRequest],
+ model_service.ListModelEvaluationsResponse,
+ ]:
+ r"""Return a callable for the list model evaluations method over gRPC.
+
+ Lists ModelEvaluations in a Model.
+
+ Returns:
+ Callable[[~.ListModelEvaluationsRequest],
+ ~.ListModelEvaluationsResponse]:
+ A function that, when called, will call the underlying RPC
+ on the server.
+ """
+ # Generate a "stub function" on-the-fly which will actually make
+ # the request.
+ # gRPC handles serialization and deserialization, so we just need
+ # to pass in the functions for each.
+ if "list_model_evaluations" not in self._stubs:
+ self._stubs["list_model_evaluations"] = self.grpc_channel.unary_unary(
+ "/google.cloud.aiplatform.v1.ModelService/ListModelEvaluations",
+ request_serializer=model_service.ListModelEvaluationsRequest.serialize,
+ response_deserializer=model_service.ListModelEvaluationsResponse.deserialize,
+ )
+ return self._stubs["list_model_evaluations"]
+
+ @property
+ def get_model_evaluation_slice(
+ self,
+ ) -> Callable[
+ [model_service.GetModelEvaluationSliceRequest],
+ model_evaluation_slice.ModelEvaluationSlice,
+ ]:
+ r"""Return a callable for the get model evaluation slice method over gRPC.
+
+ Gets a ModelEvaluationSlice.
+
+ Returns:
+ Callable[[~.GetModelEvaluationSliceRequest],
+ ~.ModelEvaluationSlice]:
+ A function that, when called, will call the underlying RPC
+ on the server.
+ """
+ # Generate a "stub function" on-the-fly which will actually make
+ # the request.
+ # gRPC handles serialization and deserialization, so we just need
+ # to pass in the functions for each.
+ if "get_model_evaluation_slice" not in self._stubs:
+ self._stubs["get_model_evaluation_slice"] = self.grpc_channel.unary_unary(
+ "/google.cloud.aiplatform.v1.ModelService/GetModelEvaluationSlice",
+ request_serializer=model_service.GetModelEvaluationSliceRequest.serialize,
+ response_deserializer=model_evaluation_slice.ModelEvaluationSlice.deserialize,
+ )
+ return self._stubs["get_model_evaluation_slice"]
+
+ @property
+ def list_model_evaluation_slices(
+ self,
+ ) -> Callable[
+ [model_service.ListModelEvaluationSlicesRequest],
+ model_service.ListModelEvaluationSlicesResponse,
+ ]:
+ r"""Return a callable for the list model evaluation slices method over gRPC.
+
+ Lists ModelEvaluationSlices in a ModelEvaluation.
+
+ Returns:
+ Callable[[~.ListModelEvaluationSlicesRequest],
+ ~.ListModelEvaluationSlicesResponse]:
+ A function that, when called, will call the underlying RPC
+ on the server.
+ """
+ # Generate a "stub function" on-the-fly which will actually make
+ # the request.
+ # gRPC handles serialization and deserialization, so we just need
+ # to pass in the functions for each.
+ if "list_model_evaluation_slices" not in self._stubs:
+ self._stubs["list_model_evaluation_slices"] = self.grpc_channel.unary_unary(
+ "/google.cloud.aiplatform.v1.ModelService/ListModelEvaluationSlices",
+ request_serializer=model_service.ListModelEvaluationSlicesRequest.serialize,
+ response_deserializer=model_service.ListModelEvaluationSlicesResponse.deserialize,
+ )
+ return self._stubs["list_model_evaluation_slices"]
+
+
+__all__ = ("ModelServiceGrpcTransport",)
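Every RPC property in this transport follows the same lazy-initialization pattern: build the gRPC stub on first access, then serve it from a cache. Reduced to its essentials (the class name below is illustrative, not part of the generated API):

    from google.cloud.aiplatform_v1.types import model, model_service

    class LazyStubSketch:
        """Illustrative only; mirrors the caching done by the transport."""

        def __init__(self, channel):
            self._channel = channel
            self._stubs = {}

        @property
        def get_model(self):
            # Created on first access, reused from the cache afterwards.
            if "get_model" not in self._stubs:
                self._stubs["get_model"] = self._channel.unary_unary(
                    "/google.cloud.aiplatform.v1.ModelService/GetModel",
                    request_serializer=model_service.GetModelRequest.serialize,
                    response_deserializer=model.Model.deserialize,
                )
            return self._stubs["get_model"]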
diff --git a/google/cloud/aiplatform_v1/services/model_service/transports/grpc_asyncio.py b/google/cloud/aiplatform_v1/services/model_service/transports/grpc_asyncio.py
new file mode 100644
index 0000000000..2aeffea93f
--- /dev/null
+++ b/google/cloud/aiplatform_v1/services/model_service/transports/grpc_asyncio.py
@@ -0,0 +1,558 @@
+# -*- coding: utf-8 -*-
+
+# Copyright 2020 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+import warnings
+from typing import Awaitable, Callable, Dict, Optional, Sequence, Tuple
+
+from google.api_core import gapic_v1 # type: ignore
+from google.api_core import grpc_helpers_async # type: ignore
+from google.api_core import operations_v1 # type: ignore
+from google import auth # type: ignore
+from google.auth import credentials # type: ignore
+from google.auth.transport.grpc import SslCredentials # type: ignore
+
+import grpc # type: ignore
+from grpc.experimental import aio # type: ignore
+
+from google.cloud.aiplatform_v1.types import model
+from google.cloud.aiplatform_v1.types import model as gca_model
+from google.cloud.aiplatform_v1.types import model_evaluation
+from google.cloud.aiplatform_v1.types import model_evaluation_slice
+from google.cloud.aiplatform_v1.types import model_service
+from google.longrunning import operations_pb2 as operations # type: ignore
+
+from .base import ModelServiceTransport, DEFAULT_CLIENT_INFO
+from .grpc import ModelServiceGrpcTransport
+
+
+class ModelServiceGrpcAsyncIOTransport(ModelServiceTransport):
+ """gRPC AsyncIO backend transport for ModelService.
+
+ A service for managing AI Platform's machine learning Models.
+
+ This class defines the same methods as the primary client, so the
+ primary client can load the underlying transport implementation
+ and call it.
+
+ It sends protocol buffers over the wire using gRPC (which is built on
+ top of HTTP/2); the ``grpcio`` package must be installed.
+ """
+
+ _grpc_channel: aio.Channel
+ _stubs: Dict[str, Callable] = {}
+
+ @classmethod
+ def create_channel(
+ cls,
+ host: str = "aiplatform.googleapis.com",
+ credentials: credentials.Credentials = None,
+ credentials_file: Optional[str] = None,
+ scopes: Optional[Sequence[str]] = None,
+ quota_project_id: Optional[str] = None,
+ **kwargs,
+ ) -> aio.Channel:
+ """Create and return a gRPC AsyncIO channel object.
+ Args:
+            host (Optional[str]): The host for the channel to use.
+ credentials (Optional[~.Credentials]): The
+ authorization credentials to attach to requests. These
+ credentials identify this application to the service. If
+ none are specified, the client will attempt to ascertain
+ the credentials from the environment.
+ credentials_file (Optional[str]): A file with credentials that can
+ be loaded with :func:`google.auth.load_credentials_from_file`.
+ This argument is ignored if ``channel`` is provided.
+            scopes (Optional[Sequence[str]]): An optional list of scopes needed for this
+ service. These are only used when credentials are not specified and
+ are passed to :func:`google.auth.default`.
+ quota_project_id (Optional[str]): An optional project to use for billing
+ and quota.
+ kwargs (Optional[dict]): Keyword arguments, which are passed to the
+ channel creation.
+ Returns:
+ aio.Channel: A gRPC AsyncIO channel object.
+ """
+ scopes = scopes or cls.AUTH_SCOPES
+ return grpc_helpers_async.create_channel(
+ host,
+ credentials=credentials,
+ credentials_file=credentials_file,
+ scopes=scopes,
+ quota_project_id=quota_project_id,
+ **kwargs,
+ )
+
+ def __init__(
+ self,
+ *,
+ host: str = "aiplatform.googleapis.com",
+ credentials: credentials.Credentials = None,
+ credentials_file: Optional[str] = None,
+ scopes: Optional[Sequence[str]] = None,
+ channel: aio.Channel = None,
+ api_mtls_endpoint: str = None,
+ client_cert_source: Callable[[], Tuple[bytes, bytes]] = None,
+ ssl_channel_credentials: grpc.ChannelCredentials = None,
+ client_cert_source_for_mtls: Callable[[], Tuple[bytes, bytes]] = None,
+        quota_project_id: Optional[str] = None,
+ client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
+ ) -> None:
+ """Instantiate the transport.
+
+ Args:
+ host (Optional[str]): The hostname to connect to.
+ credentials (Optional[google.auth.credentials.Credentials]): The
+ authorization credentials to attach to requests. These
+ credentials identify the application to the service; if none
+ are specified, the client will attempt to ascertain the
+ credentials from the environment.
+ This argument is ignored if ``channel`` is provided.
+ credentials_file (Optional[str]): A file with credentials that can
+ be loaded with :func:`google.auth.load_credentials_from_file`.
+ This argument is ignored if ``channel`` is provided.
+            scopes (Optional[Sequence[str]]): An optional list of scopes needed for this
+ service. These are only used when credentials are not specified and
+ are passed to :func:`google.auth.default`.
+ channel (Optional[aio.Channel]): A ``Channel`` instance through
+ which to make calls.
+ api_mtls_endpoint (Optional[str]): Deprecated. The mutual TLS endpoint.
+ If provided, it overrides the ``host`` argument and tries to create
+ a mutual TLS channel with client SSL credentials from
+                ``client_cert_source`` or application default SSL credentials.
+ client_cert_source (Optional[Callable[[], Tuple[bytes, bytes]]]):
+ Deprecated. A callback to provide client SSL certificate bytes and
+ private key bytes, both in PEM format. It is ignored if
+ ``api_mtls_endpoint`` is None.
+ ssl_channel_credentials (grpc.ChannelCredentials): SSL credentials
+ for grpc channel. It is ignored if ``channel`` is provided.
+ client_cert_source_for_mtls (Optional[Callable[[], Tuple[bytes, bytes]]]):
+ A callback to provide client certificate bytes and private key bytes,
+ both in PEM format. It is used to configure mutual TLS channel. It is
+ ignored if ``channel`` or ``ssl_channel_credentials`` is provided.
+ quota_project_id (Optional[str]): An optional project to use for billing
+ and quota.
+ client_info (google.api_core.gapic_v1.client_info.ClientInfo):
+ The client info used to send a user-agent string along with
+ API requests. If ``None``, then default info will be used.
+ Generally, you only need to set this if you're developing
+ your own client library.
+
+ Raises:
+            google.auth.exceptions.MutualTLSChannelError: If mutual TLS transport
+ creation failed for any reason.
+ google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials``
+ and ``credentials_file`` are passed.
+ """
+ self._ssl_channel_credentials = ssl_channel_credentials
+
+ if api_mtls_endpoint:
+ warnings.warn("api_mtls_endpoint is deprecated", DeprecationWarning)
+ if client_cert_source:
+ warnings.warn("client_cert_source is deprecated", DeprecationWarning)
+
+ if channel:
+ # Sanity check: Ensure that channel and credentials are not both
+ # provided.
+ credentials = False
+
+ # If a channel was explicitly provided, set it.
+ self._grpc_channel = channel
+ self._ssl_channel_credentials = None
+ elif api_mtls_endpoint:
+ host = (
+ api_mtls_endpoint
+ if ":" in api_mtls_endpoint
+ else api_mtls_endpoint + ":443"
+ )
+
+ if credentials is None:
+ credentials, _ = auth.default(
+ scopes=self.AUTH_SCOPES, quota_project_id=quota_project_id
+ )
+
+ # Create SSL credentials with client_cert_source or application
+ # default SSL credentials.
+ if client_cert_source:
+ cert, key = client_cert_source()
+ ssl_credentials = grpc.ssl_channel_credentials(
+ certificate_chain=cert, private_key=key
+ )
+ else:
+ ssl_credentials = SslCredentials().ssl_credentials
+
+ # create a new channel. The provided one is ignored.
+ self._grpc_channel = type(self).create_channel(
+ host,
+ credentials=credentials,
+ credentials_file=credentials_file,
+ ssl_credentials=ssl_credentials,
+ scopes=scopes or self.AUTH_SCOPES,
+ quota_project_id=quota_project_id,
+ options=[
+ ("grpc.max_send_message_length", -1),
+ ("grpc.max_receive_message_length", -1),
+ ],
+ )
+ self._ssl_channel_credentials = ssl_credentials
+ else:
+ host = host if ":" in host else host + ":443"
+
+ if credentials is None:
+ credentials, _ = auth.default(
+ scopes=self.AUTH_SCOPES, quota_project_id=quota_project_id
+ )
+
+ if client_cert_source_for_mtls and not ssl_channel_credentials:
+ cert, key = client_cert_source_for_mtls()
+ self._ssl_channel_credentials = grpc.ssl_channel_credentials(
+ certificate_chain=cert, private_key=key
+ )
+
+ # create a new channel. The provided one is ignored.
+ self._grpc_channel = type(self).create_channel(
+ host,
+ credentials=credentials,
+ credentials_file=credentials_file,
+ ssl_credentials=self._ssl_channel_credentials,
+ scopes=scopes or self.AUTH_SCOPES,
+ quota_project_id=quota_project_id,
+ options=[
+ ("grpc.max_send_message_length", -1),
+ ("grpc.max_receive_message_length", -1),
+ ],
+ )
+
+ # Run the base constructor.
+ super().__init__(
+ host=host,
+ credentials=credentials,
+ credentials_file=credentials_file,
+ scopes=scopes or self.AUTH_SCOPES,
+ quota_project_id=quota_project_id,
+ client_info=client_info,
+ )
+
+ self._stubs = {}
+ self._operations_client = None
+
+ @property
+ def grpc_channel(self) -> aio.Channel:
+ """Create the channel designed to connect to this service.
+
+ This property caches on the instance; repeated calls return
+ the same channel.
+ """
+ # Return the channel from cache.
+ return self._grpc_channel
+
+ @property
+ def operations_client(self) -> operations_v1.OperationsAsyncClient:
+ """Create the client designed to process long-running operations.
+
+ This property caches on the instance; repeated calls return the same
+ client.
+ """
+ # Sanity check: Only create a new client if we do not already have one.
+ if self._operations_client is None:
+ self._operations_client = operations_v1.OperationsAsyncClient(
+ self.grpc_channel
+ )
+
+ # Return the client from cache.
+ return self._operations_client
+
+ @property
+ def upload_model(
+ self,
+ ) -> Callable[[model_service.UploadModelRequest], Awaitable[operations.Operation]]:
+ r"""Return a callable for the upload model method over gRPC.
+
+ Uploads a Model artifact into AI Platform.
+
+ Returns:
+ Callable[[~.UploadModelRequest],
+ Awaitable[~.Operation]]:
+ A function that, when called, will call the underlying RPC
+ on the server.
+ """
+ # Generate a "stub function" on-the-fly which will actually make
+ # the request.
+ # gRPC handles serialization and deserialization, so we just need
+ # to pass in the functions for each.
+ if "upload_model" not in self._stubs:
+ self._stubs["upload_model"] = self.grpc_channel.unary_unary(
+ "/google.cloud.aiplatform.v1.ModelService/UploadModel",
+ request_serializer=model_service.UploadModelRequest.serialize,
+ response_deserializer=operations.Operation.FromString,
+ )
+ return self._stubs["upload_model"]
+
+ @property
+ def get_model(
+ self,
+ ) -> Callable[[model_service.GetModelRequest], Awaitable[model.Model]]:
+ r"""Return a callable for the get model method over gRPC.
+
+ Gets a Model.
+
+ Returns:
+ Callable[[~.GetModelRequest],
+ Awaitable[~.Model]]:
+ A function that, when called, will call the underlying RPC
+ on the server.
+ """
+ # Generate a "stub function" on-the-fly which will actually make
+ # the request.
+ # gRPC handles serialization and deserialization, so we just need
+ # to pass in the functions for each.
+ if "get_model" not in self._stubs:
+ self._stubs["get_model"] = self.grpc_channel.unary_unary(
+ "/google.cloud.aiplatform.v1.ModelService/GetModel",
+ request_serializer=model_service.GetModelRequest.serialize,
+ response_deserializer=model.Model.deserialize,
+ )
+ return self._stubs["get_model"]
+
+ @property
+ def list_models(
+ self,
+ ) -> Callable[
+ [model_service.ListModelsRequest], Awaitable[model_service.ListModelsResponse]
+ ]:
+ r"""Return a callable for the list models method over gRPC.
+
+ Lists Models in a Location.
+
+ Returns:
+ Callable[[~.ListModelsRequest],
+ Awaitable[~.ListModelsResponse]]:
+ A function that, when called, will call the underlying RPC
+ on the server.
+ """
+ # Generate a "stub function" on-the-fly which will actually make
+ # the request.
+ # gRPC handles serialization and deserialization, so we just need
+ # to pass in the functions for each.
+ if "list_models" not in self._stubs:
+ self._stubs["list_models"] = self.grpc_channel.unary_unary(
+ "/google.cloud.aiplatform.v1.ModelService/ListModels",
+ request_serializer=model_service.ListModelsRequest.serialize,
+ response_deserializer=model_service.ListModelsResponse.deserialize,
+ )
+ return self._stubs["list_models"]
+
+ @property
+ def update_model(
+ self,
+ ) -> Callable[[model_service.UpdateModelRequest], Awaitable[gca_model.Model]]:
+ r"""Return a callable for the update model method over gRPC.
+
+ Updates a Model.
+
+ Returns:
+ Callable[[~.UpdateModelRequest],
+ Awaitable[~.Model]]:
+ A function that, when called, will call the underlying RPC
+ on the server.
+ """
+ # Generate a "stub function" on-the-fly which will actually make
+ # the request.
+ # gRPC handles serialization and deserialization, so we just need
+ # to pass in the functions for each.
+ if "update_model" not in self._stubs:
+ self._stubs["update_model"] = self.grpc_channel.unary_unary(
+ "/google.cloud.aiplatform.v1.ModelService/UpdateModel",
+ request_serializer=model_service.UpdateModelRequest.serialize,
+ response_deserializer=gca_model.Model.deserialize,
+ )
+ return self._stubs["update_model"]
+
+ @property
+ def delete_model(
+ self,
+ ) -> Callable[[model_service.DeleteModelRequest], Awaitable[operations.Operation]]:
+ r"""Return a callable for the delete model method over gRPC.
+
+ Deletes a Model.
+ Note: Model can only be deleted if there are no
+ DeployedModels created from it.
+
+ Returns:
+ Callable[[~.DeleteModelRequest],
+ Awaitable[~.Operation]]:
+ A function that, when called, will call the underlying RPC
+ on the server.
+ """
+ # Generate a "stub function" on-the-fly which will actually make
+ # the request.
+ # gRPC handles serialization and deserialization, so we just need
+ # to pass in the functions for each.
+ if "delete_model" not in self._stubs:
+ self._stubs["delete_model"] = self.grpc_channel.unary_unary(
+ "/google.cloud.aiplatform.v1.ModelService/DeleteModel",
+ request_serializer=model_service.DeleteModelRequest.serialize,
+ response_deserializer=operations.Operation.FromString,
+ )
+ return self._stubs["delete_model"]
+
+ @property
+ def export_model(
+ self,
+ ) -> Callable[[model_service.ExportModelRequest], Awaitable[operations.Operation]]:
+ r"""Return a callable for the export model method over gRPC.
+
+ Exports a trained, exportable, Model to a location specified by
+ the user. A Model is considered to be exportable if it has at
+ least one [supported export
+ format][google.cloud.aiplatform.v1.Model.supported_export_formats].
+
+ Returns:
+ Callable[[~.ExportModelRequest],
+ Awaitable[~.Operation]]:
+ A function that, when called, will call the underlying RPC
+ on the server.
+ """
+ # Generate a "stub function" on-the-fly which will actually make
+ # the request.
+ # gRPC handles serialization and deserialization, so we just need
+ # to pass in the functions for each.
+ if "export_model" not in self._stubs:
+ self._stubs["export_model"] = self.grpc_channel.unary_unary(
+ "/google.cloud.aiplatform.v1.ModelService/ExportModel",
+ request_serializer=model_service.ExportModelRequest.serialize,
+ response_deserializer=operations.Operation.FromString,
+ )
+ return self._stubs["export_model"]
+
+ @property
+ def get_model_evaluation(
+ self,
+ ) -> Callable[
+ [model_service.GetModelEvaluationRequest],
+ Awaitable[model_evaluation.ModelEvaluation],
+ ]:
+ r"""Return a callable for the get model evaluation method over gRPC.
+
+ Gets a ModelEvaluation.
+
+ Returns:
+ Callable[[~.GetModelEvaluationRequest],
+ Awaitable[~.ModelEvaluation]]:
+ A function that, when called, will call the underlying RPC
+ on the server.
+ """
+ # Generate a "stub function" on-the-fly which will actually make
+ # the request.
+ # gRPC handles serialization and deserialization, so we just need
+ # to pass in the functions for each.
+ if "get_model_evaluation" not in self._stubs:
+ self._stubs["get_model_evaluation"] = self.grpc_channel.unary_unary(
+ "/google.cloud.aiplatform.v1.ModelService/GetModelEvaluation",
+ request_serializer=model_service.GetModelEvaluationRequest.serialize,
+ response_deserializer=model_evaluation.ModelEvaluation.deserialize,
+ )
+ return self._stubs["get_model_evaluation"]
+
+ @property
+ def list_model_evaluations(
+ self,
+ ) -> Callable[
+ [model_service.ListModelEvaluationsRequest],
+ Awaitable[model_service.ListModelEvaluationsResponse],
+ ]:
+ r"""Return a callable for the list model evaluations method over gRPC.
+
+ Lists ModelEvaluations in a Model.
+
+ Returns:
+ Callable[[~.ListModelEvaluationsRequest],
+ Awaitable[~.ListModelEvaluationsResponse]]:
+ A function that, when called, will call the underlying RPC
+ on the server.
+ """
+ # Generate a "stub function" on-the-fly which will actually make
+ # the request.
+ # gRPC handles serialization and deserialization, so we just need
+ # to pass in the functions for each.
+ if "list_model_evaluations" not in self._stubs:
+ self._stubs["list_model_evaluations"] = self.grpc_channel.unary_unary(
+ "/google.cloud.aiplatform.v1.ModelService/ListModelEvaluations",
+ request_serializer=model_service.ListModelEvaluationsRequest.serialize,
+ response_deserializer=model_service.ListModelEvaluationsResponse.deserialize,
+ )
+ return self._stubs["list_model_evaluations"]
+
+ @property
+ def get_model_evaluation_slice(
+ self,
+ ) -> Callable[
+ [model_service.GetModelEvaluationSliceRequest],
+ Awaitable[model_evaluation_slice.ModelEvaluationSlice],
+ ]:
+ r"""Return a callable for the get model evaluation slice method over gRPC.
+
+ Gets a ModelEvaluationSlice.
+
+ Returns:
+ Callable[[~.GetModelEvaluationSliceRequest],
+ Awaitable[~.ModelEvaluationSlice]]:
+ A function that, when called, will call the underlying RPC
+ on the server.
+ """
+ # Generate a "stub function" on-the-fly which will actually make
+ # the request.
+ # gRPC handles serialization and deserialization, so we just need
+ # to pass in the functions for each.
+ if "get_model_evaluation_slice" not in self._stubs:
+ self._stubs["get_model_evaluation_slice"] = self.grpc_channel.unary_unary(
+ "/google.cloud.aiplatform.v1.ModelService/GetModelEvaluationSlice",
+ request_serializer=model_service.GetModelEvaluationSliceRequest.serialize,
+ response_deserializer=model_evaluation_slice.ModelEvaluationSlice.deserialize,
+ )
+ return self._stubs["get_model_evaluation_slice"]
+
+ @property
+ def list_model_evaluation_slices(
+ self,
+ ) -> Callable[
+ [model_service.ListModelEvaluationSlicesRequest],
+ Awaitable[model_service.ListModelEvaluationSlicesResponse],
+ ]:
+ r"""Return a callable for the list model evaluation slices method over gRPC.
+
+ Lists ModelEvaluationSlices in a ModelEvaluation.
+
+ Returns:
+ Callable[[~.ListModelEvaluationSlicesRequest],
+ Awaitable[~.ListModelEvaluationSlicesResponse]]:
+ A function that, when called, will call the underlying RPC
+ on the server.
+ """
+ # Generate a "stub function" on-the-fly which will actually make
+ # the request.
+ # gRPC handles serialization and deserialization, so we just need
+ # to pass in the functions for each.
+ if "list_model_evaluation_slices" not in self._stubs:
+ self._stubs["list_model_evaluation_slices"] = self.grpc_channel.unary_unary(
+ "/google.cloud.aiplatform.v1.ModelService/ListModelEvaluationSlices",
+ request_serializer=model_service.ListModelEvaluationSlicesRequest.serialize,
+ response_deserializer=model_service.ListModelEvaluationSlicesResponse.deserialize,
+ )
+ return self._stubs["list_model_evaluation_slices"]
+
+
+__all__ = ("ModelServiceGrpcAsyncIOTransport",)
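Callers normally select this transport by passing its registered name to the async client rather than instantiating it directly. A hedged sketch, assuming ambient credentials and a placeholder parent resource:

    import asyncio
    from google.cloud import aiplatform_v1

    async def count_models(parent: str) -> int:
        # "grpc_asyncio" is both the name registered above and the async
        # client's default transport.
        client = aiplatform_v1.ModelServiceAsyncClient(transport="grpc_asyncio")
        total = 0
        pager = await client.list_models(parent=parent)
        async for _ in pager:
            total += 1
        return total

    # asyncio.run(count_models("projects/my-project/locations/us-central1"))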
diff --git a/google/cloud/aiplatform_v1/services/pipeline_service/__init__.py b/google/cloud/aiplatform_v1/services/pipeline_service/__init__.py
new file mode 100644
index 0000000000..7f02b47358
--- /dev/null
+++ b/google/cloud/aiplatform_v1/services/pipeline_service/__init__.py
@@ -0,0 +1,24 @@
+# -*- coding: utf-8 -*-
+
+# Copyright 2020 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+from .client import PipelineServiceClient
+from .async_client import PipelineServiceAsyncClient
+
+__all__ = (
+ "PipelineServiceClient",
+ "PipelineServiceAsyncClient",
+)
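Either client exported here is typically pointed at a regional endpoint via ``client_options``. A minimal sketch; the region is a placeholder and credentials are taken from the environment:

    from google.api_core.client_options import ClientOptions
    from google.cloud.aiplatform_v1.services.pipeline_service import PipelineServiceClient

    client = PipelineServiceClient(
        client_options=ClientOptions(
            api_endpoint="us-central1-aiplatform.googleapis.com"
        )
    )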
diff --git a/google/cloud/aiplatform_v1/services/pipeline_service/async_client.py b/google/cloud/aiplatform_v1/services/pipeline_service/async_client.py
new file mode 100644
index 0000000000..95c7d8a176
--- /dev/null
+++ b/google/cloud/aiplatform_v1/services/pipeline_service/async_client.py
@@ -0,0 +1,600 @@
+# -*- coding: utf-8 -*-
+
+# Copyright 2020 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+from collections import OrderedDict
+import functools
+import re
+from typing import Dict, Sequence, Tuple, Type, Union
+import pkg_resources
+
+import google.api_core.client_options as ClientOptions # type: ignore
+from google.api_core import exceptions # type: ignore
+from google.api_core import gapic_v1 # type: ignore
+from google.api_core import retry as retries # type: ignore
+from google.auth import credentials # type: ignore
+from google.oauth2 import service_account # type: ignore
+
+from google.api_core import operation as ga_operation # type: ignore
+from google.api_core import operation_async # type: ignore
+from google.cloud.aiplatform_v1.services.pipeline_service import pagers
+from google.cloud.aiplatform_v1.types import encryption_spec
+from google.cloud.aiplatform_v1.types import model
+from google.cloud.aiplatform_v1.types import operation as gca_operation
+from google.cloud.aiplatform_v1.types import pipeline_service
+from google.cloud.aiplatform_v1.types import pipeline_state
+from google.cloud.aiplatform_v1.types import training_pipeline
+from google.cloud.aiplatform_v1.types import training_pipeline as gca_training_pipeline
+from google.protobuf import empty_pb2 as empty # type: ignore
+from google.protobuf import struct_pb2 as struct # type: ignore
+from google.protobuf import timestamp_pb2 as timestamp # type: ignore
+from google.rpc import status_pb2 as status # type: ignore
+
+from .transports.base import PipelineServiceTransport, DEFAULT_CLIENT_INFO
+from .transports.grpc_asyncio import PipelineServiceGrpcAsyncIOTransport
+from .client import PipelineServiceClient
+
+
+class PipelineServiceAsyncClient:
+ """A service for creating and managing AI Platform's pipelines."""
+
+ _client: PipelineServiceClient
+
+ DEFAULT_ENDPOINT = PipelineServiceClient.DEFAULT_ENDPOINT
+ DEFAULT_MTLS_ENDPOINT = PipelineServiceClient.DEFAULT_MTLS_ENDPOINT
+
+ endpoint_path = staticmethod(PipelineServiceClient.endpoint_path)
+ parse_endpoint_path = staticmethod(PipelineServiceClient.parse_endpoint_path)
+ model_path = staticmethod(PipelineServiceClient.model_path)
+ parse_model_path = staticmethod(PipelineServiceClient.parse_model_path)
+ training_pipeline_path = staticmethod(PipelineServiceClient.training_pipeline_path)
+ parse_training_pipeline_path = staticmethod(
+ PipelineServiceClient.parse_training_pipeline_path
+ )
+
+ common_billing_account_path = staticmethod(
+ PipelineServiceClient.common_billing_account_path
+ )
+ parse_common_billing_account_path = staticmethod(
+ PipelineServiceClient.parse_common_billing_account_path
+ )
+
+ common_folder_path = staticmethod(PipelineServiceClient.common_folder_path)
+ parse_common_folder_path = staticmethod(
+ PipelineServiceClient.parse_common_folder_path
+ )
+
+ common_organization_path = staticmethod(
+ PipelineServiceClient.common_organization_path
+ )
+ parse_common_organization_path = staticmethod(
+ PipelineServiceClient.parse_common_organization_path
+ )
+
+ common_project_path = staticmethod(PipelineServiceClient.common_project_path)
+ parse_common_project_path = staticmethod(
+ PipelineServiceClient.parse_common_project_path
+ )
+
+ common_location_path = staticmethod(PipelineServiceClient.common_location_path)
+ parse_common_location_path = staticmethod(
+ PipelineServiceClient.parse_common_location_path
+ )
+
+ from_service_account_info = PipelineServiceClient.from_service_account_info
+ from_service_account_file = PipelineServiceClient.from_service_account_file
+ from_service_account_json = from_service_account_file
+
+ @property
+ def transport(self) -> PipelineServiceTransport:
+ """Return the transport used by the client instance.
+
+ Returns:
+ PipelineServiceTransport: The transport used by the client instance.
+ """
+ return self._client.transport
+
+ get_transport_class = functools.partial(
+ type(PipelineServiceClient).get_transport_class, type(PipelineServiceClient)
+ )
+
+ def __init__(
+ self,
+ *,
+ credentials: credentials.Credentials = None,
+ transport: Union[str, PipelineServiceTransport] = "grpc_asyncio",
+ client_options: ClientOptions = None,
+ client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
+ ) -> None:
+ """Instantiate the pipeline service client.
+
+ Args:
+ credentials (Optional[google.auth.credentials.Credentials]): The
+ authorization credentials to attach to requests. These
+ credentials identify the application to the service; if none
+ are specified, the client will attempt to ascertain the
+ credentials from the environment.
+ transport (Union[str, ~.PipelineServiceTransport]): The
+ transport to use. If set to None, a transport is chosen
+ automatically.
+ client_options (ClientOptions): Custom options for the client. It
+ won't take effect if a ``transport`` instance is provided.
+ (1) The ``api_endpoint`` property can be used to override the
+ default endpoint provided by the client. GOOGLE_API_USE_MTLS_ENDPOINT
+ environment variable can also be used to override the endpoint:
+ "always" (always use the default mTLS endpoint), "never" (always
+ use the default regular endpoint) and "auto" (auto switch to the
+ default mTLS endpoint if client certificate is present, this is
+ the default value). However, the ``api_endpoint`` property takes
+ precedence if provided.
+ (2) If GOOGLE_API_USE_CLIENT_CERTIFICATE environment variable
+ is "true", then the ``client_cert_source`` property can be used
+ to provide client certificate for mutual TLS transport. If
+ not provided, the default SSL client certificate will be used if
+ present. If GOOGLE_API_USE_CLIENT_CERTIFICATE is "false" or not
+ set, no client certificate will be used.
+
+ Raises:
+ google.auth.exceptions.MutualTlsChannelError: If mutual TLS transport
+ creation failed for any reason.
+ """
+
+ self._client = PipelineServiceClient(
+ credentials=credentials,
+ transport=transport,
+ client_options=client_options,
+ client_info=client_info,
+ )
+
+ async def create_training_pipeline(
+ self,
+ request: pipeline_service.CreateTrainingPipelineRequest = None,
+ *,
+ parent: str = None,
+ training_pipeline: gca_training_pipeline.TrainingPipeline = None,
+ retry: retries.Retry = gapic_v1.method.DEFAULT,
+ timeout: float = None,
+ metadata: Sequence[Tuple[str, str]] = (),
+ ) -> gca_training_pipeline.TrainingPipeline:
+ r"""Creates a TrainingPipeline. A created
+ TrainingPipeline right away will be attempted to be run.
+
+ Args:
+ request (:class:`google.cloud.aiplatform_v1.types.CreateTrainingPipelineRequest`):
+ The request object. Request message for
+ ``PipelineService.CreateTrainingPipeline``.
+ parent (:class:`str`):
+ Required. The resource name of the Location to create
+ the TrainingPipeline in. Format:
+ ``projects/{project}/locations/{location}``
+
+ This corresponds to the ``parent`` field
+ on the ``request`` instance; if ``request`` is provided, this
+ should not be set.
+ training_pipeline (:class:`google.cloud.aiplatform_v1.types.TrainingPipeline`):
+ Required. The TrainingPipeline to
+ create.
+
+ This corresponds to the ``training_pipeline`` field
+ on the ``request`` instance; if ``request`` is provided, this
+ should not be set.
+
+ retry (google.api_core.retry.Retry): Designation of what errors, if any,
+ should be retried.
+ timeout (float): The timeout for this request.
+ metadata (Sequence[Tuple[str, str]]): Strings which should be
+ sent along with the request as metadata.
+
+ Returns:
+ google.cloud.aiplatform_v1.types.TrainingPipeline:
+ The TrainingPipeline orchestrates tasks associated with training a Model. It
+ always executes the training task, and optionally may
+ also export data from AI Platform's Dataset which
+ becomes the training input,
+ ``upload``
+ the Model to AI Platform, and evaluate the Model.
+
+ """
+ # Create or coerce a protobuf request object.
+ # Sanity check: If we got a request object, we should *not* have
+ # gotten any keyword arguments that map to the request.
+ has_flattened_params = any([parent, training_pipeline])
+ if request is not None and has_flattened_params:
+ raise ValueError(
+ "If the `request` argument is set, then none of "
+ "the individual field arguments should be set."
+ )
+
+ request = pipeline_service.CreateTrainingPipelineRequest(request)
+
+ # If we have keyword arguments corresponding to fields on the
+ # request, apply these.
+
+ if parent is not None:
+ request.parent = parent
+ if training_pipeline is not None:
+ request.training_pipeline = training_pipeline
+
+ # Wrap the RPC method; this adds retry and timeout information,
+ # and friendly error handling.
+ rpc = gapic_v1.method_async.wrap_method(
+ self._client._transport.create_training_pipeline,
+ default_timeout=None,
+ client_info=DEFAULT_CLIENT_INFO,
+ )
+
+ # Certain fields should be provided within the metadata header;
+ # add these here.
+ metadata = tuple(metadata) + (
+ gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)),
+ )
+
+ # Send the request.
+ response = await rpc(request, retry=retry, timeout=timeout, metadata=metadata,)
+
+ # Done; return the response.
+ return response
+
+ async def get_training_pipeline(
+ self,
+ request: pipeline_service.GetTrainingPipelineRequest = None,
+ *,
+ name: str = None,
+ retry: retries.Retry = gapic_v1.method.DEFAULT,
+ timeout: float = None,
+ metadata: Sequence[Tuple[str, str]] = (),
+ ) -> training_pipeline.TrainingPipeline:
+ r"""Gets a TrainingPipeline.
+
+ Args:
+ request (:class:`google.cloud.aiplatform_v1.types.GetTrainingPipelineRequest`):
+ The request object. Request message for
+ ``PipelineService.GetTrainingPipeline``.
+ name (:class:`str`):
+ Required. The name of the TrainingPipeline resource.
+ Format:
+
+ ``projects/{project}/locations/{location}/trainingPipelines/{training_pipeline}``
+
+ This corresponds to the ``name`` field
+ on the ``request`` instance; if ``request`` is provided, this
+ should not be set.
+
+ retry (google.api_core.retry.Retry): Designation of what errors, if any,
+ should be retried.
+ timeout (float): The timeout for this request.
+ metadata (Sequence[Tuple[str, str]]): Strings which should be
+ sent along with the request as metadata.
+
+ Returns:
+ google.cloud.aiplatform_v1.types.TrainingPipeline:
+ The TrainingPipeline orchestrates tasks associated with training a Model. It
+ always executes the training task, and optionally may
+ also export data from AI Platform's Dataset which
+ becomes the training input,
+ ``upload``
+ the Model to AI Platform, and evaluate the Model.
+
+ """
+ # Create or coerce a protobuf request object.
+ # Sanity check: If we got a request object, we should *not* have
+ # gotten any keyword arguments that map to the request.
+ has_flattened_params = any([name])
+ if request is not None and has_flattened_params:
+ raise ValueError(
+ "If the `request` argument is set, then none of "
+ "the individual field arguments should be set."
+ )
+
+ request = pipeline_service.GetTrainingPipelineRequest(request)
+
+ # If we have keyword arguments corresponding to fields on the
+ # request, apply these.
+
+ if name is not None:
+ request.name = name
+
+ # Wrap the RPC method; this adds retry and timeout information,
+ # and friendly error handling.
+ rpc = gapic_v1.method_async.wrap_method(
+ self._client._transport.get_training_pipeline,
+ default_timeout=None,
+ client_info=DEFAULT_CLIENT_INFO,
+ )
+
+ # Certain fields should be provided within the metadata header;
+ # add these here.
+ metadata = tuple(metadata) + (
+ gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)),
+ )
+
+ # Send the request.
+ response = await rpc(request, retry=retry, timeout=timeout, metadata=metadata,)
+
+ # Done; return the response.
+ return response
+
+ async def list_training_pipelines(
+ self,
+ request: pipeline_service.ListTrainingPipelinesRequest = None,
+ *,
+ parent: str = None,
+ retry: retries.Retry = gapic_v1.method.DEFAULT,
+ timeout: float = None,
+ metadata: Sequence[Tuple[str, str]] = (),
+ ) -> pagers.ListTrainingPipelinesAsyncPager:
+ r"""Lists TrainingPipelines in a Location.
+
+ Args:
+ request (:class:`google.cloud.aiplatform_v1.types.ListTrainingPipelinesRequest`):
+ The request object. Request message for
+ ``PipelineService.ListTrainingPipelines``.
+ parent (:class:`str`):
+ Required. The resource name of the Location to list the
+ TrainingPipelines from. Format:
+ ``projects/{project}/locations/{location}``
+
+ This corresponds to the ``parent`` field
+ on the ``request`` instance; if ``request`` is provided, this
+ should not be set.
+
+ retry (google.api_core.retry.Retry): Designation of what errors, if any,
+ should be retried.
+ timeout (float): The timeout for this request.
+ metadata (Sequence[Tuple[str, str]]): Strings which should be
+ sent along with the request as metadata.
+
+ Returns:
+ google.cloud.aiplatform_v1.services.pipeline_service.pagers.ListTrainingPipelinesAsyncPager:
+ Response message for
+ ``PipelineService.ListTrainingPipelines``
+
+ Iterating over this object will yield results and
+ resolve additional pages automatically.
+
+ """
+ # Create or coerce a protobuf request object.
+ # Sanity check: If we got a request object, we should *not* have
+ # gotten any keyword arguments that map to the request.
+ has_flattened_params = any([parent])
+ if request is not None and has_flattened_params:
+ raise ValueError(
+ "If the `request` argument is set, then none of "
+ "the individual field arguments should be set."
+ )
+
+ request = pipeline_service.ListTrainingPipelinesRequest(request)
+
+ # If we have keyword arguments corresponding to fields on the
+ # request, apply these.
+
+ if parent is not None:
+ request.parent = parent
+
+ # Wrap the RPC method; this adds retry and timeout information,
+ # and friendly error handling.
+ rpc = gapic_v1.method_async.wrap_method(
+ self._client._transport.list_training_pipelines,
+ default_timeout=None,
+ client_info=DEFAULT_CLIENT_INFO,
+ )
+
+ # Certain fields should be provided within the metadata header;
+ # add these here.
+ metadata = tuple(metadata) + (
+ gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)),
+ )
+
+ # Send the request.
+ response = await rpc(request, retry=retry, timeout=timeout, metadata=metadata,)
+
+ # This method is paged; wrap the response in a pager, which provides
+ # an `__aiter__` convenience method.
+ response = pagers.ListTrainingPipelinesAsyncPager(
+ method=rpc, request=request, response=response, metadata=metadata,
+ )
+
+ # Done; return the response.
+ return response
+
+ async def delete_training_pipeline(
+ self,
+ request: pipeline_service.DeleteTrainingPipelineRequest = None,
+ *,
+ name: str = None,
+ retry: retries.Retry = gapic_v1.method.DEFAULT,
+ timeout: float = None,
+ metadata: Sequence[Tuple[str, str]] = (),
+ ) -> operation_async.AsyncOperation:
+ r"""Deletes a TrainingPipeline.
+
+ Args:
+ request (:class:`google.cloud.aiplatform_v1.types.DeleteTrainingPipelineRequest`):
+ The request object. Request message for
+ ``PipelineService.DeleteTrainingPipeline``.
+ name (:class:`str`):
+ Required. The name of the TrainingPipeline resource to
+ be deleted. Format:
+
+ ``projects/{project}/locations/{location}/trainingPipelines/{training_pipeline}``
+
+ This corresponds to the ``name`` field
+ on the ``request`` instance; if ``request`` is provided, this
+ should not be set.
+
+ retry (google.api_core.retry.Retry): Designation of what errors, if any,
+ should be retried.
+ timeout (float): The timeout for this request.
+ metadata (Sequence[Tuple[str, str]]): Strings which should be
+ sent along with the request as metadata.
+
+ Returns:
+ google.api_core.operation_async.AsyncOperation:
+ An object representing a long-running operation.
+
+ The result type for the operation will be :class:`google.protobuf.empty_pb2.Empty` A generic empty message that you can re-use to avoid defining duplicated
+ empty messages in your APIs. A typical example is to
+ use it as the request or the response type of an API
+ method. For instance:
+
+ service Foo {
+ rpc Bar(google.protobuf.Empty) returns
+ (google.protobuf.Empty);
+
+ }
+
+ The JSON representation for Empty is empty JSON
+ object {}.
+
+ """
+ # Create or coerce a protobuf request object.
+ # Sanity check: If we got a request object, we should *not* have
+ # gotten any keyword arguments that map to the request.
+ has_flattened_params = any([name])
+ if request is not None and has_flattened_params:
+ raise ValueError(
+ "If the `request` argument is set, then none of "
+ "the individual field arguments should be set."
+ )
+
+ request = pipeline_service.DeleteTrainingPipelineRequest(request)
+
+ # If we have keyword arguments corresponding to fields on the
+ # request, apply these.
+
+ if name is not None:
+ request.name = name
+
+ # Wrap the RPC method; this adds retry and timeout information,
+ # and friendly error handling.
+ rpc = gapic_v1.method_async.wrap_method(
+ self._client._transport.delete_training_pipeline,
+ default_timeout=None,
+ client_info=DEFAULT_CLIENT_INFO,
+ )
+
+ # Certain fields should be provided within the metadata header;
+ # add these here.
+ metadata = tuple(metadata) + (
+ gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)),
+ )
+
+ # Send the request.
+ response = await rpc(request, retry=retry, timeout=timeout, metadata=metadata,)
+
+ # Wrap the response in an operation future.
+ response = operation_async.from_gapic(
+ response,
+ self._client._transport.operations_client,
+ empty.Empty,
+ metadata_type=gca_operation.DeleteOperationMetadata,
+ )
+
+ # Done; return the response.
+ return response
+
+ async def cancel_training_pipeline(
+ self,
+ request: pipeline_service.CancelTrainingPipelineRequest = None,
+ *,
+ name: str = None,
+ retry: retries.Retry = gapic_v1.method.DEFAULT,
+ timeout: float = None,
+ metadata: Sequence[Tuple[str, str]] = (),
+ ) -> None:
+ r"""Cancels a TrainingPipeline. Starts asynchronous cancellation on
+ the TrainingPipeline. The server makes a best effort to cancel
+ the pipeline, but success is not guaranteed. Clients can use
+ ``PipelineService.GetTrainingPipeline``
+ or other methods to check whether the cancellation succeeded or
+ whether the pipeline completed despite cancellation. On
+ successful cancellation, the TrainingPipeline is not deleted;
+ instead it becomes a pipeline with a
+ ``TrainingPipeline.error``
+ value with a ``google.rpc.Status.code`` of
+ 1, corresponding to ``Code.CANCELLED``, and
+ ``TrainingPipeline.state``
+ is set to ``CANCELLED``.
+
+ Args:
+ request (:class:`google.cloud.aiplatform_v1.types.CancelTrainingPipelineRequest`):
+ The request object. Request message for
+ ``PipelineService.CancelTrainingPipeline``.
+ name (:class:`str`):
+ Required. The name of the TrainingPipeline to cancel.
+ Format:
+
+ ``projects/{project}/locations/{location}/trainingPipelines/{training_pipeline}``
+
+ This corresponds to the ``name`` field
+ on the ``request`` instance; if ``request`` is provided, this
+ should not be set.
+
+ retry (google.api_core.retry.Retry): Designation of what errors, if any,
+ should be retried.
+ timeout (float): The timeout for this request.
+ metadata (Sequence[Tuple[str, str]]): Strings which should be
+ sent along with the request as metadata.
+ """
+ # Create or coerce a protobuf request object.
+ # Sanity check: If we got a request object, we should *not* have
+ # gotten any keyword arguments that map to the request.
+ has_flattened_params = any([name])
+ if request is not None and has_flattened_params:
+ raise ValueError(
+ "If the `request` argument is set, then none of "
+ "the individual field arguments should be set."
+ )
+
+ request = pipeline_service.CancelTrainingPipelineRequest(request)
+
+ # If we have keyword arguments corresponding to fields on the
+ # request, apply these.
+
+ if name is not None:
+ request.name = name
+
+ # Wrap the RPC method; this adds retry and timeout information,
+ # and friendly error handling.
+ rpc = gapic_v1.method_async.wrap_method(
+ self._client._transport.cancel_training_pipeline,
+ default_timeout=None,
+ client_info=DEFAULT_CLIENT_INFO,
+ )
+
+ # Certain fields should be provided within the metadata header;
+ # add these here.
+ metadata = tuple(metadata) + (
+ gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)),
+ )
+
+ # Send the request.
+ await rpc(
+ request, retry=retry, timeout=timeout, metadata=metadata,
+ )
+
+
+try:
+ DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo(
+ gapic_version=pkg_resources.get_distribution(
+ "google-cloud-aiplatform",
+ ).version,
+ )
+except pkg_resources.DistributionNotFound:
+ DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo()
+
+
+__all__ = ("PipelineServiceAsyncClient",)
diff --git a/google/cloud/aiplatform_v1/services/pipeline_service/client.py b/google/cloud/aiplatform_v1/services/pipeline_service/client.py
new file mode 100644
index 0000000000..39f37eb72e
--- /dev/null
+++ b/google/cloud/aiplatform_v1/services/pipeline_service/client.py
@@ -0,0 +1,835 @@
+# -*- coding: utf-8 -*-
+
+# Copyright 2020 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+from collections import OrderedDict
+from distutils import util
+import os
+import re
+from typing import Callable, Dict, Optional, Sequence, Tuple, Type, Union
+import pkg_resources
+
+from google.api_core import client_options as client_options_lib # type: ignore
+from google.api_core import exceptions # type: ignore
+from google.api_core import gapic_v1 # type: ignore
+from google.api_core import retry as retries # type: ignore
+from google.auth import credentials # type: ignore
+from google.auth.transport import mtls # type: ignore
+from google.auth.transport.grpc import SslCredentials # type: ignore
+from google.auth.exceptions import MutualTLSChannelError # type: ignore
+from google.oauth2 import service_account # type: ignore
+
+from google.api_core import operation as ga_operation # type: ignore
+from google.api_core import operation_async # type: ignore
+from google.cloud.aiplatform_v1.services.pipeline_service import pagers
+from google.cloud.aiplatform_v1.types import encryption_spec
+from google.cloud.aiplatform_v1.types import model
+from google.cloud.aiplatform_v1.types import operation as gca_operation
+from google.cloud.aiplatform_v1.types import pipeline_service
+from google.cloud.aiplatform_v1.types import pipeline_state
+from google.cloud.aiplatform_v1.types import training_pipeline
+from google.cloud.aiplatform_v1.types import training_pipeline as gca_training_pipeline
+from google.protobuf import empty_pb2 as empty # type: ignore
+from google.protobuf import struct_pb2 as struct # type: ignore
+from google.protobuf import timestamp_pb2 as timestamp # type: ignore
+from google.rpc import status_pb2 as status # type: ignore
+
+from .transports.base import PipelineServiceTransport, DEFAULT_CLIENT_INFO
+from .transports.grpc import PipelineServiceGrpcTransport
+from .transports.grpc_asyncio import PipelineServiceGrpcAsyncIOTransport
+
+
+class PipelineServiceClientMeta(type):
+ """Metaclass for the PipelineService client.
+
+ This provides class-level methods for building and retrieving
+ support objects (e.g. transport) without polluting the client instance
+ objects.
+ """
+
+ _transport_registry = (
+ OrderedDict()
+ ) # type: Dict[str, Type[PipelineServiceTransport]]
+ _transport_registry["grpc"] = PipelineServiceGrpcTransport
+ _transport_registry["grpc_asyncio"] = PipelineServiceGrpcAsyncIOTransport
+
+ def get_transport_class(cls, label: str = None,) -> Type[PipelineServiceTransport]:
+ """Return an appropriate transport class.
+
+ Args:
+ label: The name of the desired transport. If none is
+ provided, then the first transport in the registry is used.
+
+ Returns:
+ The transport class to use.
+ """
+ # If a specific transport is requested, return that one.
+ if label:
+ return cls._transport_registry[label]
+
+ # No transport is requested; return the default (that is, the first one
+ # in the dictionary).
+ return next(iter(cls._transport_registry.values()))
+
+
+class PipelineServiceClient(metaclass=PipelineServiceClientMeta):
+ """A service for creating and managing AI Platform's pipelines."""
+
+ @staticmethod
+ def _get_default_mtls_endpoint(api_endpoint):
+ """Convert api endpoint to mTLS endpoint.
+ Convert "*.sandbox.googleapis.com" and "*.googleapis.com" to
+ "*.mtls.sandbox.googleapis.com" and "*.mtls.googleapis.com" respectively.
+ Args:
+ api_endpoint (Optional[str]): the api endpoint to convert.
+ Returns:
+ str: converted mTLS api endpoint.
+ """
+ if not api_endpoint:
+ return api_endpoint
+
+ mtls_endpoint_re = re.compile(
+ r"(?P[^.]+)(?P\.mtls)?(?P\.sandbox)?(?P\.googleapis\.com)?"
+ )
+
+ m = mtls_endpoint_re.match(api_endpoint)
+ name, mtls, sandbox, googledomain = m.groups()
+ if mtls or not googledomain:
+ return api_endpoint
+
+ if sandbox:
+ return api_endpoint.replace(
+ "sandbox.googleapis.com", "mtls.sandbox.googleapis.com"
+ )
+
+ return api_endpoint.replace(".googleapis.com", ".mtls.googleapis.com")
+
+ DEFAULT_ENDPOINT = "aiplatform.googleapis.com"
+ DEFAULT_MTLS_ENDPOINT = _get_default_mtls_endpoint.__func__( # type: ignore
+ DEFAULT_ENDPOINT
+ )
+
+ @classmethod
+ def from_service_account_info(cls, info: dict, *args, **kwargs):
+ """Creates an instance of this client using the provided credentials info.
+
+ Args:
+ info (dict): The service account private key info.
+ args: Additional arguments to pass to the constructor.
+ kwargs: Additional arguments to pass to the constructor.
+
+ Returns:
+ PipelineServiceClient: The constructed client.
+ """
+ credentials = service_account.Credentials.from_service_account_info(info)
+ kwargs["credentials"] = credentials
+ return cls(*args, **kwargs)
+
+ @classmethod
+ def from_service_account_file(cls, filename: str, *args, **kwargs):
+ """Creates an instance of this client using the provided credentials
+ file.
+
+ Args:
+ filename (str): The path to the service account private key json
+ file.
+ args: Additional arguments to pass to the constructor.
+ kwargs: Additional arguments to pass to the constructor.
+
+ Returns:
+ PipelineServiceClient: The constructed client.
+ """
+ credentials = service_account.Credentials.from_service_account_file(filename)
+ kwargs["credentials"] = credentials
+ return cls(*args, **kwargs)
+
+ from_service_account_json = from_service_account_file
+
+ @property
+ def transport(self) -> PipelineServiceTransport:
+ """Return the transport used by the client instance.
+
+ Returns:
+ PipelineServiceTransport: The transport used by the client instance.
+ """
+ return self._transport
+
+ @staticmethod
+ def endpoint_path(project: str, location: str, endpoint: str,) -> str:
+ """Return a fully-qualified endpoint string."""
+ return "projects/{project}/locations/{location}/endpoints/{endpoint}".format(
+ project=project, location=location, endpoint=endpoint,
+ )
+
+ @staticmethod
+ def parse_endpoint_path(path: str) -> Dict[str, str]:
+ """Parse a endpoint path into its component segments."""
+ m = re.match(
+ r"^projects/(?P.+?)/locations/(?P.+?)/endpoints/(?P.+?)$",
+ path,
+ )
+ return m.groupdict() if m else {}
+
+ @staticmethod
+ def model_path(project: str, location: str, model: str,) -> str:
+ """Return a fully-qualified model string."""
+ return "projects/{project}/locations/{location}/models/{model}".format(
+ project=project, location=location, model=model,
+ )
+
+ @staticmethod
+ def parse_model_path(path: str) -> Dict[str, str]:
+ """Parse a model path into its component segments."""
+ m = re.match(
+ r"^projects/(?P.+?)/locations/(?P.+?)/models/(?P.+?)$",
+ path,
+ )
+ return m.groupdict() if m else {}
+
+ @staticmethod
+ def training_pipeline_path(
+ project: str, location: str, training_pipeline: str,
+ ) -> str:
+ """Return a fully-qualified training_pipeline string."""
+ return "projects/{project}/locations/{location}/trainingPipelines/{training_pipeline}".format(
+ project=project, location=location, training_pipeline=training_pipeline,
+ )
+
+ @staticmethod
+ def parse_training_pipeline_path(path: str) -> Dict[str, str]:
+ """Parse a training_pipeline path into its component segments."""
+ m = re.match(
+ r"^projects/(?P.+?)/locations/(?P.+?)/trainingPipelines/(?P.+?)$",
+ path,
+ )
+ return m.groupdict() if m else {}
+
+ @staticmethod
+ def common_billing_account_path(billing_account: str,) -> str:
+ """Return a fully-qualified billing_account string."""
+ return "billingAccounts/{billing_account}".format(
+ billing_account=billing_account,
+ )
+
+ @staticmethod
+ def parse_common_billing_account_path(path: str) -> Dict[str, str]:
+ """Parse a billing_account path into its component segments."""
+ m = re.match(r"^billingAccounts/(?P.+?)$", path)
+ return m.groupdict() if m else {}
+
+ @staticmethod
+ def common_folder_path(folder: str,) -> str:
+ """Return a fully-qualified folder string."""
+ return "folders/{folder}".format(folder=folder,)
+
+ @staticmethod
+ def parse_common_folder_path(path: str) -> Dict[str, str]:
+ """Parse a folder path into its component segments."""
+ m = re.match(r"^folders/(?P.+?)$", path)
+ return m.groupdict() if m else {}
+
+ @staticmethod
+ def common_organization_path(organization: str,) -> str:
+ """Return a fully-qualified organization string."""
+ return "organizations/{organization}".format(organization=organization,)
+
+ @staticmethod
+ def parse_common_organization_path(path: str) -> Dict[str, str]:
+ """Parse a organization path into its component segments."""
+ m = re.match(r"^organizations/(?P.+?)$", path)
+ return m.groupdict() if m else {}
+
+ @staticmethod
+ def common_project_path(project: str,) -> str:
+ """Return a fully-qualified project string."""
+ return "projects/{project}".format(project=project,)
+
+ @staticmethod
+ def parse_common_project_path(path: str) -> Dict[str, str]:
+ """Parse a project path into its component segments."""
+ m = re.match(r"^projects/(?P.+?)$", path)
+ return m.groupdict() if m else {}
+
+ @staticmethod
+ def common_location_path(project: str, location: str,) -> str:
+ """Return a fully-qualified location string."""
+ return "projects/{project}/locations/{location}".format(
+ project=project, location=location,
+ )
+
+ @staticmethod
+ def parse_common_location_path(path: str) -> Dict[str, str]:
+ """Parse a location path into its component segments."""
+ m = re.match(r"^projects/(?P.+?)/locations/(?P.+?)$", path)
+ return m.groupdict() if m else {}
+
+ def __init__(
+ self,
+ *,
+ credentials: Optional[credentials.Credentials] = None,
+ transport: Union[str, PipelineServiceTransport, None] = None,
+ client_options: Optional[client_options_lib.ClientOptions] = None,
+ client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
+ ) -> None:
+ """Instantiate the pipeline service client.
+
+ Args:
+ credentials (Optional[google.auth.credentials.Credentials]): The
+ authorization credentials to attach to requests. These
+ credentials identify the application to the service; if none
+ are specified, the client will attempt to ascertain the
+ credentials from the environment.
+ transport (Union[str, PipelineServiceTransport]): The
+ transport to use. If set to None, a transport is chosen
+ automatically.
+ client_options (google.api_core.client_options.ClientOptions): Custom options for the
+ client. It won't take effect if a ``transport`` instance is provided.
+ (1) The ``api_endpoint`` property can be used to override the
+ default endpoint provided by the client. GOOGLE_API_USE_MTLS_ENDPOINT
+ environment variable can also be used to override the endpoint:
+ "always" (always use the default mTLS endpoint), "never" (always
+ use the default regular endpoint) and "auto" (auto switch to the
+ default mTLS endpoint if client certificate is present, this is
+ the default value). However, the ``api_endpoint`` property takes
+ precedence if provided.
+ (2) If GOOGLE_API_USE_CLIENT_CERTIFICATE environment variable
+ is "true", then the ``client_cert_source`` property can be used
+ to provide client certificate for mutual TLS transport. If
+ not provided, the default SSL client certificate will be used if
+ present. If GOOGLE_API_USE_CLIENT_CERTIFICATE is "false" or not
+ set, no client certificate will be used.
+ client_info (google.api_core.gapic_v1.client_info.ClientInfo):
+ The client info used to send a user-agent string along with
+ API requests. If ``None``, then default info will be used.
+ Generally, you only need to set this if you're developing
+ your own client library.
+
+ Raises:
+ google.auth.exceptions.MutualTLSChannelError: If mutual TLS transport
+ creation failed for any reason.
+ """
+ if isinstance(client_options, dict):
+ client_options = client_options_lib.from_dict(client_options)
+ if client_options is None:
+ client_options = client_options_lib.ClientOptions()
+
+ # Create SSL credentials for mutual TLS if needed.
+ use_client_cert = bool(
+ util.strtobool(os.getenv("GOOGLE_API_USE_CLIENT_CERTIFICATE", "false"))
+ )
+
+ client_cert_source_func = None
+ is_mtls = False
+ if use_client_cert:
+ if client_options.client_cert_source:
+ is_mtls = True
+ client_cert_source_func = client_options.client_cert_source
+ else:
+ is_mtls = mtls.has_default_client_cert_source()
+ client_cert_source_func = (
+ mtls.default_client_cert_source() if is_mtls else None
+ )
+
+ # Figure out which api endpoint to use.
+ if client_options.api_endpoint is not None:
+ api_endpoint = client_options.api_endpoint
+ else:
+ use_mtls_env = os.getenv("GOOGLE_API_USE_MTLS_ENDPOINT", "auto")
+ if use_mtls_env == "never":
+ api_endpoint = self.DEFAULT_ENDPOINT
+ elif use_mtls_env == "always":
+ api_endpoint = self.DEFAULT_MTLS_ENDPOINT
+ elif use_mtls_env == "auto":
+ api_endpoint = (
+ self.DEFAULT_MTLS_ENDPOINT if is_mtls else self.DEFAULT_ENDPOINT
+ )
+ else:
+ raise MutualTLSChannelError(
+ "Unsupported GOOGLE_API_USE_MTLS_ENDPOINT value. Accepted values: never, auto, always"
+ )
+
+ # Save or instantiate the transport.
+ # Ordinarily, we provide the transport, but allowing a custom transport
+ # instance provides an extensibility point for unusual situations.
+ if isinstance(transport, PipelineServiceTransport):
+ # transport is a PipelineServiceTransport instance.
+ if credentials or client_options.credentials_file:
+ raise ValueError(
+ "When providing a transport instance, "
+ "provide its credentials directly."
+ )
+ if client_options.scopes:
+ raise ValueError(
+ "When providing a transport instance, "
+ "provide its scopes directly."
+ )
+ self._transport = transport
+ else:
+ Transport = type(self).get_transport_class(transport)
+ self._transport = Transport(
+ credentials=credentials,
+ credentials_file=client_options.credentials_file,
+ host=api_endpoint,
+ scopes=client_options.scopes,
+ client_cert_source_for_mtls=client_cert_source_func,
+ quota_project_id=client_options.quota_project_id,
+ client_info=client_info,
+ )
+
+ def create_training_pipeline(
+ self,
+ request: pipeline_service.CreateTrainingPipelineRequest = None,
+ *,
+ parent: str = None,
+ training_pipeline: gca_training_pipeline.TrainingPipeline = None,
+ retry: retries.Retry = gapic_v1.method.DEFAULT,
+ timeout: float = None,
+ metadata: Sequence[Tuple[str, str]] = (),
+ ) -> gca_training_pipeline.TrainingPipeline:
+ r"""Creates a TrainingPipeline. A created
+ TrainingPipeline right away will be attempted to be run.
+
+ Args:
+ request (google.cloud.aiplatform_v1.types.CreateTrainingPipelineRequest):
+ The request object. Request message for
+ ``PipelineService.CreateTrainingPipeline``.
+ parent (str):
+ Required. The resource name of the Location to create
+ the TrainingPipeline in. Format:
+ ``projects/{project}/locations/{location}``
+
+ This corresponds to the ``parent`` field
+ on the ``request`` instance; if ``request`` is provided, this
+ should not be set.
+ training_pipeline (google.cloud.aiplatform_v1.types.TrainingPipeline):
+ Required. The TrainingPipeline to
+ create.
+
+ This corresponds to the ``training_pipeline`` field
+ on the ``request`` instance; if ``request`` is provided, this
+ should not be set.
+
+ retry (google.api_core.retry.Retry): Designation of what errors, if any,
+ should be retried.
+ timeout (float): The timeout for this request.
+ metadata (Sequence[Tuple[str, str]]): Strings which should be
+ sent along with the request as metadata.
+
+ Returns:
+ google.cloud.aiplatform_v1.types.TrainingPipeline:
+ The TrainingPipeline orchestrates tasks associated with training a Model. It
+ always executes the training task, and optionally may
+ also export data from AI Platform's Dataset which
+ becomes the training input,
+ ``upload``
+ the Model to AI Platform, and evaluate the Model.
+
+ """
+ # Create or coerce a protobuf request object.
+ # Sanity check: If we got a request object, we should *not* have
+ # gotten any keyword arguments that map to the request.
+ has_flattened_params = any([parent, training_pipeline])
+ if request is not None and has_flattened_params:
+ raise ValueError(
+ "If the `request` argument is set, then none of "
+ "the individual field arguments should be set."
+ )
+
+ # Minor optimization to avoid making a copy if the user passes
+ # in a pipeline_service.CreateTrainingPipelineRequest.
+ # There's no risk of modifying the input as we've already verified
+ # there are no flattened fields.
+ if not isinstance(request, pipeline_service.CreateTrainingPipelineRequest):
+ request = pipeline_service.CreateTrainingPipelineRequest(request)
+
+ # If we have keyword arguments corresponding to fields on the
+ # request, apply these.
+
+ if parent is not None:
+ request.parent = parent
+ if training_pipeline is not None:
+ request.training_pipeline = training_pipeline
+
+ # Wrap the RPC method; this adds retry and timeout information,
+ # and friendly error handling.
+ rpc = self._transport._wrapped_methods[self._transport.create_training_pipeline]
+
+ # Certain fields should be provided within the metadata header;
+ # add these here.
+ metadata = tuple(metadata) + (
+ gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)),
+ )
+
+ # Send the request.
+ response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,)
+
+ # Done; return the response.
+ return response
+
+ def get_training_pipeline(
+ self,
+ request: pipeline_service.GetTrainingPipelineRequest = None,
+ *,
+ name: str = None,
+ retry: retries.Retry = gapic_v1.method.DEFAULT,
+ timeout: float = None,
+ metadata: Sequence[Tuple[str, str]] = (),
+ ) -> training_pipeline.TrainingPipeline:
+ r"""Gets a TrainingPipeline.
+
+ Args:
+ request (google.cloud.aiplatform_v1.types.GetTrainingPipelineRequest):
+ The request object. Request message for
+ ``PipelineService.GetTrainingPipeline``.
+ name (str):
+ Required. The name of the TrainingPipeline resource.
+ Format:
+
+ ``projects/{project}/locations/{location}/trainingPipelines/{training_pipeline}``
+
+ This corresponds to the ``name`` field
+ on the ``request`` instance; if ``request`` is provided, this
+ should not be set.
+
+ retry (google.api_core.retry.Retry): Designation of what errors, if any,
+ should be retried.
+ timeout (float): The timeout for this request.
+ metadata (Sequence[Tuple[str, str]]): Strings which should be
+ sent along with the request as metadata.
+
+ Returns:
+ google.cloud.aiplatform_v1.types.TrainingPipeline:
+ The TrainingPipeline orchestrates tasks associated with training a Model. It
+ always executes the training task, and optionally may
+ also export data from AI Platform's Dataset which
+ becomes the training input,
+ ``upload``
+ the Model to AI Platform, and evaluate the Model.
+
+ """
+ # Create or coerce a protobuf request object.
+ # Sanity check: If we got a request object, we should *not* have
+ # gotten any keyword arguments that map to the request.
+ has_flattened_params = any([name])
+ if request is not None and has_flattened_params:
+ raise ValueError(
+ "If the `request` argument is set, then none of "
+ "the individual field arguments should be set."
+ )
+
+ # Minor optimization to avoid making a copy if the user passes
+ # in a pipeline_service.GetTrainingPipelineRequest.
+ # There's no risk of modifying the input as we've already verified
+ # there are no flattened fields.
+ if not isinstance(request, pipeline_service.GetTrainingPipelineRequest):
+ request = pipeline_service.GetTrainingPipelineRequest(request)
+
+ # If we have keyword arguments corresponding to fields on the
+ # request, apply these.
+
+ if name is not None:
+ request.name = name
+
+ # Wrap the RPC method; this adds retry and timeout information,
+ # and friendly error handling.
+ rpc = self._transport._wrapped_methods[self._transport.get_training_pipeline]
+
+ # Certain fields should be provided within the metadata header;
+ # add these here.
+ metadata = tuple(metadata) + (
+ gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)),
+ )
+
+ # Send the request.
+ response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,)
+
+ # Done; return the response.
+ return response
+
+ def list_training_pipelines(
+ self,
+ request: pipeline_service.ListTrainingPipelinesRequest = None,
+ *,
+ parent: str = None,
+ retry: retries.Retry = gapic_v1.method.DEFAULT,
+ timeout: float = None,
+ metadata: Sequence[Tuple[str, str]] = (),
+ ) -> pagers.ListTrainingPipelinesPager:
+ r"""Lists TrainingPipelines in a Location.
+
+ Args:
+ request (google.cloud.aiplatform_v1.types.ListTrainingPipelinesRequest):
+ The request object. Request message for
+ ``PipelineService.ListTrainingPipelines``.
+ parent (str):
+ Required. The resource name of the Location to list the
+ TrainingPipelines from. Format:
+ ``projects/{project}/locations/{location}``
+
+ This corresponds to the ``parent`` field
+ on the ``request`` instance; if ``request`` is provided, this
+ should not be set.
+
+ retry (google.api_core.retry.Retry): Designation of what errors, if any,
+ should be retried.
+ timeout (float): The timeout for this request.
+ metadata (Sequence[Tuple[str, str]]): Strings which should be
+ sent along with the request as metadata.
+
+ Returns:
+ google.cloud.aiplatform_v1.services.pipeline_service.pagers.ListTrainingPipelinesPager:
+ Response message for
+ ``PipelineService.ListTrainingPipelines``
+
+ Iterating over this object will yield results and
+ resolve additional pages automatically.
+
+ """
+ # Create or coerce a protobuf request object.
+ # Sanity check: If we got a request object, we should *not* have
+ # gotten any keyword arguments that map to the request.
+ has_flattened_params = any([parent])
+ if request is not None and has_flattened_params:
+ raise ValueError(
+ "If the `request` argument is set, then none of "
+ "the individual field arguments should be set."
+ )
+
+ # Minor optimization to avoid making a copy if the user passes
+ # in a pipeline_service.ListTrainingPipelinesRequest.
+ # There's no risk of modifying the input as we've already verified
+ # there are no flattened fields.
+ if not isinstance(request, pipeline_service.ListTrainingPipelinesRequest):
+ request = pipeline_service.ListTrainingPipelinesRequest(request)
+
+ # If we have keyword arguments corresponding to fields on the
+ # request, apply these.
+
+ if parent is not None:
+ request.parent = parent
+
+ # Wrap the RPC method; this adds retry and timeout information,
+ # and friendly error handling.
+ rpc = self._transport._wrapped_methods[self._transport.list_training_pipelines]
+
+ # Certain fields should be provided within the metadata header;
+ # add these here.
+ metadata = tuple(metadata) + (
+ gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)),
+ )
+
+ # Send the request.
+ response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,)
+
+ # This method is paged; wrap the response in a pager, which provides
+ # an `__iter__` convenience method.
+ response = pagers.ListTrainingPipelinesPager(
+ method=rpc, request=request, response=response, metadata=metadata,
+ )
+
+ # Done; return the response.
+ return response
+
+ def delete_training_pipeline(
+ self,
+ request: pipeline_service.DeleteTrainingPipelineRequest = None,
+ *,
+ name: str = None,
+ retry: retries.Retry = gapic_v1.method.DEFAULT,
+ timeout: float = None,
+ metadata: Sequence[Tuple[str, str]] = (),
+ ) -> ga_operation.Operation:
+ r"""Deletes a TrainingPipeline.
+
+ Args:
+ request (google.cloud.aiplatform_v1.types.DeleteTrainingPipelineRequest):
+ The request object. Request message for
+ ``PipelineService.DeleteTrainingPipeline``.
+ name (str):
+ Required. The name of the TrainingPipeline resource to
+ be deleted. Format:
+
+ ``projects/{project}/locations/{location}/trainingPipelines/{training_pipeline}``
+
+ This corresponds to the ``name`` field
+ on the ``request`` instance; if ``request`` is provided, this
+ should not be set.
+
+ retry (google.api_core.retry.Retry): Designation of what errors, if any,
+ should be retried.
+ timeout (float): The timeout for this request.
+ metadata (Sequence[Tuple[str, str]]): Strings which should be
+ sent along with the request as metadata.
+
+ Returns:
+ google.api_core.operation.Operation:
+ An object representing a long-running operation.
+
+ The result type for the operation will be :class:`google.protobuf.empty_pb2.Empty` A generic empty message that you can re-use to avoid defining duplicated
+ empty messages in your APIs. A typical example is to
+ use it as the request or the response type of an API
+ method. For instance:
+
+ service Foo {
+ rpc Bar(google.protobuf.Empty) returns
+ (google.protobuf.Empty);
+
+ }
+
+ The JSON representation for Empty is empty JSON
+ object {}.
+
+ """
+ # Create or coerce a protobuf request object.
+ # Sanity check: If we got a request object, we should *not* have
+ # gotten any keyword arguments that map to the request.
+ has_flattened_params = any([name])
+ if request is not None and has_flattened_params:
+ raise ValueError(
+ "If the `request` argument is set, then none of "
+ "the individual field arguments should be set."
+ )
+
+ # Minor optimization to avoid making a copy if the user passes
+ # in a pipeline_service.DeleteTrainingPipelineRequest.
+ # There's no risk of modifying the input as we've already verified
+ # there are no flattened fields.
+ if not isinstance(request, pipeline_service.DeleteTrainingPipelineRequest):
+ request = pipeline_service.DeleteTrainingPipelineRequest(request)
+
+ # If we have keyword arguments corresponding to fields on the
+ # request, apply these.
+
+ if name is not None:
+ request.name = name
+
+ # Wrap the RPC method; this adds retry and timeout information,
+ # and friendly error handling.
+ rpc = self._transport._wrapped_methods[self._transport.delete_training_pipeline]
+
+ # Certain fields should be provided within the metadata header;
+ # add these here.
+ metadata = tuple(metadata) + (
+ gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)),
+ )
+
+ # Send the request.
+ response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,)
+
+ # Wrap the response in an operation future.
+ response = ga_operation.from_gapic(
+ response,
+ self._transport.operations_client,
+ empty.Empty,
+ metadata_type=gca_operation.DeleteOperationMetadata,
+ )
+
+ # Done; return the response.
+ return response
+
+ def cancel_training_pipeline(
+ self,
+ request: pipeline_service.CancelTrainingPipelineRequest = None,
+ *,
+ name: str = None,
+ retry: retries.Retry = gapic_v1.method.DEFAULT,
+ timeout: float = None,
+ metadata: Sequence[Tuple[str, str]] = (),
+ ) -> None:
+ r"""Cancels a TrainingPipeline. Starts asynchronous cancellation on
+ the TrainingPipeline. The server makes a best effort to cancel
+ the pipeline, but success is not guaranteed. Clients can use
+ ``PipelineService.GetTrainingPipeline``
+ or other methods to check whether the cancellation succeeded or
+ whether the pipeline completed despite cancellation. On
+ successful cancellation, the TrainingPipeline is not deleted;
+ instead it becomes a pipeline with a
+ ``TrainingPipeline.error``
+ value with a ``google.rpc.Status.code`` of
+ 1, corresponding to ``Code.CANCELLED``, and
+ ``TrainingPipeline.state``
+ is set to ``CANCELLED``.
+
+ Args:
+ request (google.cloud.aiplatform_v1.types.CancelTrainingPipelineRequest):
+ The request object. Request message for
+ ``PipelineService.CancelTrainingPipeline``.
+ name (str):
+ Required. The name of the TrainingPipeline to cancel.
+ Format:
+
+ ``projects/{project}/locations/{location}/trainingPipelines/{training_pipeline}``
+
+ This corresponds to the ``name`` field
+ on the ``request`` instance; if ``request`` is provided, this
+ should not be set.
+
+ retry (google.api_core.retry.Retry): Designation of what errors, if any,
+ should be retried.
+ timeout (float): The timeout for this request.
+ metadata (Sequence[Tuple[str, str]]): Strings which should be
+ sent along with the request as metadata.
+ """
+ # Create or coerce a protobuf request object.
+ # Sanity check: If we got a request object, we should *not* have
+ # gotten any keyword arguments that map to the request.
+ has_flattened_params = any([name])
+ if request is not None and has_flattened_params:
+ raise ValueError(
+ "If the `request` argument is set, then none of "
+ "the individual field arguments should be set."
+ )
+
+ # Minor optimization to avoid making a copy if the user passes
+ # in a pipeline_service.CancelTrainingPipelineRequest.
+ # There's no risk of modifying the input as we've already verified
+ # there are no flattened fields.
+ if not isinstance(request, pipeline_service.CancelTrainingPipelineRequest):
+ request = pipeline_service.CancelTrainingPipelineRequest(request)
+
+ # If we have keyword arguments corresponding to fields on the
+ # request, apply these.
+
+ if name is not None:
+ request.name = name
+
+ # Wrap the RPC method; this adds retry and timeout information,
+ # and friendly error handling.
+ rpc = self._transport._wrapped_methods[self._transport.cancel_training_pipeline]
+
+ # Certain fields should be provided within the metadata header;
+ # add these here.
+ metadata = tuple(metadata) + (
+ gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)),
+ )
+
+ # Send the request.
+ rpc(
+ request, retry=retry, timeout=timeout, metadata=metadata,
+ )
+
+
+try:
+ DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo(
+ gapic_version=pkg_resources.get_distribution(
+ "google-cloud-aiplatform",
+ ).version,
+ )
+except pkg_resources.DistributionNotFound:
+ DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo()
+
+
+__all__ = ("PipelineServiceClient",)
diff --git a/google/cloud/aiplatform_v1/services/pipeline_service/pagers.py b/google/cloud/aiplatform_v1/services/pipeline_service/pagers.py
new file mode 100644
index 0000000000..0f3503ff5a
--- /dev/null
+++ b/google/cloud/aiplatform_v1/services/pipeline_service/pagers.py
@@ -0,0 +1,153 @@
+# -*- coding: utf-8 -*-
+
+# Copyright 2020 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+from typing import Any, AsyncIterable, Awaitable, Callable, Iterable, Sequence, Tuple
+
+from google.cloud.aiplatform_v1.types import pipeline_service
+from google.cloud.aiplatform_v1.types import training_pipeline
+
+
+class ListTrainingPipelinesPager:
+ """A pager for iterating through ``list_training_pipelines`` requests.
+
+ This class thinly wraps an initial
+ :class:`google.cloud.aiplatform_v1.types.ListTrainingPipelinesResponse` object, and
+ provides an ``__iter__`` method to iterate through its
+ ``training_pipelines`` field.
+
+ If there are more pages, the ``__iter__`` method will make additional
+ ``ListTrainingPipelines`` requests and continue to iterate
+ through the ``training_pipelines`` field on the
+ corresponding responses.
+
+ All the usual :class:`google.cloud.aiplatform_v1.types.ListTrainingPipelinesResponse`
+ attributes are available on the pager. If multiple requests are made, only
+ the most recent response is retained, and thus used for attribute lookup.
+ """
+
+ def __init__(
+ self,
+ method: Callable[..., pipeline_service.ListTrainingPipelinesResponse],
+ request: pipeline_service.ListTrainingPipelinesRequest,
+ response: pipeline_service.ListTrainingPipelinesResponse,
+ *,
+ metadata: Sequence[Tuple[str, str]] = ()
+ ):
+ """Instantiate the pager.
+
+ Args:
+ method (Callable): The method that was originally called, and
+ which instantiated this pager.
+ request (google.cloud.aiplatform_v1.types.ListTrainingPipelinesRequest):
+ The initial request object.
+ response (google.cloud.aiplatform_v1.types.ListTrainingPipelinesResponse):
+ The initial response object.
+ metadata (Sequence[Tuple[str, str]]): Strings which should be
+ sent along with the request as metadata.
+ """
+ self._method = method
+ self._request = pipeline_service.ListTrainingPipelinesRequest(request)
+ self._response = response
+ self._metadata = metadata
+
+ def __getattr__(self, name: str) -> Any:
+ return getattr(self._response, name)
+
+ @property
+ def pages(self) -> Iterable[pipeline_service.ListTrainingPipelinesResponse]:
+ yield self._response
+ while self._response.next_page_token:
+ self._request.page_token = self._response.next_page_token
+ self._response = self._method(self._request, metadata=self._metadata)
+ yield self._response
+
+ def __iter__(self) -> Iterable[training_pipeline.TrainingPipeline]:
+ for page in self.pages:
+ yield from page.training_pipelines
+
+ def __repr__(self) -> str:
+ return "{0}<{1!r}>".format(self.__class__.__name__, self._response)
+
+
+class ListTrainingPipelinesAsyncPager:
+ """A pager for iterating through ``list_training_pipelines`` requests.
+
+ This class thinly wraps an initial
+ :class:`google.cloud.aiplatform_v1.types.ListTrainingPipelinesResponse` object, and
+ provides an ``__aiter__`` method to iterate through its
+ ``training_pipelines`` field.
+
+ If there are more pages, the ``__aiter__`` method will make additional
+ ``ListTrainingPipelines`` requests and continue to iterate
+ through the ``training_pipelines`` field on the
+ corresponding responses.
+
+ All the usual :class:`google.cloud.aiplatform_v1.types.ListTrainingPipelinesResponse`
+ attributes are available on the pager. If multiple requests are made, only
+ the most recent response is retained, and thus used for attribute lookup.
+ """
+
+ def __init__(
+ self,
+ method: Callable[
+ ..., Awaitable[pipeline_service.ListTrainingPipelinesResponse]
+ ],
+ request: pipeline_service.ListTrainingPipelinesRequest,
+ response: pipeline_service.ListTrainingPipelinesResponse,
+ *,
+ metadata: Sequence[Tuple[str, str]] = ()
+ ):
+ """Instantiate the pager.
+
+ Args:
+ method (Callable): The method that was originally called, and
+ which instantiated this pager.
+ request (google.cloud.aiplatform_v1.types.ListTrainingPipelinesRequest):
+ The initial request object.
+ response (google.cloud.aiplatform_v1.types.ListTrainingPipelinesResponse):
+ The initial response object.
+ metadata (Sequence[Tuple[str, str]]): Strings which should be
+ sent along with the request as metadata.
+ """
+ self._method = method
+ self._request = pipeline_service.ListTrainingPipelinesRequest(request)
+ self._response = response
+ self._metadata = metadata
+
+ def __getattr__(self, name: str) -> Any:
+ return getattr(self._response, name)
+
+ @property
+ async def pages(
+ self,
+ ) -> AsyncIterable[pipeline_service.ListTrainingPipelinesResponse]:
+ yield self._response
+ while self._response.next_page_token:
+ self._request.page_token = self._response.next_page_token
+ self._response = await self._method(self._request, metadata=self._metadata)
+ yield self._response
+
+ def __aiter__(self) -> AsyncIterable[training_pipeline.TrainingPipeline]:
+ async def async_generator():
+ async for page in self.pages:
+ for response in page.training_pipelines:
+ yield response
+
+ return async_generator()
+
+ def __repr__(self) -> str:
+ return "{0}<{1!r}>".format(self.__class__.__name__, self._response)
diff --git a/google/cloud/aiplatform_v1/services/pipeline_service/transports/__init__.py b/google/cloud/aiplatform_v1/services/pipeline_service/transports/__init__.py
new file mode 100644
index 0000000000..9d4610087a
--- /dev/null
+++ b/google/cloud/aiplatform_v1/services/pipeline_service/transports/__init__.py
@@ -0,0 +1,35 @@
+# -*- coding: utf-8 -*-
+
+# Copyright 2020 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+from collections import OrderedDict
+from typing import Dict, Type
+
+from .base import PipelineServiceTransport
+from .grpc import PipelineServiceGrpcTransport
+from .grpc_asyncio import PipelineServiceGrpcAsyncIOTransport
+
+
+# Compile a registry of transports.
+_transport_registry = OrderedDict() # type: Dict[str, Type[PipelineServiceTransport]]
+_transport_registry["grpc"] = PipelineServiceGrpcTransport
+_transport_registry["grpc_asyncio"] = PipelineServiceGrpcAsyncIOTransport
+
+__all__ = (
+ "PipelineServiceTransport",
+ "PipelineServiceGrpcTransport",
+ "PipelineServiceGrpcAsyncIOTransport",
+)
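
This registry is what the client metaclass consults when resolving the ``transport`` argument. A stand-alone sketch of that lookup (with stand-in transport classes, not the real ones) mirrors ``PipelineServiceClientMeta.get_transport_class``:

```python
# Stand-alone sketch of the transport-selection logic that consumes a
# registry like the one above; the transport classes are stand-ins.
from collections import OrderedDict


class GrpcTransport: ...
class GrpcAsyncIOTransport: ...


_registry = OrderedDict()
_registry["grpc"] = GrpcTransport
_registry["grpc_asyncio"] = GrpcAsyncIOTransport


def get_transport_class(label=None):
    # An explicit label selects that transport; otherwise fall back to the
    # first entry in the ordered registry ("grpc" here).
    if label:
        return _registry[label]
    return next(iter(_registry.values()))


assert get_transport_class("grpc_asyncio") is GrpcAsyncIOTransport
assert get_transport_class() is GrpcTransport
```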
diff --git a/google/cloud/aiplatform_v1/services/pipeline_service/transports/base.py b/google/cloud/aiplatform_v1/services/pipeline_service/transports/base.py
new file mode 100644
index 0000000000..e4bc8e66a8
--- /dev/null
+++ b/google/cloud/aiplatform_v1/services/pipeline_service/transports/base.py
@@ -0,0 +1,201 @@
+# -*- coding: utf-8 -*-
+
+# Copyright 2020 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+import abc
+import typing
+import pkg_resources
+
+from google import auth # type: ignore
+from google.api_core import exceptions # type: ignore
+from google.api_core import gapic_v1 # type: ignore
+from google.api_core import retry as retries # type: ignore
+from google.api_core import operations_v1 # type: ignore
+from google.auth import credentials # type: ignore
+
+from google.cloud.aiplatform_v1.types import pipeline_service
+from google.cloud.aiplatform_v1.types import training_pipeline
+from google.cloud.aiplatform_v1.types import training_pipeline as gca_training_pipeline
+from google.longrunning import operations_pb2 as operations # type: ignore
+from google.protobuf import empty_pb2 as empty # type: ignore
+
+
+try:
+ DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo(
+ gapic_version=pkg_resources.get_distribution(
+ "google-cloud-aiplatform",
+ ).version,
+ )
+except pkg_resources.DistributionNotFound:
+ DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo()
+
+
+class PipelineServiceTransport(abc.ABC):
+ """Abstract transport class for PipelineService."""
+
+ AUTH_SCOPES = ("https://www.googleapis.com/auth/cloud-platform",)
+
+ def __init__(
+ self,
+ *,
+ host: str = "aiplatform.googleapis.com",
+ credentials: credentials.Credentials = None,
+ credentials_file: typing.Optional[str] = None,
+ scopes: typing.Optional[typing.Sequence[str]] = AUTH_SCOPES,
+ quota_project_id: typing.Optional[str] = None,
+ client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
+ **kwargs,
+ ) -> None:
+ """Instantiate the transport.
+
+ Args:
+ host (Optional[str]): The hostname to connect to.
+ credentials (Optional[google.auth.credentials.Credentials]): The
+ authorization credentials to attach to requests. These
+ credentials identify the application to the service; if none
+ are specified, the client will attempt to ascertain the
+ credentials from the environment.
+ credentials_file (Optional[str]): A file with credentials that can
+ be loaded with :func:`google.auth.load_credentials_from_file`.
+ This argument is mutually exclusive with credentials.
+            scopes (Optional[Sequence[str]]): A list of scopes.
+ quota_project_id (Optional[str]): An optional project to use for billing
+ and quota.
+ client_info (google.api_core.gapic_v1.client_info.ClientInfo):
+ The client info used to send a user-agent string along with
+ API requests. If ``None``, then default info will be used.
+ Generally, you only need to set this if you're developing
+ your own client library.
+ """
+ # Save the hostname. Default to port 443 (HTTPS) if none is specified.
+ if ":" not in host:
+ host += ":443"
+ self._host = host
+
+ # If no credentials are provided, then determine the appropriate
+ # defaults.
+ if credentials and credentials_file:
+ raise exceptions.DuplicateCredentialArgs(
+ "'credentials_file' and 'credentials' are mutually exclusive"
+ )
+
+ if credentials_file is not None:
+ credentials, _ = auth.load_credentials_from_file(
+ credentials_file, scopes=scopes, quota_project_id=quota_project_id
+ )
+
+ elif credentials is None:
+ credentials, _ = auth.default(
+ scopes=scopes, quota_project_id=quota_project_id
+ )
+
+ # Save the credentials.
+ self._credentials = credentials
+
+ # Lifted into its own function so it can be stubbed out during tests.
+ self._prep_wrapped_messages(client_info)
+
+ def _prep_wrapped_messages(self, client_info):
+ # Precompute the wrapped methods.
+ self._wrapped_methods = {
+ self.create_training_pipeline: gapic_v1.method.wrap_method(
+ self.create_training_pipeline,
+ default_timeout=None,
+ client_info=client_info,
+ ),
+ self.get_training_pipeline: gapic_v1.method.wrap_method(
+ self.get_training_pipeline,
+ default_timeout=None,
+ client_info=client_info,
+ ),
+ self.list_training_pipelines: gapic_v1.method.wrap_method(
+ self.list_training_pipelines,
+ default_timeout=None,
+ client_info=client_info,
+ ),
+ self.delete_training_pipeline: gapic_v1.method.wrap_method(
+ self.delete_training_pipeline,
+ default_timeout=None,
+ client_info=client_info,
+ ),
+ self.cancel_training_pipeline: gapic_v1.method.wrap_method(
+ self.cancel_training_pipeline,
+ default_timeout=None,
+ client_info=client_info,
+ ),
+ }
+
+ @property
+ def operations_client(self) -> operations_v1.OperationsClient:
+ """Return the client designed to process long-running operations."""
+ raise NotImplementedError()
+
+ @property
+ def create_training_pipeline(
+ self,
+ ) -> typing.Callable[
+ [pipeline_service.CreateTrainingPipelineRequest],
+ typing.Union[
+ gca_training_pipeline.TrainingPipeline,
+ typing.Awaitable[gca_training_pipeline.TrainingPipeline],
+ ],
+ ]:
+ raise NotImplementedError()
+
+ @property
+ def get_training_pipeline(
+ self,
+ ) -> typing.Callable[
+ [pipeline_service.GetTrainingPipelineRequest],
+ typing.Union[
+ training_pipeline.TrainingPipeline,
+ typing.Awaitable[training_pipeline.TrainingPipeline],
+ ],
+ ]:
+ raise NotImplementedError()
+
+ @property
+ def list_training_pipelines(
+ self,
+ ) -> typing.Callable[
+ [pipeline_service.ListTrainingPipelinesRequest],
+ typing.Union[
+ pipeline_service.ListTrainingPipelinesResponse,
+ typing.Awaitable[pipeline_service.ListTrainingPipelinesResponse],
+ ],
+ ]:
+ raise NotImplementedError()
+
+ @property
+ def delete_training_pipeline(
+ self,
+ ) -> typing.Callable[
+ [pipeline_service.DeleteTrainingPipelineRequest],
+ typing.Union[operations.Operation, typing.Awaitable[operations.Operation]],
+ ]:
+ raise NotImplementedError()
+
+ @property
+ def cancel_training_pipeline(
+ self,
+ ) -> typing.Callable[
+ [pipeline_service.CancelTrainingPipelineRequest],
+ typing.Union[empty.Empty, typing.Awaitable[empty.Empty]],
+ ]:
+ raise NotImplementedError()
+
+
+__all__ = ("PipelineServiceTransport",)
diff --git a/google/cloud/aiplatform_v1/services/pipeline_service/transports/grpc.py b/google/cloud/aiplatform_v1/services/pipeline_service/transports/grpc.py
new file mode 100644
index 0000000000..818144f008
--- /dev/null
+++ b/google/cloud/aiplatform_v1/services/pipeline_service/transports/grpc.py
@@ -0,0 +1,426 @@
+# -*- coding: utf-8 -*-
+
+# Copyright 2020 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+import warnings
+from typing import Callable, Dict, Optional, Sequence, Tuple
+
+from google.api_core import grpc_helpers # type: ignore
+from google.api_core import operations_v1 # type: ignore
+from google.api_core import gapic_v1 # type: ignore
+from google import auth # type: ignore
+from google.auth import credentials # type: ignore
+from google.auth.transport.grpc import SslCredentials # type: ignore
+
+import grpc # type: ignore
+
+from google.cloud.aiplatform_v1.types import pipeline_service
+from google.cloud.aiplatform_v1.types import training_pipeline
+from google.cloud.aiplatform_v1.types import training_pipeline as gca_training_pipeline
+from google.longrunning import operations_pb2 as operations # type: ignore
+from google.protobuf import empty_pb2 as empty # type: ignore
+
+from .base import PipelineServiceTransport, DEFAULT_CLIENT_INFO
+
+
+class PipelineServiceGrpcTransport(PipelineServiceTransport):
+ """gRPC backend transport for PipelineService.
+
+ A service for creating and managing AI Platform's pipelines.
+
+ This class defines the same methods as the primary client, so the
+ primary client can load the underlying transport implementation
+ and call it.
+
+ It sends protocol buffers over the wire using gRPC (which is built on
+ top of HTTP/2); the ``grpcio`` package must be installed.
+ """
+
+ _stubs: Dict[str, Callable]
+
+ def __init__(
+ self,
+ *,
+ host: str = "aiplatform.googleapis.com",
+ credentials: credentials.Credentials = None,
+ credentials_file: str = None,
+ scopes: Sequence[str] = None,
+ channel: grpc.Channel = None,
+ api_mtls_endpoint: str = None,
+ client_cert_source: Callable[[], Tuple[bytes, bytes]] = None,
+ ssl_channel_credentials: grpc.ChannelCredentials = None,
+ client_cert_source_for_mtls: Callable[[], Tuple[bytes, bytes]] = None,
+ quota_project_id: Optional[str] = None,
+ client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
+ ) -> None:
+ """Instantiate the transport.
+
+ Args:
+ host (Optional[str]): The hostname to connect to.
+ credentials (Optional[google.auth.credentials.Credentials]): The
+ authorization credentials to attach to requests. These
+ credentials identify the application to the service; if none
+ are specified, the client will attempt to ascertain the
+ credentials from the environment.
+ This argument is ignored if ``channel`` is provided.
+ credentials_file (Optional[str]): A file with credentials that can
+ be loaded with :func:`google.auth.load_credentials_from_file`.
+ This argument is ignored if ``channel`` is provided.
+            scopes (Optional[Sequence[str]]): A list of scopes. This argument is
+ ignored if ``channel`` is provided.
+ channel (Optional[grpc.Channel]): A ``Channel`` instance through
+ which to make calls.
+ api_mtls_endpoint (Optional[str]): Deprecated. The mutual TLS endpoint.
+ If provided, it overrides the ``host`` argument and tries to create
+ a mutual TLS channel with client SSL credentials from
+                ``client_cert_source`` or application default SSL credentials.
+ client_cert_source (Optional[Callable[[], Tuple[bytes, bytes]]]):
+ Deprecated. A callback to provide client SSL certificate bytes and
+ private key bytes, both in PEM format. It is ignored if
+ ``api_mtls_endpoint`` is None.
+ ssl_channel_credentials (grpc.ChannelCredentials): SSL credentials
+ for grpc channel. It is ignored if ``channel`` is provided.
+ client_cert_source_for_mtls (Optional[Callable[[], Tuple[bytes, bytes]]]):
+ A callback to provide client certificate bytes and private key bytes,
+ both in PEM format. It is used to configure mutual TLS channel. It is
+ ignored if ``channel`` or ``ssl_channel_credentials`` is provided.
+ quota_project_id (Optional[str]): An optional project to use for billing
+ and quota.
+ client_info (google.api_core.gapic_v1.client_info.ClientInfo):
+ The client info used to send a user-agent string along with
+ API requests. If ``None``, then default info will be used.
+ Generally, you only need to set this if you're developing
+ your own client library.
+
+ Raises:
+ google.auth.exceptions.MutualTLSChannelError: If mutual TLS transport
+ creation failed for any reason.
+ google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials``
+ and ``credentials_file`` are passed.
+ """
+ self._ssl_channel_credentials = ssl_channel_credentials
+
+ if api_mtls_endpoint:
+ warnings.warn("api_mtls_endpoint is deprecated", DeprecationWarning)
+ if client_cert_source:
+ warnings.warn("client_cert_source is deprecated", DeprecationWarning)
+
+ if channel:
+ # Sanity check: Ensure that channel and credentials are not both
+ # provided.
+ credentials = False
+
+ # If a channel was explicitly provided, set it.
+ self._grpc_channel = channel
+ self._ssl_channel_credentials = None
+ elif api_mtls_endpoint:
+ host = (
+ api_mtls_endpoint
+ if ":" in api_mtls_endpoint
+ else api_mtls_endpoint + ":443"
+ )
+
+ if credentials is None:
+ credentials, _ = auth.default(
+ scopes=self.AUTH_SCOPES, quota_project_id=quota_project_id
+ )
+
+ # Create SSL credentials with client_cert_source or application
+ # default SSL credentials.
+ if client_cert_source:
+ cert, key = client_cert_source()
+ ssl_credentials = grpc.ssl_channel_credentials(
+ certificate_chain=cert, private_key=key
+ )
+ else:
+ ssl_credentials = SslCredentials().ssl_credentials
+
+ # create a new channel. The provided one is ignored.
+ self._grpc_channel = type(self).create_channel(
+ host,
+ credentials=credentials,
+ credentials_file=credentials_file,
+ ssl_credentials=ssl_credentials,
+ scopes=scopes or self.AUTH_SCOPES,
+ quota_project_id=quota_project_id,
+ options=[
+ ("grpc.max_send_message_length", -1),
+ ("grpc.max_receive_message_length", -1),
+ ],
+ )
+ self._ssl_channel_credentials = ssl_credentials
+ else:
+ host = host if ":" in host else host + ":443"
+
+ if credentials is None:
+ credentials, _ = auth.default(
+ scopes=self.AUTH_SCOPES, quota_project_id=quota_project_id
+ )
+
+ if client_cert_source_for_mtls and not ssl_channel_credentials:
+ cert, key = client_cert_source_for_mtls()
+ self._ssl_channel_credentials = grpc.ssl_channel_credentials(
+ certificate_chain=cert, private_key=key
+ )
+
+ # create a new channel. The provided one is ignored.
+ self._grpc_channel = type(self).create_channel(
+ host,
+ credentials=credentials,
+ credentials_file=credentials_file,
+ ssl_credentials=self._ssl_channel_credentials,
+ scopes=scopes or self.AUTH_SCOPES,
+ quota_project_id=quota_project_id,
+ options=[
+ ("grpc.max_send_message_length", -1),
+ ("grpc.max_receive_message_length", -1),
+ ],
+ )
+
+ self._stubs = {} # type: Dict[str, Callable]
+ self._operations_client = None
+
+ # Run the base constructor.
+ super().__init__(
+ host=host,
+ credentials=credentials,
+ credentials_file=credentials_file,
+ scopes=scopes or self.AUTH_SCOPES,
+ quota_project_id=quota_project_id,
+ client_info=client_info,
+ )
+
+ @classmethod
+ def create_channel(
+ cls,
+ host: str = "aiplatform.googleapis.com",
+ credentials: credentials.Credentials = None,
+ credentials_file: str = None,
+ scopes: Optional[Sequence[str]] = None,
+ quota_project_id: Optional[str] = None,
+ **kwargs,
+ ) -> grpc.Channel:
+ """Create and return a gRPC channel object.
+ Args:
+            host (Optional[str]): The host for the channel to use.
+ credentials (Optional[~.Credentials]): The
+ authorization credentials to attach to requests. These
+ credentials identify this application to the service. If
+ none are specified, the client will attempt to ascertain
+ the credentials from the environment.
+ credentials_file (Optional[str]): A file with credentials that can
+ be loaded with :func:`google.auth.load_credentials_from_file`.
+ This argument is mutually exclusive with credentials.
+            scopes (Optional[Sequence[str]]): An optional list of scopes needed for this
+ service. These are only used when credentials are not specified and
+ are passed to :func:`google.auth.default`.
+ quota_project_id (Optional[str]): An optional project to use for billing
+ and quota.
+ kwargs (Optional[dict]): Keyword arguments, which are passed to the
+ channel creation.
+ Returns:
+ grpc.Channel: A gRPC channel object.
+
+ Raises:
+ google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials``
+ and ``credentials_file`` are passed.
+ """
+ scopes = scopes or cls.AUTH_SCOPES
+ return grpc_helpers.create_channel(
+ host,
+ credentials=credentials,
+ credentials_file=credentials_file,
+ scopes=scopes,
+ quota_project_id=quota_project_id,
+ **kwargs,
+ )
+
+ @property
+ def grpc_channel(self) -> grpc.Channel:
+ """Return the channel designed to connect to this service.
+ """
+ return self._grpc_channel
+
+ @property
+ def operations_client(self) -> operations_v1.OperationsClient:
+ """Create the client designed to process long-running operations.
+
+ This property caches on the instance; repeated calls return the same
+ client.
+ """
+ # Sanity check: Only create a new client if we do not already have one.
+ if self._operations_client is None:
+ self._operations_client = operations_v1.OperationsClient(self.grpc_channel)
+
+ # Return the client from cache.
+ return self._operations_client
+
+ @property
+ def create_training_pipeline(
+ self,
+ ) -> Callable[
+ [pipeline_service.CreateTrainingPipelineRequest],
+ gca_training_pipeline.TrainingPipeline,
+ ]:
+ r"""Return a callable for the create training pipeline method over gRPC.
+
+        Creates a TrainingPipeline. A run of the
+        created TrainingPipeline is attempted right away.
+
+ Returns:
+ Callable[[~.CreateTrainingPipelineRequest],
+ ~.TrainingPipeline]:
+ A function that, when called, will call the underlying RPC
+ on the server.
+ """
+ # Generate a "stub function" on-the-fly which will actually make
+ # the request.
+ # gRPC handles serialization and deserialization, so we just need
+ # to pass in the functions for each.
+ if "create_training_pipeline" not in self._stubs:
+ self._stubs["create_training_pipeline"] = self.grpc_channel.unary_unary(
+ "/google.cloud.aiplatform.v1.PipelineService/CreateTrainingPipeline",
+ request_serializer=pipeline_service.CreateTrainingPipelineRequest.serialize,
+ response_deserializer=gca_training_pipeline.TrainingPipeline.deserialize,
+ )
+ return self._stubs["create_training_pipeline"]
+
+ @property
+ def get_training_pipeline(
+ self,
+ ) -> Callable[
+ [pipeline_service.GetTrainingPipelineRequest],
+ training_pipeline.TrainingPipeline,
+ ]:
+ r"""Return a callable for the get training pipeline method over gRPC.
+
+ Gets a TrainingPipeline.
+
+ Returns:
+ Callable[[~.GetTrainingPipelineRequest],
+ ~.TrainingPipeline]:
+ A function that, when called, will call the underlying RPC
+ on the server.
+ """
+ # Generate a "stub function" on-the-fly which will actually make
+ # the request.
+ # gRPC handles serialization and deserialization, so we just need
+ # to pass in the functions for each.
+ if "get_training_pipeline" not in self._stubs:
+ self._stubs["get_training_pipeline"] = self.grpc_channel.unary_unary(
+ "/google.cloud.aiplatform.v1.PipelineService/GetTrainingPipeline",
+ request_serializer=pipeline_service.GetTrainingPipelineRequest.serialize,
+ response_deserializer=training_pipeline.TrainingPipeline.deserialize,
+ )
+ return self._stubs["get_training_pipeline"]
+
+ @property
+ def list_training_pipelines(
+ self,
+ ) -> Callable[
+ [pipeline_service.ListTrainingPipelinesRequest],
+ pipeline_service.ListTrainingPipelinesResponse,
+ ]:
+ r"""Return a callable for the list training pipelines method over gRPC.
+
+ Lists TrainingPipelines in a Location.
+
+ Returns:
+ Callable[[~.ListTrainingPipelinesRequest],
+ ~.ListTrainingPipelinesResponse]:
+ A function that, when called, will call the underlying RPC
+ on the server.
+ """
+ # Generate a "stub function" on-the-fly which will actually make
+ # the request.
+ # gRPC handles serialization and deserialization, so we just need
+ # to pass in the functions for each.
+ if "list_training_pipelines" not in self._stubs:
+ self._stubs["list_training_pipelines"] = self.grpc_channel.unary_unary(
+ "/google.cloud.aiplatform.v1.PipelineService/ListTrainingPipelines",
+ request_serializer=pipeline_service.ListTrainingPipelinesRequest.serialize,
+ response_deserializer=pipeline_service.ListTrainingPipelinesResponse.deserialize,
+ )
+ return self._stubs["list_training_pipelines"]
+
+ @property
+ def delete_training_pipeline(
+ self,
+ ) -> Callable[
+ [pipeline_service.DeleteTrainingPipelineRequest], operations.Operation
+ ]:
+ r"""Return a callable for the delete training pipeline method over gRPC.
+
+ Deletes a TrainingPipeline.
+
+ Returns:
+ Callable[[~.DeleteTrainingPipelineRequest],
+ ~.Operation]:
+ A function that, when called, will call the underlying RPC
+ on the server.
+ """
+ # Generate a "stub function" on-the-fly which will actually make
+ # the request.
+ # gRPC handles serialization and deserialization, so we just need
+ # to pass in the functions for each.
+ if "delete_training_pipeline" not in self._stubs:
+ self._stubs["delete_training_pipeline"] = self.grpc_channel.unary_unary(
+ "/google.cloud.aiplatform.v1.PipelineService/DeleteTrainingPipeline",
+ request_serializer=pipeline_service.DeleteTrainingPipelineRequest.serialize,
+ response_deserializer=operations.Operation.FromString,
+ )
+ return self._stubs["delete_training_pipeline"]
+
+ @property
+ def cancel_training_pipeline(
+ self,
+ ) -> Callable[[pipeline_service.CancelTrainingPipelineRequest], empty.Empty]:
+ r"""Return a callable for the cancel training pipeline method over gRPC.
+
+ Cancels a TrainingPipeline. Starts asynchronous cancellation on
+ the TrainingPipeline. The server makes a best effort to cancel
+ the pipeline, but success is not guaranteed. Clients can use
+ ``PipelineService.GetTrainingPipeline``
+ or other methods to check whether the cancellation succeeded or
+ whether the pipeline completed despite cancellation. On
+ successful cancellation, the TrainingPipeline is not deleted;
+ instead it becomes a pipeline with a
+ ``TrainingPipeline.error``
+ value with a ``google.rpc.Status.code`` of
+ 1, corresponding to ``Code.CANCELLED``, and
+ ``TrainingPipeline.state``
+ is set to ``CANCELLED``.
+
+ Returns:
+ Callable[[~.CancelTrainingPipelineRequest],
+ ~.Empty]:
+ A function that, when called, will call the underlying RPC
+ on the server.
+ """
+ # Generate a "stub function" on-the-fly which will actually make
+ # the request.
+ # gRPC handles serialization and deserialization, so we just need
+ # to pass in the functions for each.
+ if "cancel_training_pipeline" not in self._stubs:
+ self._stubs["cancel_training_pipeline"] = self.grpc_channel.unary_unary(
+ "/google.cloud.aiplatform.v1.PipelineService/CancelTrainingPipeline",
+ request_serializer=pipeline_service.CancelTrainingPipelineRequest.serialize,
+ response_deserializer=empty.Empty.FromString,
+ )
+ return self._stubs["cancel_training_pipeline"]
+
+
+__all__ = ("PipelineServiceGrpcTransport",)
diff --git a/google/cloud/aiplatform_v1/services/pipeline_service/transports/grpc_asyncio.py b/google/cloud/aiplatform_v1/services/pipeline_service/transports/grpc_asyncio.py
new file mode 100644
index 0000000000..ceed94071f
--- /dev/null
+++ b/google/cloud/aiplatform_v1/services/pipeline_service/transports/grpc_asyncio.py
@@ -0,0 +1,435 @@
+# -*- coding: utf-8 -*-
+
+# Copyright 2020 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+import warnings
+from typing import Awaitable, Callable, Dict, Optional, Sequence, Tuple
+
+from google.api_core import gapic_v1 # type: ignore
+from google.api_core import grpc_helpers_async # type: ignore
+from google.api_core import operations_v1 # type: ignore
+from google import auth # type: ignore
+from google.auth import credentials # type: ignore
+from google.auth.transport.grpc import SslCredentials # type: ignore
+
+import grpc # type: ignore
+from grpc.experimental import aio # type: ignore
+
+from google.cloud.aiplatform_v1.types import pipeline_service
+from google.cloud.aiplatform_v1.types import training_pipeline
+from google.cloud.aiplatform_v1.types import training_pipeline as gca_training_pipeline
+from google.longrunning import operations_pb2 as operations # type: ignore
+from google.protobuf import empty_pb2 as empty # type: ignore
+
+from .base import PipelineServiceTransport, DEFAULT_CLIENT_INFO
+from .grpc import PipelineServiceGrpcTransport
+
+
+class PipelineServiceGrpcAsyncIOTransport(PipelineServiceTransport):
+ """gRPC AsyncIO backend transport for PipelineService.
+
+ A service for creating and managing AI Platform's pipelines.
+
+ This class defines the same methods as the primary client, so the
+ primary client can load the underlying transport implementation
+ and call it.
+
+ It sends protocol buffers over the wire using gRPC (which is built on
+ top of HTTP/2); the ``grpcio`` package must be installed.
+ """
+
+ _grpc_channel: aio.Channel
+ _stubs: Dict[str, Callable] = {}
+
+ @classmethod
+ def create_channel(
+ cls,
+ host: str = "aiplatform.googleapis.com",
+ credentials: credentials.Credentials = None,
+ credentials_file: Optional[str] = None,
+ scopes: Optional[Sequence[str]] = None,
+ quota_project_id: Optional[str] = None,
+ **kwargs,
+ ) -> aio.Channel:
+ """Create and return a gRPC AsyncIO channel object.
+ Args:
+            host (Optional[str]): The host for the channel to use.
+ credentials (Optional[~.Credentials]): The
+ authorization credentials to attach to requests. These
+ credentials identify this application to the service. If
+ none are specified, the client will attempt to ascertain
+ the credentials from the environment.
+ credentials_file (Optional[str]): A file with credentials that can
+ be loaded with :func:`google.auth.load_credentials_from_file`.
+                This argument is mutually exclusive with credentials.
+            scopes (Optional[Sequence[str]]): An optional list of scopes needed for this
+ service. These are only used when credentials are not specified and
+ are passed to :func:`google.auth.default`.
+ quota_project_id (Optional[str]): An optional project to use for billing
+ and quota.
+ kwargs (Optional[dict]): Keyword arguments, which are passed to the
+ channel creation.
+ Returns:
+ aio.Channel: A gRPC AsyncIO channel object.
+ """
+ scopes = scopes or cls.AUTH_SCOPES
+ return grpc_helpers_async.create_channel(
+ host,
+ credentials=credentials,
+ credentials_file=credentials_file,
+ scopes=scopes,
+ quota_project_id=quota_project_id,
+ **kwargs,
+ )
+
+ def __init__(
+ self,
+ *,
+ host: str = "aiplatform.googleapis.com",
+ credentials: credentials.Credentials = None,
+ credentials_file: Optional[str] = None,
+ scopes: Optional[Sequence[str]] = None,
+ channel: aio.Channel = None,
+ api_mtls_endpoint: str = None,
+ client_cert_source: Callable[[], Tuple[bytes, bytes]] = None,
+ ssl_channel_credentials: grpc.ChannelCredentials = None,
+ client_cert_source_for_mtls: Callable[[], Tuple[bytes, bytes]] = None,
+        quota_project_id: Optional[str] = None,
+ client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
+ ) -> None:
+ """Instantiate the transport.
+
+ Args:
+ host (Optional[str]): The hostname to connect to.
+ credentials (Optional[google.auth.credentials.Credentials]): The
+ authorization credentials to attach to requests. These
+ credentials identify the application to the service; if none
+ are specified, the client will attempt to ascertain the
+ credentials from the environment.
+ This argument is ignored if ``channel`` is provided.
+ credentials_file (Optional[str]): A file with credentials that can
+ be loaded with :func:`google.auth.load_credentials_from_file`.
+ This argument is ignored if ``channel`` is provided.
+            scopes (Optional[Sequence[str]]): An optional list of scopes needed for this
+ service. These are only used when credentials are not specified and
+ are passed to :func:`google.auth.default`.
+ channel (Optional[aio.Channel]): A ``Channel`` instance through
+ which to make calls.
+ api_mtls_endpoint (Optional[str]): Deprecated. The mutual TLS endpoint.
+ If provided, it overrides the ``host`` argument and tries to create
+ a mutual TLS channel with client SSL credentials from
+                ``client_cert_source`` or application default SSL credentials.
+ client_cert_source (Optional[Callable[[], Tuple[bytes, bytes]]]):
+ Deprecated. A callback to provide client SSL certificate bytes and
+ private key bytes, both in PEM format. It is ignored if
+ ``api_mtls_endpoint`` is None.
+ ssl_channel_credentials (grpc.ChannelCredentials): SSL credentials
+ for grpc channel. It is ignored if ``channel`` is provided.
+ client_cert_source_for_mtls (Optional[Callable[[], Tuple[bytes, bytes]]]):
+ A callback to provide client certificate bytes and private key bytes,
+ both in PEM format. It is used to configure mutual TLS channel. It is
+ ignored if ``channel`` or ``ssl_channel_credentials`` is provided.
+ quota_project_id (Optional[str]): An optional project to use for billing
+ and quota.
+ client_info (google.api_core.gapic_v1.client_info.ClientInfo):
+ The client info used to send a user-agent string along with
+ API requests. If ``None``, then default info will be used.
+ Generally, you only need to set this if you're developing
+ your own client library.
+
+ Raises:
+            google.auth.exceptions.MutualTLSChannelError: If mutual TLS transport
+ creation failed for any reason.
+ google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials``
+ and ``credentials_file`` are passed.
+ """
+ self._ssl_channel_credentials = ssl_channel_credentials
+
+ if api_mtls_endpoint:
+ warnings.warn("api_mtls_endpoint is deprecated", DeprecationWarning)
+ if client_cert_source:
+ warnings.warn("client_cert_source is deprecated", DeprecationWarning)
+
+ if channel:
+ # Sanity check: Ensure that channel and credentials are not both
+ # provided.
+ credentials = False
+
+ # If a channel was explicitly provided, set it.
+ self._grpc_channel = channel
+ self._ssl_channel_credentials = None
+ elif api_mtls_endpoint:
+ host = (
+ api_mtls_endpoint
+ if ":" in api_mtls_endpoint
+ else api_mtls_endpoint + ":443"
+ )
+
+ if credentials is None:
+ credentials, _ = auth.default(
+ scopes=self.AUTH_SCOPES, quota_project_id=quota_project_id
+ )
+
+ # Create SSL credentials with client_cert_source or application
+ # default SSL credentials.
+ if client_cert_source:
+ cert, key = client_cert_source()
+ ssl_credentials = grpc.ssl_channel_credentials(
+ certificate_chain=cert, private_key=key
+ )
+ else:
+ ssl_credentials = SslCredentials().ssl_credentials
+
+ # create a new channel. The provided one is ignored.
+ self._grpc_channel = type(self).create_channel(
+ host,
+ credentials=credentials,
+ credentials_file=credentials_file,
+ ssl_credentials=ssl_credentials,
+ scopes=scopes or self.AUTH_SCOPES,
+ quota_project_id=quota_project_id,
+ options=[
+ ("grpc.max_send_message_length", -1),
+ ("grpc.max_receive_message_length", -1),
+ ],
+ )
+ self._ssl_channel_credentials = ssl_credentials
+ else:
+ host = host if ":" in host else host + ":443"
+
+ if credentials is None:
+ credentials, _ = auth.default(
+ scopes=self.AUTH_SCOPES, quota_project_id=quota_project_id
+ )
+
+ if client_cert_source_for_mtls and not ssl_channel_credentials:
+ cert, key = client_cert_source_for_mtls()
+ self._ssl_channel_credentials = grpc.ssl_channel_credentials(
+ certificate_chain=cert, private_key=key
+ )
+
+ # create a new channel. The provided one is ignored.
+ self._grpc_channel = type(self).create_channel(
+ host,
+ credentials=credentials,
+ credentials_file=credentials_file,
+ ssl_credentials=self._ssl_channel_credentials,
+ scopes=scopes or self.AUTH_SCOPES,
+ quota_project_id=quota_project_id,
+ options=[
+ ("grpc.max_send_message_length", -1),
+ ("grpc.max_receive_message_length", -1),
+ ],
+ )
+
+ # Run the base constructor.
+ super().__init__(
+ host=host,
+ credentials=credentials,
+ credentials_file=credentials_file,
+ scopes=scopes or self.AUTH_SCOPES,
+ quota_project_id=quota_project_id,
+ client_info=client_info,
+ )
+
+ self._stubs = {}
+ self._operations_client = None
+
+ @property
+ def grpc_channel(self) -> aio.Channel:
+ """Create the channel designed to connect to this service.
+
+ This property caches on the instance; repeated calls return
+ the same channel.
+ """
+ # Return the channel from cache.
+ return self._grpc_channel
+
+ @property
+ def operations_client(self) -> operations_v1.OperationsAsyncClient:
+ """Create the client designed to process long-running operations.
+
+ This property caches on the instance; repeated calls return the same
+ client.
+ """
+ # Sanity check: Only create a new client if we do not already have one.
+ if self._operations_client is None:
+ self._operations_client = operations_v1.OperationsAsyncClient(
+ self.grpc_channel
+ )
+
+ # Return the client from cache.
+ return self._operations_client
+
+ @property
+ def create_training_pipeline(
+ self,
+ ) -> Callable[
+ [pipeline_service.CreateTrainingPipelineRequest],
+ Awaitable[gca_training_pipeline.TrainingPipeline],
+ ]:
+ r"""Return a callable for the create training pipeline method over gRPC.
+
+        Creates a TrainingPipeline. A run of the
+        created TrainingPipeline is attempted right away.
+
+ Returns:
+ Callable[[~.CreateTrainingPipelineRequest],
+ Awaitable[~.TrainingPipeline]]:
+ A function that, when called, will call the underlying RPC
+ on the server.
+ """
+ # Generate a "stub function" on-the-fly which will actually make
+ # the request.
+ # gRPC handles serialization and deserialization, so we just need
+ # to pass in the functions for each.
+ if "create_training_pipeline" not in self._stubs:
+ self._stubs["create_training_pipeline"] = self.grpc_channel.unary_unary(
+ "/google.cloud.aiplatform.v1.PipelineService/CreateTrainingPipeline",
+ request_serializer=pipeline_service.CreateTrainingPipelineRequest.serialize,
+ response_deserializer=gca_training_pipeline.TrainingPipeline.deserialize,
+ )
+ return self._stubs["create_training_pipeline"]
+
+ @property
+ def get_training_pipeline(
+ self,
+ ) -> Callable[
+ [pipeline_service.GetTrainingPipelineRequest],
+ Awaitable[training_pipeline.TrainingPipeline],
+ ]:
+ r"""Return a callable for the get training pipeline method over gRPC.
+
+ Gets a TrainingPipeline.
+
+ Returns:
+ Callable[[~.GetTrainingPipelineRequest],
+ Awaitable[~.TrainingPipeline]]:
+ A function that, when called, will call the underlying RPC
+ on the server.
+ """
+ # Generate a "stub function" on-the-fly which will actually make
+ # the request.
+ # gRPC handles serialization and deserialization, so we just need
+ # to pass in the functions for each.
+ if "get_training_pipeline" not in self._stubs:
+ self._stubs["get_training_pipeline"] = self.grpc_channel.unary_unary(
+ "/google.cloud.aiplatform.v1.PipelineService/GetTrainingPipeline",
+ request_serializer=pipeline_service.GetTrainingPipelineRequest.serialize,
+ response_deserializer=training_pipeline.TrainingPipeline.deserialize,
+ )
+ return self._stubs["get_training_pipeline"]
+
+ @property
+ def list_training_pipelines(
+ self,
+ ) -> Callable[
+ [pipeline_service.ListTrainingPipelinesRequest],
+ Awaitable[pipeline_service.ListTrainingPipelinesResponse],
+ ]:
+ r"""Return a callable for the list training pipelines method over gRPC.
+
+ Lists TrainingPipelines in a Location.
+
+ Returns:
+ Callable[[~.ListTrainingPipelinesRequest],
+ Awaitable[~.ListTrainingPipelinesResponse]]:
+ A function that, when called, will call the underlying RPC
+ on the server.
+ """
+ # Generate a "stub function" on-the-fly which will actually make
+ # the request.
+ # gRPC handles serialization and deserialization, so we just need
+ # to pass in the functions for each.
+ if "list_training_pipelines" not in self._stubs:
+ self._stubs["list_training_pipelines"] = self.grpc_channel.unary_unary(
+ "/google.cloud.aiplatform.v1.PipelineService/ListTrainingPipelines",
+ request_serializer=pipeline_service.ListTrainingPipelinesRequest.serialize,
+ response_deserializer=pipeline_service.ListTrainingPipelinesResponse.deserialize,
+ )
+ return self._stubs["list_training_pipelines"]
+
+ @property
+ def delete_training_pipeline(
+ self,
+ ) -> Callable[
+ [pipeline_service.DeleteTrainingPipelineRequest],
+ Awaitable[operations.Operation],
+ ]:
+ r"""Return a callable for the delete training pipeline method over gRPC.
+
+ Deletes a TrainingPipeline.
+
+ Returns:
+ Callable[[~.DeleteTrainingPipelineRequest],
+ Awaitable[~.Operation]]:
+ A function that, when called, will call the underlying RPC
+ on the server.
+ """
+ # Generate a "stub function" on-the-fly which will actually make
+ # the request.
+ # gRPC handles serialization and deserialization, so we just need
+ # to pass in the functions for each.
+ if "delete_training_pipeline" not in self._stubs:
+ self._stubs["delete_training_pipeline"] = self.grpc_channel.unary_unary(
+ "/google.cloud.aiplatform.v1.PipelineService/DeleteTrainingPipeline",
+ request_serializer=pipeline_service.DeleteTrainingPipelineRequest.serialize,
+ response_deserializer=operations.Operation.FromString,
+ )
+ return self._stubs["delete_training_pipeline"]
+
+ @property
+ def cancel_training_pipeline(
+ self,
+ ) -> Callable[
+ [pipeline_service.CancelTrainingPipelineRequest], Awaitable[empty.Empty]
+ ]:
+ r"""Return a callable for the cancel training pipeline method over gRPC.
+
+ Cancels a TrainingPipeline. Starts asynchronous cancellation on
+ the TrainingPipeline. The server makes a best effort to cancel
+ the pipeline, but success is not guaranteed. Clients can use
+ ``PipelineService.GetTrainingPipeline``
+ or other methods to check whether the cancellation succeeded or
+ whether the pipeline completed despite cancellation. On
+ successful cancellation, the TrainingPipeline is not deleted;
+ instead it becomes a pipeline with a
+ ``TrainingPipeline.error``
+ value with a ``google.rpc.Status.code`` of
+ 1, corresponding to ``Code.CANCELLED``, and
+ ``TrainingPipeline.state``
+ is set to ``CANCELLED``.
+
+ Returns:
+ Callable[[~.CancelTrainingPipelineRequest],
+ Awaitable[~.Empty]]:
+ A function that, when called, will call the underlying RPC
+ on the server.
+ """
+ # Generate a "stub function" on-the-fly which will actually make
+ # the request.
+ # gRPC handles serialization and deserialization, so we just need
+ # to pass in the functions for each.
+ if "cancel_training_pipeline" not in self._stubs:
+ self._stubs["cancel_training_pipeline"] = self.grpc_channel.unary_unary(
+ "/google.cloud.aiplatform.v1.PipelineService/CancelTrainingPipeline",
+ request_serializer=pipeline_service.CancelTrainingPipelineRequest.serialize,
+ response_deserializer=empty.Empty.FromString,
+ )
+ return self._stubs["cancel_training_pipeline"]
+
+
+__all__ = ("PipelineServiceGrpcAsyncIOTransport",)
diff --git a/google/cloud/aiplatform_v1/services/prediction_service/__init__.py b/google/cloud/aiplatform_v1/services/prediction_service/__init__.py
new file mode 100644
index 0000000000..0c847693e0
--- /dev/null
+++ b/google/cloud/aiplatform_v1/services/prediction_service/__init__.py
@@ -0,0 +1,24 @@
+# -*- coding: utf-8 -*-
+
+# Copyright 2020 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+from .client import PredictionServiceClient
+from .async_client import PredictionServiceAsyncClient
+
+__all__ = (
+ "PredictionServiceClient",
+ "PredictionServiceAsyncClient",
+)
diff --git a/google/cloud/aiplatform_v1/services/prediction_service/async_client.py b/google/cloud/aiplatform_v1/services/prediction_service/async_client.py
new file mode 100644
index 0000000000..c0ab09622c
--- /dev/null
+++ b/google/cloud/aiplatform_v1/services/prediction_service/async_client.py
@@ -0,0 +1,263 @@
+# -*- coding: utf-8 -*-
+
+# Copyright 2020 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+from collections import OrderedDict
+import functools
+import re
+from typing import Dict, Sequence, Tuple, Type, Union
+import pkg_resources
+
+import google.api_core.client_options as ClientOptions # type: ignore
+from google.api_core import exceptions # type: ignore
+from google.api_core import gapic_v1 # type: ignore
+from google.api_core import retry as retries # type: ignore
+from google.auth import credentials # type: ignore
+from google.oauth2 import service_account # type: ignore
+
+from google.cloud.aiplatform_v1.types import prediction_service
+from google.protobuf import struct_pb2 as struct # type: ignore
+
+from .transports.base import PredictionServiceTransport, DEFAULT_CLIENT_INFO
+from .transports.grpc_asyncio import PredictionServiceGrpcAsyncIOTransport
+from .client import PredictionServiceClient
+
+
+class PredictionServiceAsyncClient:
+ """A service for online predictions and explanations."""
+
+ _client: PredictionServiceClient
+
+ DEFAULT_ENDPOINT = PredictionServiceClient.DEFAULT_ENDPOINT
+ DEFAULT_MTLS_ENDPOINT = PredictionServiceClient.DEFAULT_MTLS_ENDPOINT
+
+ endpoint_path = staticmethod(PredictionServiceClient.endpoint_path)
+ parse_endpoint_path = staticmethod(PredictionServiceClient.parse_endpoint_path)
+
+ common_billing_account_path = staticmethod(
+ PredictionServiceClient.common_billing_account_path
+ )
+ parse_common_billing_account_path = staticmethod(
+ PredictionServiceClient.parse_common_billing_account_path
+ )
+
+ common_folder_path = staticmethod(PredictionServiceClient.common_folder_path)
+ parse_common_folder_path = staticmethod(
+ PredictionServiceClient.parse_common_folder_path
+ )
+
+ common_organization_path = staticmethod(
+ PredictionServiceClient.common_organization_path
+ )
+ parse_common_organization_path = staticmethod(
+ PredictionServiceClient.parse_common_organization_path
+ )
+
+ common_project_path = staticmethod(PredictionServiceClient.common_project_path)
+ parse_common_project_path = staticmethod(
+ PredictionServiceClient.parse_common_project_path
+ )
+
+ common_location_path = staticmethod(PredictionServiceClient.common_location_path)
+ parse_common_location_path = staticmethod(
+ PredictionServiceClient.parse_common_location_path
+ )
+
+ from_service_account_info = PredictionServiceClient.from_service_account_info
+ from_service_account_file = PredictionServiceClient.from_service_account_file
+ from_service_account_json = from_service_account_file
+
+ @property
+ def transport(self) -> PredictionServiceTransport:
+ """Return the transport used by the client instance.
+
+ Returns:
+ PredictionServiceTransport: The transport used by the client instance.
+ """
+ return self._client.transport
+
+ get_transport_class = functools.partial(
+ type(PredictionServiceClient).get_transport_class, type(PredictionServiceClient)
+ )
+
+ def __init__(
+ self,
+ *,
+ credentials: credentials.Credentials = None,
+ transport: Union[str, PredictionServiceTransport] = "grpc_asyncio",
+ client_options: ClientOptions = None,
+ client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
+ ) -> None:
+ """Instantiate the prediction service client.
+
+ Args:
+ credentials (Optional[google.auth.credentials.Credentials]): The
+ authorization credentials to attach to requests. These
+ credentials identify the application to the service; if none
+ are specified, the client will attempt to ascertain the
+ credentials from the environment.
+ transport (Union[str, ~.PredictionServiceTransport]): The
+ transport to use. If set to None, a transport is chosen
+ automatically.
+ client_options (ClientOptions): Custom options for the client. It
+ won't take effect if a ``transport`` instance is provided.
+ (1) The ``api_endpoint`` property can be used to override the
+ default endpoint provided by the client. GOOGLE_API_USE_MTLS_ENDPOINT
+ environment variable can also be used to override the endpoint:
+ "always" (always use the default mTLS endpoint), "never" (always
+ use the default regular endpoint) and "auto" (auto switch to the
+ default mTLS endpoint if client certificate is present, this is
+ the default value). However, the ``api_endpoint`` property takes
+ precedence if provided.
+ (2) If GOOGLE_API_USE_CLIENT_CERTIFICATE environment variable
+ is "true", then the ``client_cert_source`` property can be used
+ to provide client certificate for mutual TLS transport. If
+ not provided, the default SSL client certificate will be used if
+ present. If GOOGLE_API_USE_CLIENT_CERTIFICATE is "false" or not
+ set, no client certificate will be used.
+
+ Raises:
+            google.auth.exceptions.MutualTLSChannelError: If mutual TLS transport
+ creation failed for any reason.
+ """
+
+ self._client = PredictionServiceClient(
+ credentials=credentials,
+ transport=transport,
+ client_options=client_options,
+ client_info=client_info,
+ )
+
+ async def predict(
+ self,
+ request: prediction_service.PredictRequest = None,
+ *,
+ endpoint: str = None,
+ instances: Sequence[struct.Value] = None,
+ parameters: struct.Value = None,
+ retry: retries.Retry = gapic_v1.method.DEFAULT,
+ timeout: float = None,
+ metadata: Sequence[Tuple[str, str]] = (),
+ ) -> prediction_service.PredictResponse:
+ r"""Perform an online prediction.
+
+ Args:
+ request (:class:`google.cloud.aiplatform_v1.types.PredictRequest`):
+ The request object. Request message for
+ ``PredictionService.Predict``.
+ endpoint (:class:`str`):
+ Required. The name of the Endpoint requested to serve
+ the prediction. Format:
+ ``projects/{project}/locations/{location}/endpoints/{endpoint}``
+
+ This corresponds to the ``endpoint`` field
+ on the ``request`` instance; if ``request`` is provided, this
+ should not be set.
+ instances (:class:`Sequence[google.protobuf.struct_pb2.Value]`):
+                Required. The instances that are the input to the
+                prediction call. A DeployedModel may have an upper limit
+                on the number of instances it supports per request; when
+                that limit is exceeded, the prediction call fails for
+                AutoML Models, while for customer-created Models the
+                behaviour is as documented by that Model.
+ The schema of any single instance may be specified via
+ Endpoint's DeployedModels'
+ [Model's][google.cloud.aiplatform.v1.DeployedModel.model]
+ [PredictSchemata's][google.cloud.aiplatform.v1.Model.predict_schemata]
+ ``instance_schema_uri``.
+
+ This corresponds to the ``instances`` field
+ on the ``request`` instance; if ``request`` is provided, this
+ should not be set.
+ parameters (:class:`google.protobuf.struct_pb2.Value`):
+ The parameters that govern the prediction. The schema of
+ the parameters may be specified via Endpoint's
+ DeployedModels' [Model's
+ ][google.cloud.aiplatform.v1.DeployedModel.model]
+ [PredictSchemata's][google.cloud.aiplatform.v1.Model.predict_schemata]
+ ``parameters_schema_uri``.
+
+ This corresponds to the ``parameters`` field
+ on the ``request`` instance; if ``request`` is provided, this
+ should not be set.
+
+ retry (google.api_core.retry.Retry): Designation of what errors, if any,
+ should be retried.
+ timeout (float): The timeout for this request.
+ metadata (Sequence[Tuple[str, str]]): Strings which should be
+ sent along with the request as metadata.
+
+ Returns:
+ google.cloud.aiplatform_v1.types.PredictResponse:
+ Response message for
+ ``PredictionService.Predict``.
+
+ """
+ # Create or coerce a protobuf request object.
+ # Sanity check: If we got a request object, we should *not* have
+ # gotten any keyword arguments that map to the request.
+ has_flattened_params = any([endpoint, instances, parameters])
+ if request is not None and has_flattened_params:
+ raise ValueError(
+ "If the `request` argument is set, then none of "
+ "the individual field arguments should be set."
+ )
+
+ request = prediction_service.PredictRequest(request)
+
+ # If we have keyword arguments corresponding to fields on the
+ # request, apply these.
+
+ if endpoint is not None:
+ request.endpoint = endpoint
+ if parameters is not None:
+ request.parameters = parameters
+
+ if instances:
+ request.instances.extend(instances)
+
+ # Wrap the RPC method; this adds retry and timeout information,
+ # and friendly error handling.
+ rpc = gapic_v1.method_async.wrap_method(
+ self._client._transport.predict,
+ default_timeout=None,
+ client_info=DEFAULT_CLIENT_INFO,
+ )
+
+ # Certain fields should be provided within the metadata header;
+ # add these here.
+ metadata = tuple(metadata) + (
+ gapic_v1.routing_header.to_grpc_metadata((("endpoint", request.endpoint),)),
+ )
+
+ # Send the request.
+ response = await rpc(request, retry=retry, timeout=timeout, metadata=metadata,)
+
+ # Done; return the response.
+ return response
+
+
+try:
+ DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo(
+ gapic_version=pkg_resources.get_distribution(
+ "google-cloud-aiplatform",
+ ).version,
+ )
+except pkg_resources.DistributionNotFound:
+ DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo()
+
+
+__all__ = ("PredictionServiceAsyncClient",)
diff --git a/google/cloud/aiplatform_v1/services/prediction_service/client.py b/google/cloud/aiplatform_v1/services/prediction_service/client.py
new file mode 100644
index 0000000000..55c52b48f4
--- /dev/null
+++ b/google/cloud/aiplatform_v1/services/prediction_service/client.py
@@ -0,0 +1,468 @@
+# -*- coding: utf-8 -*-
+
+# Copyright 2020 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+from collections import OrderedDict
+from distutils import util
+import os
+import re
+from typing import Callable, Dict, Optional, Sequence, Tuple, Type, Union
+import pkg_resources
+
+from google.api_core import client_options as client_options_lib # type: ignore
+from google.api_core import exceptions # type: ignore
+from google.api_core import gapic_v1 # type: ignore
+from google.api_core import retry as retries # type: ignore
+from google.auth import credentials # type: ignore
+from google.auth.transport import mtls # type: ignore
+from google.auth.transport.grpc import SslCredentials # type: ignore
+from google.auth.exceptions import MutualTLSChannelError # type: ignore
+from google.oauth2 import service_account # type: ignore
+
+from google.cloud.aiplatform_v1.types import prediction_service
+from google.protobuf import struct_pb2 as struct # type: ignore
+
+from .transports.base import PredictionServiceTransport, DEFAULT_CLIENT_INFO
+from .transports.grpc import PredictionServiceGrpcTransport
+from .transports.grpc_asyncio import PredictionServiceGrpcAsyncIOTransport
+
+
+class PredictionServiceClientMeta(type):
+ """Metaclass for the PredictionService client.
+
+ This provides class-level methods for building and retrieving
+ support objects (e.g. transport) without polluting the client instance
+ objects.
+ """
+
+ _transport_registry = (
+ OrderedDict()
+ ) # type: Dict[str, Type[PredictionServiceTransport]]
+ _transport_registry["grpc"] = PredictionServiceGrpcTransport
+ _transport_registry["grpc_asyncio"] = PredictionServiceGrpcAsyncIOTransport
+
+ def get_transport_class(
+ cls, label: str = None,
+ ) -> Type[PredictionServiceTransport]:
+ """Return an appropriate transport class.
+
+ Args:
+ label: The name of the desired transport. If none is
+ provided, then the first transport in the registry is used.
+
+ Returns:
+ The transport class to use.
+ """
+ # If a specific transport is requested, return that one.
+ if label:
+ return cls._transport_registry[label]
+
+ # No transport is requested; return the default (that is, the first one
+ # in the dictionary).
+ return next(iter(cls._transport_registry.values()))
+
+
+class PredictionServiceClient(metaclass=PredictionServiceClientMeta):
+ """A service for online predictions and explanations."""
+
+ @staticmethod
+ def _get_default_mtls_endpoint(api_endpoint):
+ """Convert api endpoint to mTLS endpoint.
+ Convert "*.sandbox.googleapis.com" and "*.googleapis.com" to
+ "*.mtls.sandbox.googleapis.com" and "*.mtls.googleapis.com" respectively.
+ Args:
+ api_endpoint (Optional[str]): the api endpoint to convert.
+ Returns:
+ str: converted mTLS api endpoint.
+ """
+ if not api_endpoint:
+ return api_endpoint
+
+ mtls_endpoint_re = re.compile(
+ r"(?P[^.]+)(?P\.mtls)?(?P\.sandbox)?(?P\.googleapis\.com)?"
+ )
+
+ m = mtls_endpoint_re.match(api_endpoint)
+ name, mtls, sandbox, googledomain = m.groups()
+ if mtls or not googledomain:
+ return api_endpoint
+
+ if sandbox:
+ return api_endpoint.replace(
+ "sandbox.googleapis.com", "mtls.sandbox.googleapis.com"
+ )
+
+ return api_endpoint.replace(".googleapis.com", ".mtls.googleapis.com")
+
+ DEFAULT_ENDPOINT = "aiplatform.googleapis.com"
+ DEFAULT_MTLS_ENDPOINT = _get_default_mtls_endpoint.__func__( # type: ignore
+ DEFAULT_ENDPOINT
+ )
+
+ @classmethod
+ def from_service_account_info(cls, info: dict, *args, **kwargs):
+ """Creates an instance of this client using the provided credentials info.
+
+ Args:
+ info (dict): The service account private key info.
+ args: Additional arguments to pass to the constructor.
+ kwargs: Additional arguments to pass to the constructor.
+
+ Returns:
+ PredictionServiceClient: The constructed client.
+ """
+ credentials = service_account.Credentials.from_service_account_info(info)
+ kwargs["credentials"] = credentials
+ return cls(*args, **kwargs)
+
+ @classmethod
+ def from_service_account_file(cls, filename: str, *args, **kwargs):
+ """Creates an instance of this client using the provided credentials
+ file.
+
+ Args:
+ filename (str): The path to the service account private key json
+ file.
+ args: Additional arguments to pass to the constructor.
+ kwargs: Additional arguments to pass to the constructor.
+
+ Returns:
+ PredictionServiceClient: The constructed client.
+ """
+ credentials = service_account.Credentials.from_service_account_file(filename)
+ kwargs["credentials"] = credentials
+ return cls(*args, **kwargs)
+
+ from_service_account_json = from_service_account_file
+
+ @property
+ def transport(self) -> PredictionServiceTransport:
+ """Return the transport used by the client instance.
+
+ Returns:
+ PredictionServiceTransport: The transport used by the client instance.
+ """
+ return self._transport
+
+ @staticmethod
+ def endpoint_path(project: str, location: str, endpoint: str,) -> str:
+ """Return a fully-qualified endpoint string."""
+ return "projects/{project}/locations/{location}/endpoints/{endpoint}".format(
+ project=project, location=location, endpoint=endpoint,
+ )
+
+ @staticmethod
+ def parse_endpoint_path(path: str) -> Dict[str, str]:
+ """Parse a endpoint path into its component segments."""
+ m = re.match(
+ r"^projects/(?P.+?)/locations/(?P.+?)/endpoints/(?P.+?)$",
+ path,
+ )
+ return m.groupdict() if m else {}
+
+ @staticmethod
+ def common_billing_account_path(billing_account: str,) -> str:
+ """Return a fully-qualified billing_account string."""
+ return "billingAccounts/{billing_account}".format(
+ billing_account=billing_account,
+ )
+
+ @staticmethod
+ def parse_common_billing_account_path(path: str) -> Dict[str, str]:
+ """Parse a billing_account path into its component segments."""
+ m = re.match(r"^billingAccounts/(?P.+?)$", path)
+ return m.groupdict() if m else {}
+
+ @staticmethod
+ def common_folder_path(folder: str,) -> str:
+ """Return a fully-qualified folder string."""
+ return "folders/{folder}".format(folder=folder,)
+
+ @staticmethod
+ def parse_common_folder_path(path: str) -> Dict[str, str]:
+ """Parse a folder path into its component segments."""
+ m = re.match(r"^folders/(?P.+?)$", path)
+ return m.groupdict() if m else {}
+
+ @staticmethod
+ def common_organization_path(organization: str,) -> str:
+ """Return a fully-qualified organization string."""
+ return "organizations/{organization}".format(organization=organization,)
+
+ @staticmethod
+ def parse_common_organization_path(path: str) -> Dict[str, str]:
+ """Parse a organization path into its component segments."""
+ m = re.match(r"^organizations/(?P.+?)$", path)
+ return m.groupdict() if m else {}
+
+ @staticmethod
+ def common_project_path(project: str,) -> str:
+ """Return a fully-qualified project string."""
+ return "projects/{project}".format(project=project,)
+
+ @staticmethod
+ def parse_common_project_path(path: str) -> Dict[str, str]:
+ """Parse a project path into its component segments."""
+ m = re.match(r"^projects/(?P.+?)$", path)
+ return m.groupdict() if m else {}
+
+ @staticmethod
+ def common_location_path(project: str, location: str,) -> str:
+ """Return a fully-qualified location string."""
+ return "projects/{project}/locations/{location}".format(
+ project=project, location=location,
+ )
+
+ @staticmethod
+ def parse_common_location_path(path: str) -> Dict[str, str]:
+ """Parse a location path into its component segments."""
+ m = re.match(r"^projects/(?P.+?)/locations/(?P.+?)$", path)
+ return m.groupdict() if m else {}
+
+ def __init__(
+ self,
+ *,
+ credentials: Optional[credentials.Credentials] = None,
+ transport: Union[str, PredictionServiceTransport, None] = None,
+ client_options: Optional[client_options_lib.ClientOptions] = None,
+ client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
+ ) -> None:
+ """Instantiate the prediction service client.
+
+ Args:
+ credentials (Optional[google.auth.credentials.Credentials]): The
+ authorization credentials to attach to requests. These
+ credentials identify the application to the service; if none
+ are specified, the client will attempt to ascertain the
+ credentials from the environment.
+ transport (Union[str, PredictionServiceTransport]): The
+ transport to use. If set to None, a transport is chosen
+ automatically.
+ client_options (google.api_core.client_options.ClientOptions): Custom options for the
+ client. It won't take effect if a ``transport`` instance is provided.
+ (1) The ``api_endpoint`` property can be used to override the
+ default endpoint provided by the client. GOOGLE_API_USE_MTLS_ENDPOINT
+ environment variable can also be used to override the endpoint:
+ "always" (always use the default mTLS endpoint), "never" (always
+ use the default regular endpoint) and "auto" (auto switch to the
+ default mTLS endpoint if client certificate is present, this is
+ the default value). However, the ``api_endpoint`` property takes
+ precedence if provided.
+ (2) If GOOGLE_API_USE_CLIENT_CERTIFICATE environment variable
+ is "true", then the ``client_cert_source`` property can be used
+ to provide client certificate for mutual TLS transport. If
+ not provided, the default SSL client certificate will be used if
+ present. If GOOGLE_API_USE_CLIENT_CERTIFICATE is "false" or not
+ set, no client certificate will be used.
+ client_info (google.api_core.gapic_v1.client_info.ClientInfo):
+ The client info used to send a user-agent string along with
+ API requests. If ``None``, then default info will be used.
+ Generally, you only need to set this if you're developing
+ your own client library.
+
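+        Example (an illustrative sketch; the regional endpoint shown is an
+        assumed value, not one defined in this module)::
+
+            from google.api_core import client_options as client_options_lib
+
+            options = client_options_lib.ClientOptions(
+                api_endpoint="us-central1-aiplatform.googleapis.com"
+            )
+            client = PredictionServiceClient(client_options=options)
+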
+ Raises:
+ google.auth.exceptions.MutualTLSChannelError: If mutual TLS transport
+ creation failed for any reason.
+ """
+ if isinstance(client_options, dict):
+ client_options = client_options_lib.from_dict(client_options)
+ if client_options is None:
+ client_options = client_options_lib.ClientOptions()
+
+ # Create SSL credentials for mutual TLS if needed.
+ use_client_cert = bool(
+ util.strtobool(os.getenv("GOOGLE_API_USE_CLIENT_CERTIFICATE", "false"))
+ )
+
+ client_cert_source_func = None
+ is_mtls = False
+ if use_client_cert:
+ if client_options.client_cert_source:
+ is_mtls = True
+ client_cert_source_func = client_options.client_cert_source
+ else:
+ is_mtls = mtls.has_default_client_cert_source()
+ client_cert_source_func = (
+ mtls.default_client_cert_source() if is_mtls else None
+ )
+
+ # Figure out which api endpoint to use.
+ if client_options.api_endpoint is not None:
+ api_endpoint = client_options.api_endpoint
+ else:
+ use_mtls_env = os.getenv("GOOGLE_API_USE_MTLS_ENDPOINT", "auto")
+ if use_mtls_env == "never":
+ api_endpoint = self.DEFAULT_ENDPOINT
+ elif use_mtls_env == "always":
+ api_endpoint = self.DEFAULT_MTLS_ENDPOINT
+ elif use_mtls_env == "auto":
+ api_endpoint = (
+ self.DEFAULT_MTLS_ENDPOINT if is_mtls else self.DEFAULT_ENDPOINT
+ )
+ else:
+ raise MutualTLSChannelError(
+ "Unsupported GOOGLE_API_USE_MTLS_ENDPOINT value. Accepted values: never, auto, always"
+ )
+
+ # Save or instantiate the transport.
+ # Ordinarily, we provide the transport, but allowing a custom transport
+ # instance provides an extensibility point for unusual situations.
+ if isinstance(transport, PredictionServiceTransport):
+ # transport is a PredictionServiceTransport instance.
+ if credentials or client_options.credentials_file:
+ raise ValueError(
+ "When providing a transport instance, "
+ "provide its credentials directly."
+ )
+ if client_options.scopes:
+ raise ValueError(
+ "When providing a transport instance, "
+ "provide its scopes directly."
+ )
+ self._transport = transport
+ else:
+ Transport = type(self).get_transport_class(transport)
+ self._transport = Transport(
+ credentials=credentials,
+ credentials_file=client_options.credentials_file,
+ host=api_endpoint,
+ scopes=client_options.scopes,
+ client_cert_source_for_mtls=client_cert_source_func,
+ quota_project_id=client_options.quota_project_id,
+ client_info=client_info,
+ )
+
+ def predict(
+ self,
+ request: prediction_service.PredictRequest = None,
+ *,
+ endpoint: str = None,
+ instances: Sequence[struct.Value] = None,
+ parameters: struct.Value = None,
+ retry: retries.Retry = gapic_v1.method.DEFAULT,
+ timeout: float = None,
+ metadata: Sequence[Tuple[str, str]] = (),
+ ) -> prediction_service.PredictResponse:
+ r"""Perform an online prediction.
+
+ Args:
+ request (google.cloud.aiplatform_v1.types.PredictRequest):
+ The request object. Request message for
+ ``PredictionService.Predict``.
+ endpoint (str):
+ Required. The name of the Endpoint requested to serve
+ the prediction. Format:
+ ``projects/{project}/locations/{location}/endpoints/{endpoint}``
+
+ This corresponds to the ``endpoint`` field
+ on the ``request`` instance; if ``request`` is provided, this
+ should not be set.
+ instances (Sequence[google.protobuf.struct_pb2.Value]):
+ Required. The instances that are the input to the
+ prediction call. A DeployedModel may have an upper limit
+ on the number of instances it supports per request, and
+ when it is exceeded the prediction call errors in case
+ of AutoML Models, or, in case of customer created
+ Models, the behaviour is as documented by that Model.
+ The schema of any single instance may be specified via
+ Endpoint's DeployedModels'
+ [Model's][google.cloud.aiplatform.v1.DeployedModel.model]
+ [PredictSchemata's][google.cloud.aiplatform.v1.Model.predict_schemata]
+ ``instance_schema_uri``.
+
+ This corresponds to the ``instances`` field
+ on the ``request`` instance; if ``request`` is provided, this
+ should not be set.
+ parameters (google.protobuf.struct_pb2.Value):
+ The parameters that govern the prediction. The schema of
+ the parameters may be specified via Endpoint's
+ DeployedModels' [Model's
+ ][google.cloud.aiplatform.v1.DeployedModel.model]
+ [PredictSchemata's][google.cloud.aiplatform.v1.Model.predict_schemata]
+ ``parameters_schema_uri``.
+
+ This corresponds to the ``parameters`` field
+ on the ``request`` instance; if ``request`` is provided, this
+ should not be set.
+
+ retry (google.api_core.retry.Retry): Designation of what errors, if any,
+ should be retried.
+ timeout (float): The timeout for this request.
+ metadata (Sequence[Tuple[str, str]]): Strings which should be
+ sent along with the request as metadata.
+
+ Returns:
+ google.cloud.aiplatform_v1.types.PredictResponse:
+ Response message for
+ ``PredictionService.Predict``.
+
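+        Example (an illustrative sketch; Application Default Credentials are
+        assumed, and the endpoint resource name and instance payload are
+        hypothetical)::
+
+            from google.protobuf import json_format, struct_pb2
+
+            client = PredictionServiceClient()
+            instance = json_format.ParseDict(
+                {"feature_a": 1.0, "feature_b": "x"}, struct_pb2.Value()
+            )
+            response = client.predict(
+                endpoint="projects/my-project/locations/us-central1/endpoints/123",
+                instances=[instance],
+            )
+            predictions = response.predictions
+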
+ """
+ # Create or coerce a protobuf request object.
+ # Sanity check: If we got a request object, we should *not* have
+ # gotten any keyword arguments that map to the request.
+ has_flattened_params = any([endpoint, instances, parameters])
+ if request is not None and has_flattened_params:
+ raise ValueError(
+ "If the `request` argument is set, then none of "
+ "the individual field arguments should be set."
+ )
+
+ # Minor optimization to avoid making a copy if the user passes
+ # in a prediction_service.PredictRequest.
+ # There's no risk of modifying the input as we've already verified
+ # there are no flattened fields.
+ if not isinstance(request, prediction_service.PredictRequest):
+ request = prediction_service.PredictRequest(request)
+
+ # If we have keyword arguments corresponding to fields on the
+ # request, apply these.
+
+ if endpoint is not None:
+ request.endpoint = endpoint
+ if parameters is not None:
+ request.parameters = parameters
+
+ if instances:
+ request.instances.extend(instances)
+
+ # Wrap the RPC method; this adds retry and timeout information,
+ # and friendly error handling.
+ rpc = self._transport._wrapped_methods[self._transport.predict]
+
+ # Certain fields should be provided within the metadata header;
+ # add these here.
+ metadata = tuple(metadata) + (
+ gapic_v1.routing_header.to_grpc_metadata((("endpoint", request.endpoint),)),
+ )
+
+ # Send the request.
+ response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,)
+
+ # Done; return the response.
+ return response
+
+
+try:
+ DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo(
+ gapic_version=pkg_resources.get_distribution(
+ "google-cloud-aiplatform",
+ ).version,
+ )
+except pkg_resources.DistributionNotFound:
+ DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo()
+
+
+__all__ = ("PredictionServiceClient",)
diff --git a/google/cloud/aiplatform_v1/services/prediction_service/transports/__init__.py b/google/cloud/aiplatform_v1/services/prediction_service/transports/__init__.py
new file mode 100644
index 0000000000..9ec1369a05
--- /dev/null
+++ b/google/cloud/aiplatform_v1/services/prediction_service/transports/__init__.py
@@ -0,0 +1,35 @@
+# -*- coding: utf-8 -*-
+
+# Copyright 2020 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+from collections import OrderedDict
+from typing import Dict, Type
+
+from .base import PredictionServiceTransport
+from .grpc import PredictionServiceGrpcTransport
+from .grpc_asyncio import PredictionServiceGrpcAsyncIOTransport
+
+
+# Compile a registry of transports.
+_transport_registry = OrderedDict() # type: Dict[str, Type[PredictionServiceTransport]]
+_transport_registry["grpc"] = PredictionServiceGrpcTransport
+_transport_registry["grpc_asyncio"] = PredictionServiceGrpcAsyncIOTransport
+
+__all__ = (
+ "PredictionServiceTransport",
+ "PredictionServiceGrpcTransport",
+ "PredictionServiceGrpcAsyncIOTransport",
+)
diff --git a/google/cloud/aiplatform_v1/services/prediction_service/transports/base.py b/google/cloud/aiplatform_v1/services/prediction_service/transports/base.py
new file mode 100644
index 0000000000..311639daaf
--- /dev/null
+++ b/google/cloud/aiplatform_v1/services/prediction_service/transports/base.py
@@ -0,0 +1,127 @@
+# -*- coding: utf-8 -*-
+
+# Copyright 2020 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+import abc
+import typing
+import pkg_resources
+
+from google import auth # type: ignore
+from google.api_core import exceptions # type: ignore
+from google.api_core import gapic_v1 # type: ignore
+from google.api_core import retry as retries # type: ignore
+from google.auth import credentials # type: ignore
+
+from google.cloud.aiplatform_v1.types import prediction_service
+
+
+try:
+ DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo(
+ gapic_version=pkg_resources.get_distribution(
+ "google-cloud-aiplatform",
+ ).version,
+ )
+except pkg_resources.DistributionNotFound:
+ DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo()
+
+
+class PredictionServiceTransport(abc.ABC):
+ """Abstract transport class for PredictionService."""
+
+ AUTH_SCOPES = ("https://www.googleapis.com/auth/cloud-platform",)
+
+ def __init__(
+ self,
+ *,
+ host: str = "aiplatform.googleapis.com",
+ credentials: credentials.Credentials = None,
+ credentials_file: typing.Optional[str] = None,
+ scopes: typing.Optional[typing.Sequence[str]] = AUTH_SCOPES,
+ quota_project_id: typing.Optional[str] = None,
+ client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
+ **kwargs,
+ ) -> None:
+ """Instantiate the transport.
+
+ Args:
+ host (Optional[str]): The hostname to connect to.
+ credentials (Optional[google.auth.credentials.Credentials]): The
+ authorization credentials to attach to requests. These
+ credentials identify the application to the service; if none
+ are specified, the client will attempt to ascertain the
+ credentials from the environment.
+ credentials_file (Optional[str]): A file with credentials that can
+ be loaded with :func:`google.auth.load_credentials_from_file`.
+ This argument is mutually exclusive with credentials.
+            scopes (Optional[Sequence[str]]): A list of scopes.
+ quota_project_id (Optional[str]): An optional project to use for billing
+ and quota.
+ client_info (google.api_core.gapic_v1.client_info.ClientInfo):
+ The client info used to send a user-agent string along with
+ API requests. If ``None``, then default info will be used.
+ Generally, you only need to set this if you're developing
+ your own client library.
+ """
+ # Save the hostname. Default to port 443 (HTTPS) if none is specified.
+ if ":" not in host:
+ host += ":443"
+ self._host = host
+
+ # If no credentials are provided, then determine the appropriate
+ # defaults.
+ if credentials and credentials_file:
+ raise exceptions.DuplicateCredentialArgs(
+ "'credentials_file' and 'credentials' are mutually exclusive"
+ )
+
+ if credentials_file is not None:
+ credentials, _ = auth.load_credentials_from_file(
+ credentials_file, scopes=scopes, quota_project_id=quota_project_id
+ )
+
+ elif credentials is None:
+ credentials, _ = auth.default(
+ scopes=scopes, quota_project_id=quota_project_id
+ )
+
+ # Save the credentials.
+ self._credentials = credentials
+
+ # Lifted into its own function so it can be stubbed out during tests.
+ self._prep_wrapped_messages(client_info)
+
+ def _prep_wrapped_messages(self, client_info):
+ # Precompute the wrapped methods.
+ self._wrapped_methods = {
+ self.predict: gapic_v1.method.wrap_method(
+ self.predict, default_timeout=None, client_info=client_info,
+ ),
+ }
+
+ @property
+ def predict(
+ self,
+ ) -> typing.Callable[
+ [prediction_service.PredictRequest],
+ typing.Union[
+ prediction_service.PredictResponse,
+ typing.Awaitable[prediction_service.PredictResponse],
+ ],
+ ]:
+ raise NotImplementedError()
+
+
+__all__ = ("PredictionServiceTransport",)
diff --git a/google/cloud/aiplatform_v1/services/prediction_service/transports/grpc.py b/google/cloud/aiplatform_v1/services/prediction_service/transports/grpc.py
new file mode 100644
index 0000000000..4fcfe5b442
--- /dev/null
+++ b/google/cloud/aiplatform_v1/services/prediction_service/transports/grpc.py
@@ -0,0 +1,280 @@
+# -*- coding: utf-8 -*-
+
+# Copyright 2020 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+import warnings
+from typing import Callable, Dict, Optional, Sequence, Tuple
+
+from google.api_core import grpc_helpers # type: ignore
+from google.api_core import gapic_v1 # type: ignore
+from google import auth # type: ignore
+from google.auth import credentials # type: ignore
+from google.auth.transport.grpc import SslCredentials # type: ignore
+
+import grpc # type: ignore
+
+from google.cloud.aiplatform_v1.types import prediction_service
+
+from .base import PredictionServiceTransport, DEFAULT_CLIENT_INFO
+
+
+class PredictionServiceGrpcTransport(PredictionServiceTransport):
+ """gRPC backend transport for PredictionService.
+
+ A service for online predictions and explanations.
+
+ This class defines the same methods as the primary client, so the
+ primary client can load the underlying transport implementation
+ and call it.
+
+ It sends protocol buffers over the wire using gRPC (which is built on
+ top of HTTP/2); the ``grpcio`` package must be installed.
+ """
+
+ _stubs: Dict[str, Callable]
+
+ def __init__(
+ self,
+ *,
+ host: str = "aiplatform.googleapis.com",
+ credentials: credentials.Credentials = None,
+ credentials_file: str = None,
+ scopes: Sequence[str] = None,
+ channel: grpc.Channel = None,
+ api_mtls_endpoint: str = None,
+ client_cert_source: Callable[[], Tuple[bytes, bytes]] = None,
+ ssl_channel_credentials: grpc.ChannelCredentials = None,
+ client_cert_source_for_mtls: Callable[[], Tuple[bytes, bytes]] = None,
+ quota_project_id: Optional[str] = None,
+ client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
+ ) -> None:
+ """Instantiate the transport.
+
+ Args:
+ host (Optional[str]): The hostname to connect to.
+ credentials (Optional[google.auth.credentials.Credentials]): The
+ authorization credentials to attach to requests. These
+ credentials identify the application to the service; if none
+ are specified, the client will attempt to ascertain the
+ credentials from the environment.
+ This argument is ignored if ``channel`` is provided.
+ credentials_file (Optional[str]): A file with credentials that can
+ be loaded with :func:`google.auth.load_credentials_from_file`.
+ This argument is ignored if ``channel`` is provided.
+            scopes (Optional[Sequence[str]]): A list of scopes. This argument is
+ ignored if ``channel`` is provided.
+ channel (Optional[grpc.Channel]): A ``Channel`` instance through
+ which to make calls.
+ api_mtls_endpoint (Optional[str]): Deprecated. The mutual TLS endpoint.
+ If provided, it overrides the ``host`` argument and tries to create
+ a mutual TLS channel with client SSL credentials from
+                ``client_cert_source`` or application default SSL credentials.
+ client_cert_source (Optional[Callable[[], Tuple[bytes, bytes]]]):
+ Deprecated. A callback to provide client SSL certificate bytes and
+ private key bytes, both in PEM format. It is ignored if
+ ``api_mtls_endpoint`` is None.
+ ssl_channel_credentials (grpc.ChannelCredentials): SSL credentials
+ for grpc channel. It is ignored if ``channel`` is provided.
+ client_cert_source_for_mtls (Optional[Callable[[], Tuple[bytes, bytes]]]):
+ A callback to provide client certificate bytes and private key bytes,
+ both in PEM format. It is used to configure mutual TLS channel. It is
+ ignored if ``channel`` or ``ssl_channel_credentials`` is provided.
+ quota_project_id (Optional[str]): An optional project to use for billing
+ and quota.
+ client_info (google.api_core.gapic_v1.client_info.ClientInfo):
+ The client info used to send a user-agent string along with
+ API requests. If ``None``, then default info will be used.
+ Generally, you only need to set this if you're developing
+ your own client library.
+
+ Raises:
+ google.auth.exceptions.MutualTLSChannelError: If mutual TLS transport
+ creation failed for any reason.
+ google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials``
+ and ``credentials_file`` are passed.
+ """
+ self._ssl_channel_credentials = ssl_channel_credentials
+
+ if api_mtls_endpoint:
+ warnings.warn("api_mtls_endpoint is deprecated", DeprecationWarning)
+ if client_cert_source:
+ warnings.warn("client_cert_source is deprecated", DeprecationWarning)
+
+ if channel:
+ # Sanity check: Ensure that channel and credentials are not both
+ # provided.
+ credentials = False
+
+ # If a channel was explicitly provided, set it.
+ self._grpc_channel = channel
+ self._ssl_channel_credentials = None
+ elif api_mtls_endpoint:
+ host = (
+ api_mtls_endpoint
+ if ":" in api_mtls_endpoint
+ else api_mtls_endpoint + ":443"
+ )
+
+ if credentials is None:
+ credentials, _ = auth.default(
+ scopes=self.AUTH_SCOPES, quota_project_id=quota_project_id
+ )
+
+ # Create SSL credentials with client_cert_source or application
+ # default SSL credentials.
+ if client_cert_source:
+ cert, key = client_cert_source()
+ ssl_credentials = grpc.ssl_channel_credentials(
+ certificate_chain=cert, private_key=key
+ )
+ else:
+ ssl_credentials = SslCredentials().ssl_credentials
+
+ # create a new channel. The provided one is ignored.
+ self._grpc_channel = type(self).create_channel(
+ host,
+ credentials=credentials,
+ credentials_file=credentials_file,
+ ssl_credentials=ssl_credentials,
+ scopes=scopes or self.AUTH_SCOPES,
+ quota_project_id=quota_project_id,
+ options=[
+ ("grpc.max_send_message_length", -1),
+ ("grpc.max_receive_message_length", -1),
+ ],
+ )
+ self._ssl_channel_credentials = ssl_credentials
+ else:
+ host = host if ":" in host else host + ":443"
+
+ if credentials is None:
+ credentials, _ = auth.default(
+ scopes=self.AUTH_SCOPES, quota_project_id=quota_project_id
+ )
+
+ if client_cert_source_for_mtls and not ssl_channel_credentials:
+ cert, key = client_cert_source_for_mtls()
+ self._ssl_channel_credentials = grpc.ssl_channel_credentials(
+ certificate_chain=cert, private_key=key
+ )
+
+ # create a new channel. The provided one is ignored.
+ self._grpc_channel = type(self).create_channel(
+ host,
+ credentials=credentials,
+ credentials_file=credentials_file,
+ ssl_credentials=self._ssl_channel_credentials,
+ scopes=scopes or self.AUTH_SCOPES,
+ quota_project_id=quota_project_id,
+ options=[
+ ("grpc.max_send_message_length", -1),
+ ("grpc.max_receive_message_length", -1),
+ ],
+ )
+
+ self._stubs = {} # type: Dict[str, Callable]
+
+ # Run the base constructor.
+ super().__init__(
+ host=host,
+ credentials=credentials,
+ credentials_file=credentials_file,
+ scopes=scopes or self.AUTH_SCOPES,
+ quota_project_id=quota_project_id,
+ client_info=client_info,
+ )
+
+ @classmethod
+ def create_channel(
+ cls,
+ host: str = "aiplatform.googleapis.com",
+ credentials: credentials.Credentials = None,
+ credentials_file: str = None,
+ scopes: Optional[Sequence[str]] = None,
+ quota_project_id: Optional[str] = None,
+ **kwargs,
+ ) -> grpc.Channel:
+ """Create and return a gRPC channel object.
+ Args:
+            host (Optional[str]): The host for the channel to use.
+ credentials (Optional[~.Credentials]): The
+ authorization credentials to attach to requests. These
+ credentials identify this application to the service. If
+ none are specified, the client will attempt to ascertain
+ the credentials from the environment.
+ credentials_file (Optional[str]): A file with credentials that can
+ be loaded with :func:`google.auth.load_credentials_from_file`.
+ This argument is mutually exclusive with credentials.
+            scopes (Optional[Sequence[str]]): An optional list of scopes needed for this
+ service. These are only used when credentials are not specified and
+ are passed to :func:`google.auth.default`.
+ quota_project_id (Optional[str]): An optional project to use for billing
+ and quota.
+ kwargs (Optional[dict]): Keyword arguments, which are passed to the
+ channel creation.
+ Returns:
+ grpc.Channel: A gRPC channel object.
+
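+        Example (a minimal sketch; Application Default Credentials are
+        assumed to be available in the environment)::
+
+            channel = PredictionServiceGrpcTransport.create_channel(
+                "aiplatform.googleapis.com"
+            )
+            transport = PredictionServiceGrpcTransport(channel=channel)
+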
+ Raises:
+ google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials``
+ and ``credentials_file`` are passed.
+ """
+ scopes = scopes or cls.AUTH_SCOPES
+ return grpc_helpers.create_channel(
+ host,
+ credentials=credentials,
+ credentials_file=credentials_file,
+ scopes=scopes,
+ quota_project_id=quota_project_id,
+ **kwargs,
+ )
+
+ @property
+ def grpc_channel(self) -> grpc.Channel:
+ """Return the channel designed to connect to this service.
+ """
+ return self._grpc_channel
+
+ @property
+ def predict(
+ self,
+ ) -> Callable[
+ [prediction_service.PredictRequest], prediction_service.PredictResponse
+ ]:
+ r"""Return a callable for the predict method over gRPC.
+
+ Perform an online prediction.
+
+ Returns:
+ Callable[[~.PredictRequest],
+ ~.PredictResponse]:
+ A function that, when called, will call the underlying RPC
+ on the server.
+ """
+ # Generate a "stub function" on-the-fly which will actually make
+ # the request.
+ # gRPC handles serialization and deserialization, so we just need
+ # to pass in the functions for each.
+ if "predict" not in self._stubs:
+ self._stubs["predict"] = self.grpc_channel.unary_unary(
+ "/google.cloud.aiplatform.v1.PredictionService/Predict",
+ request_serializer=prediction_service.PredictRequest.serialize,
+ response_deserializer=prediction_service.PredictResponse.deserialize,
+ )
+ return self._stubs["predict"]
+
+
+__all__ = ("PredictionServiceGrpcTransport",)
diff --git a/google/cloud/aiplatform_v1/services/prediction_service/transports/grpc_asyncio.py b/google/cloud/aiplatform_v1/services/prediction_service/transports/grpc_asyncio.py
new file mode 100644
index 0000000000..620f340813
--- /dev/null
+++ b/google/cloud/aiplatform_v1/services/prediction_service/transports/grpc_asyncio.py
@@ -0,0 +1,285 @@
+# -*- coding: utf-8 -*-
+
+# Copyright 2020 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+import warnings
+from typing import Awaitable, Callable, Dict, Optional, Sequence, Tuple
+
+from google.api_core import gapic_v1 # type: ignore
+from google.api_core import grpc_helpers_async # type: ignore
+from google import auth # type: ignore
+from google.auth import credentials # type: ignore
+from google.auth.transport.grpc import SslCredentials # type: ignore
+
+import grpc # type: ignore
+from grpc.experimental import aio # type: ignore
+
+from google.cloud.aiplatform_v1.types import prediction_service
+
+from .base import PredictionServiceTransport, DEFAULT_CLIENT_INFO
+from .grpc import PredictionServiceGrpcTransport
+
+
+class PredictionServiceGrpcAsyncIOTransport(PredictionServiceTransport):
+ """gRPC AsyncIO backend transport for PredictionService.
+
+ A service for online predictions and explanations.
+
+ This class defines the same methods as the primary client, so the
+ primary client can load the underlying transport implementation
+ and call it.
+
+ It sends protocol buffers over the wire using gRPC (which is built on
+ top of HTTP/2); the ``grpcio`` package must be installed.
+ """
+
+ _grpc_channel: aio.Channel
+ _stubs: Dict[str, Callable] = {}
+
+ @classmethod
+ def create_channel(
+ cls,
+ host: str = "aiplatform.googleapis.com",
+ credentials: credentials.Credentials = None,
+ credentials_file: Optional[str] = None,
+ scopes: Optional[Sequence[str]] = None,
+ quota_project_id: Optional[str] = None,
+ **kwargs,
+ ) -> aio.Channel:
+ """Create and return a gRPC AsyncIO channel object.
+ Args:
+            host (Optional[str]): The host for the channel to use.
+ credentials (Optional[~.Credentials]): The
+ authorization credentials to attach to requests. These
+ credentials identify this application to the service. If
+ none are specified, the client will attempt to ascertain
+ the credentials from the environment.
+ credentials_file (Optional[str]): A file with credentials that can
+ be loaded with :func:`google.auth.load_credentials_from_file`.
+                This argument is mutually exclusive with credentials.
+            scopes (Optional[Sequence[str]]): An optional list of scopes needed for this
+ service. These are only used when credentials are not specified and
+ are passed to :func:`google.auth.default`.
+ quota_project_id (Optional[str]): An optional project to use for billing
+ and quota.
+ kwargs (Optional[dict]): Keyword arguments, which are passed to the
+ channel creation.
+ Returns:
+ aio.Channel: A gRPC AsyncIO channel object.
+ """
+ scopes = scopes or cls.AUTH_SCOPES
+ return grpc_helpers_async.create_channel(
+ host,
+ credentials=credentials,
+ credentials_file=credentials_file,
+ scopes=scopes,
+ quota_project_id=quota_project_id,
+ **kwargs,
+ )
+
+ def __init__(
+ self,
+ *,
+ host: str = "aiplatform.googleapis.com",
+ credentials: credentials.Credentials = None,
+ credentials_file: Optional[str] = None,
+ scopes: Optional[Sequence[str]] = None,
+ channel: aio.Channel = None,
+ api_mtls_endpoint: str = None,
+ client_cert_source: Callable[[], Tuple[bytes, bytes]] = None,
+ ssl_channel_credentials: grpc.ChannelCredentials = None,
+ client_cert_source_for_mtls: Callable[[], Tuple[bytes, bytes]] = None,
+ quota_project_id=None,
+ client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
+ ) -> None:
+ """Instantiate the transport.
+
+ Args:
+ host (Optional[str]): The hostname to connect to.
+ credentials (Optional[google.auth.credentials.Credentials]): The
+ authorization credentials to attach to requests. These
+ credentials identify the application to the service; if none
+ are specified, the client will attempt to ascertain the
+ credentials from the environment.
+ This argument is ignored if ``channel`` is provided.
+ credentials_file (Optional[str]): A file with credentials that can
+ be loaded with :func:`google.auth.load_credentials_from_file`.
+ This argument is ignored if ``channel`` is provided.
+            scopes (Optional[Sequence[str]]): An optional list of scopes needed for this
+ service. These are only used when credentials are not specified and
+ are passed to :func:`google.auth.default`.
+ channel (Optional[aio.Channel]): A ``Channel`` instance through
+ which to make calls.
+ api_mtls_endpoint (Optional[str]): Deprecated. The mutual TLS endpoint.
+ If provided, it overrides the ``host`` argument and tries to create
+ a mutual TLS channel with client SSL credentials from
+                ``client_cert_source`` or application default SSL credentials.
+ client_cert_source (Optional[Callable[[], Tuple[bytes, bytes]]]):
+ Deprecated. A callback to provide client SSL certificate bytes and
+ private key bytes, both in PEM format. It is ignored if
+ ``api_mtls_endpoint`` is None.
+ ssl_channel_credentials (grpc.ChannelCredentials): SSL credentials
+ for grpc channel. It is ignored if ``channel`` is provided.
+ client_cert_source_for_mtls (Optional[Callable[[], Tuple[bytes, bytes]]]):
+ A callback to provide client certificate bytes and private key bytes,
+ both in PEM format. It is used to configure mutual TLS channel. It is
+ ignored if ``channel`` or ``ssl_channel_credentials`` is provided.
+ quota_project_id (Optional[str]): An optional project to use for billing
+ and quota.
+ client_info (google.api_core.gapic_v1.client_info.ClientInfo):
+ The client info used to send a user-agent string along with
+ API requests. If ``None``, then default info will be used.
+ Generally, you only need to set this if you're developing
+ your own client library.
+
+ Raises:
+            google.auth.exceptions.MutualTLSChannelError: If mutual TLS transport
+ creation failed for any reason.
+ google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials``
+ and ``credentials_file`` are passed.
+ """
+ self._ssl_channel_credentials = ssl_channel_credentials
+
+ if api_mtls_endpoint:
+ warnings.warn("api_mtls_endpoint is deprecated", DeprecationWarning)
+ if client_cert_source:
+ warnings.warn("client_cert_source is deprecated", DeprecationWarning)
+
+ if channel:
+ # Sanity check: Ensure that channel and credentials are not both
+ # provided.
+ credentials = False
+
+ # If a channel was explicitly provided, set it.
+ self._grpc_channel = channel
+ self._ssl_channel_credentials = None
+ elif api_mtls_endpoint:
+ host = (
+ api_mtls_endpoint
+ if ":" in api_mtls_endpoint
+ else api_mtls_endpoint + ":443"
+ )
+
+ if credentials is None:
+ credentials, _ = auth.default(
+ scopes=self.AUTH_SCOPES, quota_project_id=quota_project_id
+ )
+
+ # Create SSL credentials with client_cert_source or application
+ # default SSL credentials.
+ if client_cert_source:
+ cert, key = client_cert_source()
+ ssl_credentials = grpc.ssl_channel_credentials(
+ certificate_chain=cert, private_key=key
+ )
+ else:
+ ssl_credentials = SslCredentials().ssl_credentials
+
+ # create a new channel. The provided one is ignored.
+ self._grpc_channel = type(self).create_channel(
+ host,
+ credentials=credentials,
+ credentials_file=credentials_file,
+ ssl_credentials=ssl_credentials,
+ scopes=scopes or self.AUTH_SCOPES,
+ quota_project_id=quota_project_id,
+ options=[
+ ("grpc.max_send_message_length", -1),
+ ("grpc.max_receive_message_length", -1),
+ ],
+ )
+ self._ssl_channel_credentials = ssl_credentials
+ else:
+ host = host if ":" in host else host + ":443"
+
+ if credentials is None:
+ credentials, _ = auth.default(
+ scopes=self.AUTH_SCOPES, quota_project_id=quota_project_id
+ )
+
+ if client_cert_source_for_mtls and not ssl_channel_credentials:
+ cert, key = client_cert_source_for_mtls()
+ self._ssl_channel_credentials = grpc.ssl_channel_credentials(
+ certificate_chain=cert, private_key=key
+ )
+
+ # create a new channel. The provided one is ignored.
+ self._grpc_channel = type(self).create_channel(
+ host,
+ credentials=credentials,
+ credentials_file=credentials_file,
+ ssl_credentials=self._ssl_channel_credentials,
+ scopes=scopes or self.AUTH_SCOPES,
+ quota_project_id=quota_project_id,
+ options=[
+ ("grpc.max_send_message_length", -1),
+ ("grpc.max_receive_message_length", -1),
+ ],
+ )
+
+ # Run the base constructor.
+ super().__init__(
+ host=host,
+ credentials=credentials,
+ credentials_file=credentials_file,
+ scopes=scopes or self.AUTH_SCOPES,
+ quota_project_id=quota_project_id,
+ client_info=client_info,
+ )
+
+ self._stubs = {}
+
+ @property
+ def grpc_channel(self) -> aio.Channel:
+ """Create the channel designed to connect to this service.
+
+ This property caches on the instance; repeated calls return
+ the same channel.
+ """
+ # Return the channel from cache.
+ return self._grpc_channel
+
+ @property
+ def predict(
+ self,
+ ) -> Callable[
+ [prediction_service.PredictRequest],
+ Awaitable[prediction_service.PredictResponse],
+ ]:
+ r"""Return a callable for the predict method over gRPC.
+
+ Perform an online prediction.
+
+ Returns:
+ Callable[[~.PredictRequest],
+ Awaitable[~.PredictResponse]]:
+ A function that, when called, will call the underlying RPC
+ on the server.
+ """
+ # Generate a "stub function" on-the-fly which will actually make
+ # the request.
+ # gRPC handles serialization and deserialization, so we just need
+ # to pass in the functions for each.
+ if "predict" not in self._stubs:
+ self._stubs["predict"] = self.grpc_channel.unary_unary(
+ "/google.cloud.aiplatform.v1.PredictionService/Predict",
+ request_serializer=prediction_service.PredictRequest.serialize,
+ response_deserializer=prediction_service.PredictResponse.deserialize,
+ )
+ return self._stubs["predict"]
+
+
+__all__ = ("PredictionServiceGrpcAsyncIOTransport",)
diff --git a/google/cloud/aiplatform_v1/services/specialist_pool_service/__init__.py b/google/cloud/aiplatform_v1/services/specialist_pool_service/__init__.py
new file mode 100644
index 0000000000..49e9cdf0a0
--- /dev/null
+++ b/google/cloud/aiplatform_v1/services/specialist_pool_service/__init__.py
@@ -0,0 +1,24 @@
+# -*- coding: utf-8 -*-
+
+# Copyright 2020 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+from .client import SpecialistPoolServiceClient
+from .async_client import SpecialistPoolServiceAsyncClient
+
+__all__ = (
+ "SpecialistPoolServiceClient",
+ "SpecialistPoolServiceAsyncClient",
+)
diff --git a/google/cloud/aiplatform_v1/services/specialist_pool_service/async_client.py b/google/cloud/aiplatform_v1/services/specialist_pool_service/async_client.py
new file mode 100644
index 0000000000..496f6aa319
--- /dev/null
+++ b/google/cloud/aiplatform_v1/services/specialist_pool_service/async_client.py
@@ -0,0 +1,639 @@
+# -*- coding: utf-8 -*-
+
+# Copyright 2020 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+from collections import OrderedDict
+import functools
+import re
+from typing import Dict, Sequence, Tuple, Type, Union
+import pkg_resources
+
+import google.api_core.client_options as ClientOptions # type: ignore
+from google.api_core import exceptions # type: ignore
+from google.api_core import gapic_v1 # type: ignore
+from google.api_core import retry as retries # type: ignore
+from google.auth import credentials # type: ignore
+from google.oauth2 import service_account # type: ignore
+
+from google.api_core import operation as ga_operation # type: ignore
+from google.api_core import operation_async # type: ignore
+from google.cloud.aiplatform_v1.services.specialist_pool_service import pagers
+from google.cloud.aiplatform_v1.types import operation as gca_operation
+from google.cloud.aiplatform_v1.types import specialist_pool
+from google.cloud.aiplatform_v1.types import specialist_pool as gca_specialist_pool
+from google.cloud.aiplatform_v1.types import specialist_pool_service
+from google.protobuf import empty_pb2 as empty # type: ignore
+from google.protobuf import field_mask_pb2 as field_mask # type: ignore
+
+from .transports.base import SpecialistPoolServiceTransport, DEFAULT_CLIENT_INFO
+from .transports.grpc_asyncio import SpecialistPoolServiceGrpcAsyncIOTransport
+from .client import SpecialistPoolServiceClient
+
+
+class SpecialistPoolServiceAsyncClient:
+ """A service for creating and managing Customer SpecialistPools.
+ When customers start Data Labeling jobs, they can reuse/create
+ Specialist Pools to bring their own Specialists to label the
+ data. Customers can add/remove Managers for the Specialist Pool
+ on Cloud console, then Managers will get email notifications to
+ manage Specialists and tasks on CrowdCompute console.
+ """
+
+ _client: SpecialistPoolServiceClient
+
+ DEFAULT_ENDPOINT = SpecialistPoolServiceClient.DEFAULT_ENDPOINT
+ DEFAULT_MTLS_ENDPOINT = SpecialistPoolServiceClient.DEFAULT_MTLS_ENDPOINT
+
+ specialist_pool_path = staticmethod(
+ SpecialistPoolServiceClient.specialist_pool_path
+ )
+ parse_specialist_pool_path = staticmethod(
+ SpecialistPoolServiceClient.parse_specialist_pool_path
+ )
+
+ common_billing_account_path = staticmethod(
+ SpecialistPoolServiceClient.common_billing_account_path
+ )
+ parse_common_billing_account_path = staticmethod(
+ SpecialistPoolServiceClient.parse_common_billing_account_path
+ )
+
+ common_folder_path = staticmethod(SpecialistPoolServiceClient.common_folder_path)
+ parse_common_folder_path = staticmethod(
+ SpecialistPoolServiceClient.parse_common_folder_path
+ )
+
+ common_organization_path = staticmethod(
+ SpecialistPoolServiceClient.common_organization_path
+ )
+ parse_common_organization_path = staticmethod(
+ SpecialistPoolServiceClient.parse_common_organization_path
+ )
+
+ common_project_path = staticmethod(SpecialistPoolServiceClient.common_project_path)
+ parse_common_project_path = staticmethod(
+ SpecialistPoolServiceClient.parse_common_project_path
+ )
+
+ common_location_path = staticmethod(
+ SpecialistPoolServiceClient.common_location_path
+ )
+ parse_common_location_path = staticmethod(
+ SpecialistPoolServiceClient.parse_common_location_path
+ )
+
+ from_service_account_info = SpecialistPoolServiceClient.from_service_account_info
+ from_service_account_file = SpecialistPoolServiceClient.from_service_account_file
+ from_service_account_json = from_service_account_file
+
+ @property
+ def transport(self) -> SpecialistPoolServiceTransport:
+ """Return the transport used by the client instance.
+
+ Returns:
+ SpecialistPoolServiceTransport: The transport used by the client instance.
+ """
+ return self._client.transport
+
+ get_transport_class = functools.partial(
+ type(SpecialistPoolServiceClient).get_transport_class,
+ type(SpecialistPoolServiceClient),
+ )
+
+ def __init__(
+ self,
+ *,
+ credentials: credentials.Credentials = None,
+ transport: Union[str, SpecialistPoolServiceTransport] = "grpc_asyncio",
+ client_options: ClientOptions = None,
+ client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
+ ) -> None:
+ """Instantiate the specialist pool service client.
+
+ Args:
+ credentials (Optional[google.auth.credentials.Credentials]): The
+ authorization credentials to attach to requests. These
+ credentials identify the application to the service; if none
+ are specified, the client will attempt to ascertain the
+ credentials from the environment.
+ transport (Union[str, ~.SpecialistPoolServiceTransport]): The
+ transport to use. If set to None, a transport is chosen
+ automatically.
+ client_options (ClientOptions): Custom options for the client. It
+ won't take effect if a ``transport`` instance is provided.
+ (1) The ``api_endpoint`` property can be used to override the
+ default endpoint provided by the client. GOOGLE_API_USE_MTLS_ENDPOINT
+ environment variable can also be used to override the endpoint:
+ "always" (always use the default mTLS endpoint), "never" (always
+ use the default regular endpoint) and "auto" (auto switch to the
+ default mTLS endpoint if client certificate is present, this is
+ the default value). However, the ``api_endpoint`` property takes
+ precedence if provided.
+ (2) If GOOGLE_API_USE_CLIENT_CERTIFICATE environment variable
+ is "true", then the ``client_cert_source`` property can be used
+ to provide client certificate for mutual TLS transport. If
+ not provided, the default SSL client certificate will be used if
+ present. If GOOGLE_API_USE_CLIENT_CERTIFICATE is "false" or not
+ set, no client certificate will be used.
+
+ Raises:
+            google.auth.exceptions.MutualTLSChannelError: If mutual TLS transport
+ creation failed for any reason.
+ """
+
+ self._client = SpecialistPoolServiceClient(
+ credentials=credentials,
+ transport=transport,
+ client_options=client_options,
+ client_info=client_info,
+ )
+
+ async def create_specialist_pool(
+ self,
+ request: specialist_pool_service.CreateSpecialistPoolRequest = None,
+ *,
+ parent: str = None,
+ specialist_pool: gca_specialist_pool.SpecialistPool = None,
+ retry: retries.Retry = gapic_v1.method.DEFAULT,
+ timeout: float = None,
+ metadata: Sequence[Tuple[str, str]] = (),
+ ) -> operation_async.AsyncOperation:
+ r"""Creates a SpecialistPool.
+
+ Args:
+ request (:class:`google.cloud.aiplatform_v1.types.CreateSpecialistPoolRequest`):
+ The request object. Request message for
+ ``SpecialistPoolService.CreateSpecialistPool``.
+ parent (:class:`str`):
+ Required. The parent Project name for the new
+ SpecialistPool. The form is
+ ``projects/{project}/locations/{location}``.
+
+ This corresponds to the ``parent`` field
+ on the ``request`` instance; if ``request`` is provided, this
+ should not be set.
+ specialist_pool (:class:`google.cloud.aiplatform_v1.types.SpecialistPool`):
+ Required. The SpecialistPool to
+ create.
+
+ This corresponds to the ``specialist_pool`` field
+ on the ``request`` instance; if ``request`` is provided, this
+ should not be set.
+
+ retry (google.api_core.retry.Retry): Designation of what errors, if any,
+ should be retried.
+ timeout (float): The timeout for this request.
+ metadata (Sequence[Tuple[str, str]]): Strings which should be
+ sent along with the request as metadata.
+
+ Returns:
+ google.api_core.operation_async.AsyncOperation:
+ An object representing a long-running operation.
+
+ The result type for the operation will be :class:`google.cloud.aiplatform_v1.types.SpecialistPool` SpecialistPool represents customers' own workforce to work on their data
+ labeling jobs. It includes a group of specialist
+ managers who are responsible for managing the
+ labelers in this pool as well as customers' data
+ labeling jobs associated with this pool. Customers
+ create specialist pool as well as start data labeling
+ jobs on Cloud, managers and labelers work with the
+ jobs using CrowdCompute console.
+
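+        Example (an illustrative sketch, run inside a coroutine with an
+        instantiated async client named ``client``; the parent resource name
+        and display name are hypothetical)::
+
+            from google.cloud.aiplatform_v1.types import specialist_pool
+
+            operation = await client.create_specialist_pool(
+                parent="projects/my-project/locations/us-central1",
+                specialist_pool=specialist_pool.SpecialistPool(
+                    display_name="my-specialist-pool",
+                ),
+            )
+            created_pool = await operation.result()
+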
+ """
+ # Create or coerce a protobuf request object.
+ # Sanity check: If we got a request object, we should *not* have
+ # gotten any keyword arguments that map to the request.
+ has_flattened_params = any([parent, specialist_pool])
+ if request is not None and has_flattened_params:
+ raise ValueError(
+ "If the `request` argument is set, then none of "
+ "the individual field arguments should be set."
+ )
+
+ request = specialist_pool_service.CreateSpecialistPoolRequest(request)
+
+ # If we have keyword arguments corresponding to fields on the
+ # request, apply these.
+
+ if parent is not None:
+ request.parent = parent
+ if specialist_pool is not None:
+ request.specialist_pool = specialist_pool
+
+ # Wrap the RPC method; this adds retry and timeout information,
+ # and friendly error handling.
+ rpc = gapic_v1.method_async.wrap_method(
+ self._client._transport.create_specialist_pool,
+ default_timeout=None,
+ client_info=DEFAULT_CLIENT_INFO,
+ )
+
+ # Certain fields should be provided within the metadata header;
+ # add these here.
+ metadata = tuple(metadata) + (
+ gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)),
+ )
+
+ # Send the request.
+ response = await rpc(request, retry=retry, timeout=timeout, metadata=metadata,)
+
+ # Wrap the response in an operation future.
+ response = operation_async.from_gapic(
+ response,
+ self._client._transport.operations_client,
+ gca_specialist_pool.SpecialistPool,
+ metadata_type=specialist_pool_service.CreateSpecialistPoolOperationMetadata,
+ )
+
+ # Done; return the response.
+ return response
+
+ async def get_specialist_pool(
+ self,
+ request: specialist_pool_service.GetSpecialistPoolRequest = None,
+ *,
+ name: str = None,
+ retry: retries.Retry = gapic_v1.method.DEFAULT,
+ timeout: float = None,
+ metadata: Sequence[Tuple[str, str]] = (),
+ ) -> specialist_pool.SpecialistPool:
+ r"""Gets a SpecialistPool.
+
+ Args:
+ request (:class:`google.cloud.aiplatform_v1.types.GetSpecialistPoolRequest`):
+ The request object. Request message for
+ ``SpecialistPoolService.GetSpecialistPool``.
+ name (:class:`str`):
+ Required. The name of the SpecialistPool resource. The
+ form is
+
+ ``projects/{project}/locations/{location}/specialistPools/{specialist_pool}``.
+
+ This corresponds to the ``name`` field
+ on the ``request`` instance; if ``request`` is provided, this
+ should not be set.
+
+ retry (google.api_core.retry.Retry): Designation of what errors, if any,
+ should be retried.
+ timeout (float): The timeout for this request.
+ metadata (Sequence[Tuple[str, str]]): Strings which should be
+ sent along with the request as metadata.
+
+ Returns:
+ google.cloud.aiplatform_v1.types.SpecialistPool:
+ SpecialistPool represents customers'
+ own workforce to work on their data
+ labeling jobs. It includes a group of
+ specialist managers who are responsible
+ for managing the labelers in this pool
+ as well as customers' data labeling jobs
+ associated with this pool.
+ Customers create specialist pool as well
+ as start data labeling jobs on Cloud,
+ managers and labelers work with the jobs
+ using CrowdCompute console.
+
+ """
+ # Create or coerce a protobuf request object.
+ # Sanity check: If we got a request object, we should *not* have
+ # gotten any keyword arguments that map to the request.
+ has_flattened_params = any([name])
+ if request is not None and has_flattened_params:
+ raise ValueError(
+ "If the `request` argument is set, then none of "
+ "the individual field arguments should be set."
+ )
+
+ request = specialist_pool_service.GetSpecialistPoolRequest(request)
+
+ # If we have keyword arguments corresponding to fields on the
+ # request, apply these.
+
+ if name is not None:
+ request.name = name
+
+ # Wrap the RPC method; this adds retry and timeout information,
+ # and friendly error handling.
+ rpc = gapic_v1.method_async.wrap_method(
+ self._client._transport.get_specialist_pool,
+ default_timeout=None,
+ client_info=DEFAULT_CLIENT_INFO,
+ )
+
+ # Certain fields should be provided within the metadata header;
+ # add these here.
+ metadata = tuple(metadata) + (
+ gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)),
+ )
+
+ # Send the request.
+ response = await rpc(request, retry=retry, timeout=timeout, metadata=metadata,)
+
+ # Done; return the response.
+ return response
+
+ async def list_specialist_pools(
+ self,
+ request: specialist_pool_service.ListSpecialistPoolsRequest = None,
+ *,
+ parent: str = None,
+ retry: retries.Retry = gapic_v1.method.DEFAULT,
+ timeout: float = None,
+ metadata: Sequence[Tuple[str, str]] = (),
+ ) -> pagers.ListSpecialistPoolsAsyncPager:
+ r"""Lists SpecialistPools in a Location.
+
+ Args:
+ request (:class:`google.cloud.aiplatform_v1.types.ListSpecialistPoolsRequest`):
+ The request object. Request message for
+ ``SpecialistPoolService.ListSpecialistPools``.
+ parent (:class:`str`):
+ Required. The name of the SpecialistPool's parent
+ resource. Format:
+ ``projects/{project}/locations/{location}``
+
+ This corresponds to the ``parent`` field
+ on the ``request`` instance; if ``request`` is provided, this
+ should not be set.
+
+ retry (google.api_core.retry.Retry): Designation of what errors, if any,
+ should be retried.
+ timeout (float): The timeout for this request.
+ metadata (Sequence[Tuple[str, str]]): Strings which should be
+ sent along with the request as metadata.
+
+ Returns:
+ google.cloud.aiplatform_v1.services.specialist_pool_service.pagers.ListSpecialistPoolsAsyncPager:
+ Response message for
+ ``SpecialistPoolService.ListSpecialistPools``.
+
+ Iterating over this object will yield results and
+ resolve additional pages automatically.
+
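+        Example (an illustrative sketch, run inside a coroutine with an
+        instantiated async client named ``client``; the parent resource name
+        is hypothetical)::
+
+            pager = await client.list_specialist_pools(
+                parent="projects/my-project/locations/us-central1"
+            )
+            async for pool in pager:
+                print(pool.name)
+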
+ """
+ # Create or coerce a protobuf request object.
+ # Sanity check: If we got a request object, we should *not* have
+ # gotten any keyword arguments that map to the request.
+ has_flattened_params = any([parent])
+ if request is not None and has_flattened_params:
+ raise ValueError(
+ "If the `request` argument is set, then none of "
+ "the individual field arguments should be set."
+ )
+
+ request = specialist_pool_service.ListSpecialistPoolsRequest(request)
+
+ # If we have keyword arguments corresponding to fields on the
+ # request, apply these.
+
+ if parent is not None:
+ request.parent = parent
+
+ # Wrap the RPC method; this adds retry and timeout information,
+ # and friendly error handling.
+ rpc = gapic_v1.method_async.wrap_method(
+ self._client._transport.list_specialist_pools,
+ default_timeout=None,
+ client_info=DEFAULT_CLIENT_INFO,
+ )
+
+ # Certain fields should be provided within the metadata header;
+ # add these here.
+ metadata = tuple(metadata) + (
+ gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)),
+ )
+
+ # Send the request.
+ response = await rpc(request, retry=retry, timeout=timeout, metadata=metadata,)
+
+ # This method is paged; wrap the response in a pager, which provides
+ # an `__aiter__` convenience method.
+ response = pagers.ListSpecialistPoolsAsyncPager(
+ method=rpc, request=request, response=response, metadata=metadata,
+ )
+
+ # Done; return the response.
+ return response
+
+ async def delete_specialist_pool(
+ self,
+ request: specialist_pool_service.DeleteSpecialistPoolRequest = None,
+ *,
+ name: str = None,
+ retry: retries.Retry = gapic_v1.method.DEFAULT,
+ timeout: float = None,
+ metadata: Sequence[Tuple[str, str]] = (),
+ ) -> operation_async.AsyncOperation:
+ r"""Deletes a SpecialistPool as well as all Specialists
+ in the pool.
+
+ Args:
+ request (:class:`google.cloud.aiplatform_v1.types.DeleteSpecialistPoolRequest`):
+ The request object. Request message for
+ ``SpecialistPoolService.DeleteSpecialistPool``.
+ name (:class:`str`):
+ Required. The resource name of the SpecialistPool to
+ delete. Format:
+ ``projects/{project}/locations/{location}/specialistPools/{specialist_pool}``
+
+ This corresponds to the ``name`` field
+ on the ``request`` instance; if ``request`` is provided, this
+ should not be set.
+
+ retry (google.api_core.retry.Retry): Designation of what errors, if any,
+ should be retried.
+ timeout (float): The timeout for this request.
+ metadata (Sequence[Tuple[str, str]]): Strings which should be
+ sent along with the request as metadata.
+
+ Returns:
+ google.api_core.operation_async.AsyncOperation:
+ An object representing a long-running operation.
+
+ The result type for the operation will be :class:`google.protobuf.empty_pb2.Empty` A generic empty message that you can re-use to avoid defining duplicated
+ empty messages in your APIs. A typical example is to
+ use it as the request or the response type of an API
+ method. For instance:
+
+ service Foo {
+ rpc Bar(google.protobuf.Empty) returns
+ (google.protobuf.Empty);
+
+ }
+
+ The JSON representation for Empty is empty JSON
+ object {}.
+
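+        Example (an illustrative sketch, run inside a coroutine with an
+        instantiated async client named ``client``; the resource name is
+        hypothetical)::
+
+            operation = await client.delete_specialist_pool(
+                name="projects/my-project/locations/us-central1/specialistPools/456",
+            )
+            await operation.result()  # Resolves to google.protobuf.Empty.
+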
+ """
+ # Create or coerce a protobuf request object.
+ # Sanity check: If we got a request object, we should *not* have
+ # gotten any keyword arguments that map to the request.
+ has_flattened_params = any([name])
+ if request is not None and has_flattened_params:
+ raise ValueError(
+ "If the `request` argument is set, then none of "
+ "the individual field arguments should be set."
+ )
+
+ request = specialist_pool_service.DeleteSpecialistPoolRequest(request)
+
+ # If we have keyword arguments corresponding to fields on the
+ # request, apply these.
+
+ if name is not None:
+ request.name = name
+
+ # Wrap the RPC method; this adds retry and timeout information,
+ # and friendly error handling.
+ rpc = gapic_v1.method_async.wrap_method(
+ self._client._transport.delete_specialist_pool,
+ default_timeout=None,
+ client_info=DEFAULT_CLIENT_INFO,
+ )
+
+ # Certain fields should be provided within the metadata header;
+ # add these here.
+ metadata = tuple(metadata) + (
+ gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)),
+ )
+
+ # Send the request.
+ response = await rpc(request, retry=retry, timeout=timeout, metadata=metadata,)
+
+ # Wrap the response in an operation future.
+ response = operation_async.from_gapic(
+ response,
+ self._client._transport.operations_client,
+ empty.Empty,
+ metadata_type=gca_operation.DeleteOperationMetadata,
+ )
+
+ # Done; return the response.
+ return response
+
+ async def update_specialist_pool(
+ self,
+ request: specialist_pool_service.UpdateSpecialistPoolRequest = None,
+ *,
+ specialist_pool: gca_specialist_pool.SpecialistPool = None,
+ update_mask: field_mask.FieldMask = None,
+ retry: retries.Retry = gapic_v1.method.DEFAULT,
+ timeout: float = None,
+ metadata: Sequence[Tuple[str, str]] = (),
+ ) -> operation_async.AsyncOperation:
+ r"""Updates a SpecialistPool.
+
+ Args:
+ request (:class:`google.cloud.aiplatform_v1.types.UpdateSpecialistPoolRequest`):
+ The request object. Request message for
+ ``SpecialistPoolService.UpdateSpecialistPool``.
+ specialist_pool (:class:`google.cloud.aiplatform_v1.types.SpecialistPool`):
+ Required. The SpecialistPool which
+ replaces the resource on the server.
+
+ This corresponds to the ``specialist_pool`` field
+ on the ``request`` instance; if ``request`` is provided, this
+ should not be set.
+ update_mask (:class:`google.protobuf.field_mask_pb2.FieldMask`):
+ Required. The update mask applies to
+ the resource.
+
+ This corresponds to the ``update_mask`` field
+ on the ``request`` instance; if ``request`` is provided, this
+ should not be set.
+
+ retry (google.api_core.retry.Retry): Designation of what errors, if any,
+ should be retried.
+ timeout (float): The timeout for this request.
+ metadata (Sequence[Tuple[str, str]]): Strings which should be
+ sent along with the request as metadata.
+
+ Returns:
+ google.api_core.operation_async.AsyncOperation:
+ An object representing a long-running operation.
+
+ The result type for the operation will be :class:`google.cloud.aiplatform_v1.types.SpecialistPool` SpecialistPool represents customers' own workforce to work on their data
+ labeling jobs. It includes a group of specialist
+ managers who are responsible for managing the
+ labelers in this pool as well as customers' data
+ labeling jobs associated with this pool. Customers
+ create a specialist pool and start data labeling
+ jobs on Cloud; managers and labelers work with the
+ jobs using the CrowdCompute console.
+
+ """
+ # Create or coerce a protobuf request object.
+ # Sanity check: If we got a request object, we should *not* have
+ # gotten any keyword arguments that map to the request.
+ has_flattened_params = any([specialist_pool, update_mask])
+ if request is not None and has_flattened_params:
+ raise ValueError(
+ "If the `request` argument is set, then none of "
+ "the individual field arguments should be set."
+ )
+
+ request = specialist_pool_service.UpdateSpecialistPoolRequest(request)
+
+ # If we have keyword arguments corresponding to fields on the
+ # request, apply these.
+
+ if specialist_pool is not None:
+ request.specialist_pool = specialist_pool
+ if update_mask is not None:
+ request.update_mask = update_mask
+
+ # Wrap the RPC method; this adds retry and timeout information,
+ # and friendly error handling.
+ rpc = gapic_v1.method_async.wrap_method(
+ self._client._transport.update_specialist_pool,
+ default_timeout=None,
+ client_info=DEFAULT_CLIENT_INFO,
+ )
+
+ # Certain fields should be provided within the metadata header;
+ # add these here.
+ metadata = tuple(metadata) + (
+ gapic_v1.routing_header.to_grpc_metadata(
+ (("specialist_pool.name", request.specialist_pool.name),)
+ ),
+ )
+
+ # Send the request.
+ response = await rpc(request, retry=retry, timeout=timeout, metadata=metadata,)
+
+ # Wrap the response in an operation future.
+ response = operation_async.from_gapic(
+ response,
+ self._client._transport.operations_client,
+ gca_specialist_pool.SpecialistPool,
+ metadata_type=specialist_pool_service.UpdateSpecialistPoolOperationMetadata,
+ )
+
+ # Done; return the response.
+ return response
+
+
+try:
+ DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo(
+ gapic_version=pkg_resources.get_distribution(
+ "google-cloud-aiplatform",
+ ).version,
+ )
+except pkg_resources.DistributionNotFound:
+ DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo()
+
+
+__all__ = ("SpecialistPoolServiceAsyncClient",)
diff --git a/google/cloud/aiplatform_v1/services/specialist_pool_service/client.py b/google/cloud/aiplatform_v1/services/specialist_pool_service/client.py
new file mode 100644
index 0000000000..c6429b54f8
--- /dev/null
+++ b/google/cloud/aiplatform_v1/services/specialist_pool_service/client.py
@@ -0,0 +1,841 @@
+# -*- coding: utf-8 -*-
+
+# Copyright 2020 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+from collections import OrderedDict
+from distutils import util
+import os
+import re
+from typing import Callable, Dict, Optional, Sequence, Tuple, Type, Union
+import pkg_resources
+
+from google.api_core import client_options as client_options_lib # type: ignore
+from google.api_core import exceptions # type: ignore
+from google.api_core import gapic_v1 # type: ignore
+from google.api_core import retry as retries # type: ignore
+from google.auth import credentials # type: ignore
+from google.auth.transport import mtls # type: ignore
+from google.auth.transport.grpc import SslCredentials # type: ignore
+from google.auth.exceptions import MutualTLSChannelError # type: ignore
+from google.oauth2 import service_account # type: ignore
+
+from google.api_core import operation as ga_operation # type: ignore
+from google.api_core import operation_async # type: ignore
+from google.cloud.aiplatform_v1.services.specialist_pool_service import pagers
+from google.cloud.aiplatform_v1.types import operation as gca_operation
+from google.cloud.aiplatform_v1.types import specialist_pool
+from google.cloud.aiplatform_v1.types import specialist_pool as gca_specialist_pool
+from google.cloud.aiplatform_v1.types import specialist_pool_service
+from google.protobuf import empty_pb2 as empty # type: ignore
+from google.protobuf import field_mask_pb2 as field_mask # type: ignore
+
+from .transports.base import SpecialistPoolServiceTransport, DEFAULT_CLIENT_INFO
+from .transports.grpc import SpecialistPoolServiceGrpcTransport
+from .transports.grpc_asyncio import SpecialistPoolServiceGrpcAsyncIOTransport
+
+
+class SpecialistPoolServiceClientMeta(type):
+ """Metaclass for the SpecialistPoolService client.
+
+ This provides class-level methods for building and retrieving
+ support objects (e.g. transport) without polluting the client instance
+ objects.
+ """
+
+ _transport_registry = (
+ OrderedDict()
+ ) # type: Dict[str, Type[SpecialistPoolServiceTransport]]
+ _transport_registry["grpc"] = SpecialistPoolServiceGrpcTransport
+ _transport_registry["grpc_asyncio"] = SpecialistPoolServiceGrpcAsyncIOTransport
+
+ def get_transport_class(
+ cls, label: str = None,
+ ) -> Type[SpecialistPoolServiceTransport]:
+ """Return an appropriate transport class.
+
+ Args:
+ label: The name of the desired transport. If none is
+ provided, then the first transport in the registry is used.
+
+ Returns:
+ The transport class to use.
+ """
+ # If a specific transport is requested, return that one.
+ if label:
+ return cls._transport_registry[label]
+
+ # No transport is requested; return the default (that is, the first one
+ # in the dictionary).
+ return next(iter(cls._transport_registry.values()))
+
+
+class SpecialistPoolServiceClient(metaclass=SpecialistPoolServiceClientMeta):
+ """A service for creating and managing Customer SpecialistPools.
+ When customers start Data Labeling jobs, they can reuse/create
+ Specialist Pools to bring their own Specialists to label the
+ data. Customers can add/remove Managers for the Specialist Pool
+ on Cloud console, then Managers will get email notifications to
+ manage Specialists and tasks on CrowdCompute console.
+ """
+
+ @staticmethod
+ def _get_default_mtls_endpoint(api_endpoint):
+ """Convert api endpoint to mTLS endpoint.
+ Convert "*.sandbox.googleapis.com" and "*.googleapis.com" to
+ "*.mtls.sandbox.googleapis.com" and "*.mtls.googleapis.com" respectively.
+ Args:
+ api_endpoint (Optional[str]): the api endpoint to convert.
+ Returns:
+ str: converted mTLS api endpoint.
+ """
+ if not api_endpoint:
+ return api_endpoint
+
+ mtls_endpoint_re = re.compile(
+ r"(?P[^.]+)(?P\.mtls)?(?P\.sandbox)?(?P\.googleapis\.com)?"
+ )
+
+ m = mtls_endpoint_re.match(api_endpoint)
+ name, mtls, sandbox, googledomain = m.groups()
+ if mtls or not googledomain:
+ return api_endpoint
+
+ if sandbox:
+ return api_endpoint.replace(
+ "sandbox.googleapis.com", "mtls.sandbox.googleapis.com"
+ )
+
+ return api_endpoint.replace(".googleapis.com", ".mtls.googleapis.com")
+
+ DEFAULT_ENDPOINT = "aiplatform.googleapis.com"
+ DEFAULT_MTLS_ENDPOINT = _get_default_mtls_endpoint.__func__( # type: ignore
+ DEFAULT_ENDPOINT
+ )
+
+ @classmethod
+ def from_service_account_info(cls, info: dict, *args, **kwargs):
+ """Creates an instance of this client using the provided credentials info.
+
+ Args:
+ info (dict): The service account private key info.
+ args: Additional arguments to pass to the constructor.
+ kwargs: Additional arguments to pass to the constructor.
+
+ Returns:
+ SpecialistPoolServiceClient: The constructed client.
+ """
+ credentials = service_account.Credentials.from_service_account_info(info)
+ kwargs["credentials"] = credentials
+ return cls(*args, **kwargs)
+
+ @classmethod
+ def from_service_account_file(cls, filename: str, *args, **kwargs):
+ """Creates an instance of this client using the provided credentials
+ file.
+
+ Args:
+ filename (str): The path to the service account private key json
+ file.
+ args: Additional arguments to pass to the constructor.
+ kwargs: Additional arguments to pass to the constructor.
+
+ Returns:
+ SpecialistPoolServiceClient: The constructed client.
+ """
+ credentials = service_account.Credentials.from_service_account_file(filename)
+ kwargs["credentials"] = credentials
+ return cls(*args, **kwargs)
+
+ from_service_account_json = from_service_account_file
+
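+ # Illustrative usage (a sketch; the key file path is a placeholder):
+ #
+ #   client = SpecialistPoolServiceClient.from_service_account_file(
+ #       "service-account.json"
+ #   )
+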
+ @property
+ def transport(self) -> SpecialistPoolServiceTransport:
+ """Return the transport used by the client instance.
+
+ Returns:
+ SpecialistPoolServiceTransport: The transport used by the client instance.
+ """
+ return self._transport
+
+ @staticmethod
+ def specialist_pool_path(project: str, location: str, specialist_pool: str,) -> str:
+ """Return a fully-qualified specialist_pool string."""
+ return "projects/{project}/locations/{location}/specialistPools/{specialist_pool}".format(
+ project=project, location=location, specialist_pool=specialist_pool,
+ )
+
+ @staticmethod
+ def parse_specialist_pool_path(path: str) -> Dict[str, str]:
+ """Parse a specialist_pool path into its component segments."""
+ m = re.match(
+ r"^projects/(?P.+?)/locations/(?P.+?)/specialistPools/(?P.+?)$",
+ path,
+ )
+ return m.groupdict() if m else {}
+
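+ # Illustrative use of the path helpers (a sketch; the IDs are placeholders):
+ #
+ #   path = SpecialistPoolServiceClient.specialist_pool_path(
+ #       "my-project", "us-central1", "my-pool"
+ #   )
+ #   # -> "projects/my-project/locations/us-central1/specialistPools/my-pool"
+ #   SpecialistPoolServiceClient.parse_specialist_pool_path(path)
+ #   # -> {"project": "my-project", "location": "us-central1",
+ #   #     "specialist_pool": "my-pool"}
+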
+ @staticmethod
+ def common_billing_account_path(billing_account: str,) -> str:
+ """Return a fully-qualified billing_account string."""
+ return "billingAccounts/{billing_account}".format(
+ billing_account=billing_account,
+ )
+
+ @staticmethod
+ def parse_common_billing_account_path(path: str) -> Dict[str, str]:
+ """Parse a billing_account path into its component segments."""
+ m = re.match(r"^billingAccounts/(?P.+?)$", path)
+ return m.groupdict() if m else {}
+
+ @staticmethod
+ def common_folder_path(folder: str,) -> str:
+ """Return a fully-qualified folder string."""
+ return "folders/{folder}".format(folder=folder,)
+
+ @staticmethod
+ def parse_common_folder_path(path: str) -> Dict[str, str]:
+ """Parse a folder path into its component segments."""
+ m = re.match(r"^folders/(?P.+?)$", path)
+ return m.groupdict() if m else {}
+
+ @staticmethod
+ def common_organization_path(organization: str,) -> str:
+ """Return a fully-qualified organization string."""
+ return "organizations/{organization}".format(organization=organization,)
+
+ @staticmethod
+ def parse_common_organization_path(path: str) -> Dict[str, str]:
+ """Parse a organization path into its component segments."""
+ m = re.match(r"^organizations/(?P.+?)$", path)
+ return m.groupdict() if m else {}
+
+ @staticmethod
+ def common_project_path(project: str,) -> str:
+ """Return a fully-qualified project string."""
+ return "projects/{project}".format(project=project,)
+
+ @staticmethod
+ def parse_common_project_path(path: str) -> Dict[str, str]:
+ """Parse a project path into its component segments."""
+ m = re.match(r"^projects/(?P.+?)$", path)
+ return m.groupdict() if m else {}
+
+ @staticmethod
+ def common_location_path(project: str, location: str,) -> str:
+ """Return a fully-qualified location string."""
+ return "projects/{project}/locations/{location}".format(
+ project=project, location=location,
+ )
+
+ @staticmethod
+ def parse_common_location_path(path: str) -> Dict[str, str]:
+ """Parse a location path into its component segments."""
+ m = re.match(r"^projects/(?P.+?)/locations/(?P.+?)$", path)
+ return m.groupdict() if m else {}
+
+ def __init__(
+ self,
+ *,
+ credentials: Optional[credentials.Credentials] = None,
+ transport: Union[str, SpecialistPoolServiceTransport, None] = None,
+ client_options: Optional[client_options_lib.ClientOptions] = None,
+ client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
+ ) -> None:
+ """Instantiate the specialist pool service client.
+
+ Args:
+ credentials (Optional[google.auth.credentials.Credentials]): The
+ authorization credentials to attach to requests. These
+ credentials identify the application to the service; if none
+ are specified, the client will attempt to ascertain the
+ credentials from the environment.
+ transport (Union[str, SpecialistPoolServiceTransport]): The
+ transport to use. If set to None, a transport is chosen
+ automatically.
+ client_options (google.api_core.client_options.ClientOptions): Custom options for the
+ client. It won't take effect if a ``transport`` instance is provided.
+ (1) The ``api_endpoint`` property can be used to override the
+ default endpoint provided by the client. GOOGLE_API_USE_MTLS_ENDPOINT
+ environment variable can also be used to override the endpoint:
+ "always" (always use the default mTLS endpoint), "never" (always
+ use the default regular endpoint) and "auto" (auto switch to the
+ default mTLS endpoint if client certificate is present, this is
+ the default value). However, the ``api_endpoint`` property takes
+ precedence if provided.
+ (2) If GOOGLE_API_USE_CLIENT_CERTIFICATE environment variable
+ is "true", then the ``client_cert_source`` property can be used
+ to provide client certificate for mutual TLS transport. If
+ not provided, the default SSL client certificate will be used if
+ present. If GOOGLE_API_USE_CLIENT_CERTIFICATE is "false" or not
+ set, no client certificate will be used.
+ client_info (google.api_core.gapic_v1.client_info.ClientInfo):
+ The client info used to send a user-agent string along with
+ API requests. If ``None``, then default info will be used.
+ Generally, you only need to set this if you're developing
+ your own client library.
+
+ Raises:
+ google.auth.exceptions.MutualTLSChannelError: If mutual TLS transport
+ creation failed for any reason.
+ """
+ if isinstance(client_options, dict):
+ client_options = client_options_lib.from_dict(client_options)
+ if client_options is None:
+ client_options = client_options_lib.ClientOptions()
+
+ # Create SSL credentials for mutual TLS if needed.
+ use_client_cert = bool(
+ util.strtobool(os.getenv("GOOGLE_API_USE_CLIENT_CERTIFICATE", "false"))
+ )
+
+ client_cert_source_func = None
+ is_mtls = False
+ if use_client_cert:
+ if client_options.client_cert_source:
+ is_mtls = True
+ client_cert_source_func = client_options.client_cert_source
+ else:
+ is_mtls = mtls.has_default_client_cert_source()
+ client_cert_source_func = (
+ mtls.default_client_cert_source() if is_mtls else None
+ )
+
+ # Figure out which api endpoint to use.
+ if client_options.api_endpoint is not None:
+ api_endpoint = client_options.api_endpoint
+ else:
+ use_mtls_env = os.getenv("GOOGLE_API_USE_MTLS_ENDPOINT", "auto")
+ if use_mtls_env == "never":
+ api_endpoint = self.DEFAULT_ENDPOINT
+ elif use_mtls_env == "always":
+ api_endpoint = self.DEFAULT_MTLS_ENDPOINT
+ elif use_mtls_env == "auto":
+ api_endpoint = (
+ self.DEFAULT_MTLS_ENDPOINT if is_mtls else self.DEFAULT_ENDPOINT
+ )
+ else:
+ raise MutualTLSChannelError(
+ "Unsupported GOOGLE_API_USE_MTLS_ENDPOINT value. Accepted values: never, auto, always"
+ )
+
+ # Save or instantiate the transport.
+ # Ordinarily, we provide the transport, but allowing a custom transport
+ # instance provides an extensibility point for unusual situations.
+ if isinstance(transport, SpecialistPoolServiceTransport):
+ # transport is a SpecialistPoolServiceTransport instance.
+ if credentials or client_options.credentials_file:
+ raise ValueError(
+ "When providing a transport instance, "
+ "provide its credentials directly."
+ )
+ if client_options.scopes:
+ raise ValueError(
+ "When providing a transport instance, "
+ "provide its scopes directly."
+ )
+ self._transport = transport
+ else:
+ Transport = type(self).get_transport_class(transport)
+ self._transport = Transport(
+ credentials=credentials,
+ credentials_file=client_options.credentials_file,
+ host=api_endpoint,
+ scopes=client_options.scopes,
+ client_cert_source_for_mtls=client_cert_source_func,
+ quota_project_id=client_options.quota_project_id,
+ client_info=client_info,
+ )
+
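+ # Illustrative construction (a sketch; the regional endpoint below is an
+ # assumption and default credentials are used):
+ #
+ #   from google.api_core import client_options as client_options_lib
+ #
+ #   client = SpecialistPoolServiceClient(
+ #       client_options=client_options_lib.ClientOptions(
+ #           api_endpoint="us-central1-aiplatform.googleapis.com"
+ #       )
+ #   )
+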
+ def create_specialist_pool(
+ self,
+ request: specialist_pool_service.CreateSpecialistPoolRequest = None,
+ *,
+ parent: str = None,
+ specialist_pool: gca_specialist_pool.SpecialistPool = None,
+ retry: retries.Retry = gapic_v1.method.DEFAULT,
+ timeout: float = None,
+ metadata: Sequence[Tuple[str, str]] = (),
+ ) -> ga_operation.Operation:
+ r"""Creates a SpecialistPool.
+
+ Args:
+ request (google.cloud.aiplatform_v1.types.CreateSpecialistPoolRequest):
+ The request object. Request message for
+ ``SpecialistPoolService.CreateSpecialistPool``.
+ parent (str):
+ Required. The parent Project name for the new
+ SpecialistPool. The form is
+ ``projects/{project}/locations/{location}``.
+
+ This corresponds to the ``parent`` field
+ on the ``request`` instance; if ``request`` is provided, this
+ should not be set.
+ specialist_pool (google.cloud.aiplatform_v1.types.SpecialistPool):
+ Required. The SpecialistPool to
+ create.
+
+ This corresponds to the ``specialist_pool`` field
+ on the ``request`` instance; if ``request`` is provided, this
+ should not be set.
+
+ retry (google.api_core.retry.Retry): Designation of what errors, if any,
+ should be retried.
+ timeout (float): The timeout for this request.
+ metadata (Sequence[Tuple[str, str]]): Strings which should be
+ sent along with the request as metadata.
+
+ Returns:
+ google.api_core.operation.Operation:
+ An object representing a long-running operation.
+
+ The result type for the operation will be :class:`google.cloud.aiplatform_v1.types.SpecialistPool` SpecialistPool represents customers' own workforce to work on their data
+ labeling jobs. It includes a group of specialist
+ managers who are responsible for managing the
+ labelers in this pool as well as customers' data
+ labeling jobs associated with this pool. Customers
+ create a specialist pool and start data labeling
+ jobs on Cloud; managers and labelers work with the
+ jobs using the CrowdCompute console.
+
+ """
+ # Create or coerce a protobuf request object.
+ # Sanity check: If we got a request object, we should *not* have
+ # gotten any keyword arguments that map to the request.
+ has_flattened_params = any([parent, specialist_pool])
+ if request is not None and has_flattened_params:
+ raise ValueError(
+ "If the `request` argument is set, then none of "
+ "the individual field arguments should be set."
+ )
+
+ # Minor optimization to avoid making a copy if the user passes
+ # in a specialist_pool_service.CreateSpecialistPoolRequest.
+ # There's no risk of modifying the input as we've already verified
+ # there are no flattened fields.
+ if not isinstance(request, specialist_pool_service.CreateSpecialistPoolRequest):
+ request = specialist_pool_service.CreateSpecialistPoolRequest(request)
+
+ # If we have keyword arguments corresponding to fields on the
+ # request, apply these.
+
+ if parent is not None:
+ request.parent = parent
+ if specialist_pool is not None:
+ request.specialist_pool = specialist_pool
+
+ # Wrap the RPC method; this adds retry and timeout information,
+ # and friendly error handling.
+ rpc = self._transport._wrapped_methods[self._transport.create_specialist_pool]
+
+ # Certain fields should be provided within the metadata header;
+ # add these here.
+ metadata = tuple(metadata) + (
+ gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)),
+ )
+
+ # Send the request.
+ response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,)
+
+ # Wrap the response in an operation future.
+ response = ga_operation.from_gapic(
+ response,
+ self._transport.operations_client,
+ gca_specialist_pool.SpecialistPool,
+ metadata_type=specialist_pool_service.CreateSpecialistPoolOperationMetadata,
+ )
+
+ # Done; return the response.
+ return response
+
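+ # Illustrative call (a sketch; the parent and display name are placeholders):
+ #
+ #   from google.cloud.aiplatform_v1.types import SpecialistPool
+ #
+ #   operation = client.create_specialist_pool(
+ #       parent="projects/my-project/locations/us-central1",
+ #       specialist_pool=SpecialistPool(display_name="my-pool"),
+ #   )
+ #   pool = operation.result()  # Blocks until the long-running operation completes.
+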
+ def get_specialist_pool(
+ self,
+ request: specialist_pool_service.GetSpecialistPoolRequest = None,
+ *,
+ name: str = None,
+ retry: retries.Retry = gapic_v1.method.DEFAULT,
+ timeout: float = None,
+ metadata: Sequence[Tuple[str, str]] = (),
+ ) -> specialist_pool.SpecialistPool:
+ r"""Gets a SpecialistPool.
+
+ Args:
+ request (google.cloud.aiplatform_v1.types.GetSpecialistPoolRequest):
+ The request object. Request message for
+ ``SpecialistPoolService.GetSpecialistPool``.
+ name (str):
+ Required. The name of the SpecialistPool resource. The
+ form is
+
+ ``projects/{project}/locations/{location}/specialistPools/{specialist_pool}``.
+
+ This corresponds to the ``name`` field
+ on the ``request`` instance; if ``request`` is provided, this
+ should not be set.
+
+ retry (google.api_core.retry.Retry): Designation of what errors, if any,
+ should be retried.
+ timeout (float): The timeout for this request.
+ metadata (Sequence[Tuple[str, str]]): Strings which should be
+ sent along with the request as metadata.
+
+ Returns:
+ google.cloud.aiplatform_v1.types.SpecialistPool:
+ SpecialistPool represents customers'
+ own workforce to work on their data
+ labeling jobs. It includes a group of
+ specialist managers who are responsible
+ for managing the labelers in this pool
+ as well as customers' data labeling jobs
+ associated with this pool.
+ Customers create a specialist pool and
+ start data labeling jobs on Cloud;
+ managers and labelers work with the jobs
+ using the CrowdCompute console.
+
+ """
+ # Create or coerce a protobuf request object.
+ # Sanity check: If we got a request object, we should *not* have
+ # gotten any keyword arguments that map to the request.
+ has_flattened_params = any([name])
+ if request is not None and has_flattened_params:
+ raise ValueError(
+ "If the `request` argument is set, then none of "
+ "the individual field arguments should be set."
+ )
+
+ # Minor optimization to avoid making a copy if the user passes
+ # in a specialist_pool_service.GetSpecialistPoolRequest.
+ # There's no risk of modifying the input as we've already verified
+ # there are no flattened fields.
+ if not isinstance(request, specialist_pool_service.GetSpecialistPoolRequest):
+ request = specialist_pool_service.GetSpecialistPoolRequest(request)
+
+ # If we have keyword arguments corresponding to fields on the
+ # request, apply these.
+
+ if name is not None:
+ request.name = name
+
+ # Wrap the RPC method; this adds retry and timeout information,
+ # and friendly error handling.
+ rpc = self._transport._wrapped_methods[self._transport.get_specialist_pool]
+
+ # Certain fields should be provided within the metadata header;
+ # add these here.
+ metadata = tuple(metadata) + (
+ gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)),
+ )
+
+ # Send the request.
+ response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,)
+
+ # Done; return the response.
+ return response
+
+ def list_specialist_pools(
+ self,
+ request: specialist_pool_service.ListSpecialistPoolsRequest = None,
+ *,
+ parent: str = None,
+ retry: retries.Retry = gapic_v1.method.DEFAULT,
+ timeout: float = None,
+ metadata: Sequence[Tuple[str, str]] = (),
+ ) -> pagers.ListSpecialistPoolsPager:
+ r"""Lists SpecialistPools in a Location.
+
+ Args:
+ request (google.cloud.aiplatform_v1.types.ListSpecialistPoolsRequest):
+ The request object. Request message for
+ ``SpecialistPoolService.ListSpecialistPools``.
+ parent (str):
+ Required. The name of the SpecialistPool's parent
+ resource. Format:
+ ``projects/{project}/locations/{location}``
+
+ This corresponds to the ``parent`` field
+ on the ``request`` instance; if ``request`` is provided, this
+ should not be set.
+
+ retry (google.api_core.retry.Retry): Designation of what errors, if any,
+ should be retried.
+ timeout (float): The timeout for this request.
+ metadata (Sequence[Tuple[str, str]]): Strings which should be
+ sent along with the request as metadata.
+
+ Returns:
+ google.cloud.aiplatform_v1.services.specialist_pool_service.pagers.ListSpecialistPoolsPager:
+ Response message for
+ ``SpecialistPoolService.ListSpecialistPools``.
+
+ Iterating over this object will yield results and
+ resolve additional pages automatically.
+
+ """
+ # Create or coerce a protobuf request object.
+ # Sanity check: If we got a request object, we should *not* have
+ # gotten any keyword arguments that map to the request.
+ has_flattened_params = any([parent])
+ if request is not None and has_flattened_params:
+ raise ValueError(
+ "If the `request` argument is set, then none of "
+ "the individual field arguments should be set."
+ )
+
+ # Minor optimization to avoid making a copy if the user passes
+ # in a specialist_pool_service.ListSpecialistPoolsRequest.
+ # There's no risk of modifying the input as we've already verified
+ # there are no flattened fields.
+ if not isinstance(request, specialist_pool_service.ListSpecialistPoolsRequest):
+ request = specialist_pool_service.ListSpecialistPoolsRequest(request)
+
+ # If we have keyword arguments corresponding to fields on the
+ # request, apply these.
+
+ if parent is not None:
+ request.parent = parent
+
+ # Wrap the RPC method; this adds retry and timeout information,
+ # and friendly error handling.
+ rpc = self._transport._wrapped_methods[self._transport.list_specialist_pools]
+
+ # Certain fields should be provided within the metadata header;
+ # add these here.
+ metadata = tuple(metadata) + (
+ gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)),
+ )
+
+ # Send the request.
+ response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,)
+
+ # This method is paged; wrap the response in a pager, which provides
+ # an `__iter__` convenience method.
+ response = pagers.ListSpecialistPoolsPager(
+ method=rpc, request=request, response=response, metadata=metadata,
+ )
+
+ # Done; return the response.
+ return response
+
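+ # Illustrative iteration (a sketch; the parent is a placeholder). The pager
+ # fetches additional pages transparently as the loop advances:
+ #
+ #   for pool in client.list_specialist_pools(
+ #       parent="projects/my-project/locations/us-central1"
+ #   ):
+ #       print(pool.display_name)
+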
+ def delete_specialist_pool(
+ self,
+ request: specialist_pool_service.DeleteSpecialistPoolRequest = None,
+ *,
+ name: str = None,
+ retry: retries.Retry = gapic_v1.method.DEFAULT,
+ timeout: float = None,
+ metadata: Sequence[Tuple[str, str]] = (),
+ ) -> ga_operation.Operation:
+ r"""Deletes a SpecialistPool as well as all Specialists
+ in the pool.
+
+ Args:
+ request (google.cloud.aiplatform_v1.types.DeleteSpecialistPoolRequest):
+ The request object. Request message for
+ ``SpecialistPoolService.DeleteSpecialistPool``.
+ name (str):
+ Required. The resource name of the SpecialistPool to
+ delete. Format:
+ ``projects/{project}/locations/{location}/specialistPools/{specialist_pool}``
+
+ This corresponds to the ``name`` field
+ on the ``request`` instance; if ``request`` is provided, this
+ should not be set.
+
+ retry (google.api_core.retry.Retry): Designation of what errors, if any,
+ should be retried.
+ timeout (float): The timeout for this request.
+ metadata (Sequence[Tuple[str, str]]): Strings which should be
+ sent along with the request as metadata.
+
+ Returns:
+ google.api_core.operation.Operation:
+ An object representing a long-running operation.
+
+ The result type for the operation will be :class:`google.protobuf.empty_pb2.Empty` A generic empty message that you can re-use to avoid defining duplicated
+ empty messages in your APIs. A typical example is to
+ use it as the request or the response type of an API
+ method. For instance:
+
+ service Foo {
+ rpc Bar(google.protobuf.Empty) returns
+ (google.protobuf.Empty);
+
+ }
+
+ The JSON representation for Empty is an empty
+ JSON object {}.
+
+ """
+ # Create or coerce a protobuf request object.
+ # Sanity check: If we got a request object, we should *not* have
+ # gotten any keyword arguments that map to the request.
+ has_flattened_params = any([name])
+ if request is not None and has_flattened_params:
+ raise ValueError(
+ "If the `request` argument is set, then none of "
+ "the individual field arguments should be set."
+ )
+
+ # Minor optimization to avoid making a copy if the user passes
+ # in a specialist_pool_service.DeleteSpecialistPoolRequest.
+ # There's no risk of modifying the input as we've already verified
+ # there are no flattened fields.
+ if not isinstance(request, specialist_pool_service.DeleteSpecialistPoolRequest):
+ request = specialist_pool_service.DeleteSpecialistPoolRequest(request)
+
+ # If we have keyword arguments corresponding to fields on the
+ # request, apply these.
+
+ if name is not None:
+ request.name = name
+
+ # Wrap the RPC method; this adds retry and timeout information,
+ # and friendly error handling.
+ rpc = self._transport._wrapped_methods[self._transport.delete_specialist_pool]
+
+ # Certain fields should be provided within the metadata header;
+ # add these here.
+ metadata = tuple(metadata) + (
+ gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)),
+ )
+
+ # Send the request.
+ response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,)
+
+ # Wrap the response in an operation future.
+ response = ga_operation.from_gapic(
+ response,
+ self._transport.operations_client,
+ empty.Empty,
+ metadata_type=gca_operation.DeleteOperationMetadata,
+ )
+
+ # Done; return the response.
+ return response
+
+ def update_specialist_pool(
+ self,
+ request: specialist_pool_service.UpdateSpecialistPoolRequest = None,
+ *,
+ specialist_pool: gca_specialist_pool.SpecialistPool = None,
+ update_mask: field_mask.FieldMask = None,
+ retry: retries.Retry = gapic_v1.method.DEFAULT,
+ timeout: float = None,
+ metadata: Sequence[Tuple[str, str]] = (),
+ ) -> ga_operation.Operation:
+ r"""Updates a SpecialistPool.
+
+ Args:
+ request (google.cloud.aiplatform_v1.types.UpdateSpecialistPoolRequest):
+ The request object. Request message for
+ ``SpecialistPoolService.UpdateSpecialistPool``.
+ specialist_pool (google.cloud.aiplatform_v1.types.SpecialistPool):
+ Required. The SpecialistPool which
+ replaces the resource on the server.
+
+ This corresponds to the ``specialist_pool`` field
+ on the ``request`` instance; if ``request`` is provided, this
+ should not be set.
+ update_mask (google.protobuf.field_mask_pb2.FieldMask):
+ Required. The update mask applies to
+ the resource.
+
+ This corresponds to the ``update_mask`` field
+ on the ``request`` instance; if ``request`` is provided, this
+ should not be set.
+
+ retry (google.api_core.retry.Retry): Designation of what errors, if any,
+ should be retried.
+ timeout (float): The timeout for this request.
+ metadata (Sequence[Tuple[str, str]]): Strings which should be
+ sent along with the request as metadata.
+
+ Returns:
+ google.api_core.operation.Operation:
+ An object representing a long-running operation.
+
+ The result type for the operation will be :class:`google.cloud.aiplatform_v1.types.SpecialistPool` SpecialistPool represents customers' own workforce to work on their data
+ labeling jobs. It includes a group of specialist
+ managers who are responsible for managing the
+ labelers in this pool as well as customers' data
+ labeling jobs associated with this pool. Customers
+ create a specialist pool and start data labeling
+ jobs on Cloud; managers and labelers work with the
+ jobs using the CrowdCompute console.
+
+ """
+ # Create or coerce a protobuf request object.
+ # Sanity check: If we got a request object, we should *not* have
+ # gotten any keyword arguments that map to the request.
+ has_flattened_params = any([specialist_pool, update_mask])
+ if request is not None and has_flattened_params:
+ raise ValueError(
+ "If the `request` argument is set, then none of "
+ "the individual field arguments should be set."
+ )
+
+ # Minor optimization to avoid making a copy if the user passes
+ # in a specialist_pool_service.UpdateSpecialistPoolRequest.
+ # There's no risk of modifying the input as we've already verified
+ # there are no flattened fields.
+ if not isinstance(request, specialist_pool_service.UpdateSpecialistPoolRequest):
+ request = specialist_pool_service.UpdateSpecialistPoolRequest(request)
+
+ # If we have keyword arguments corresponding to fields on the
+ # request, apply these.
+
+ if specialist_pool is not None:
+ request.specialist_pool = specialist_pool
+ if update_mask is not None:
+ request.update_mask = update_mask
+
+ # Wrap the RPC method; this adds retry and timeout information,
+ # and friendly error handling.
+ rpc = self._transport._wrapped_methods[self._transport.update_specialist_pool]
+
+ # Certain fields should be provided within the metadata header;
+ # add these here.
+ metadata = tuple(metadata) + (
+ gapic_v1.routing_header.to_grpc_metadata(
+ (("specialist_pool.name", request.specialist_pool.name),)
+ ),
+ )
+
+ # Send the request.
+ response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,)
+
+ # Wrap the response in an operation future.
+ response = ga_operation.from_gapic(
+ response,
+ self._transport.operations_client,
+ gca_specialist_pool.SpecialistPool,
+ metadata_type=specialist_pool_service.UpdateSpecialistPoolOperationMetadata,
+ )
+
+ # Done; return the response.
+ return response
+
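+ # Illustrative update with a field mask (a sketch; assumes `pool` is an
+ # existing SpecialistPool and only display_name changes):
+ #
+ #   from google.protobuf import field_mask_pb2
+ #
+ #   pool.display_name = "renamed-pool"
+ #   operation = client.update_specialist_pool(
+ #       specialist_pool=pool,
+ #       update_mask=field_mask_pb2.FieldMask(paths=["display_name"]),
+ #   )
+ #   updated = operation.result()
+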
+
+try:
+ DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo(
+ gapic_version=pkg_resources.get_distribution(
+ "google-cloud-aiplatform",
+ ).version,
+ )
+except pkg_resources.DistributionNotFound:
+ DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo()
+
+
+__all__ = ("SpecialistPoolServiceClient",)
diff --git a/google/cloud/aiplatform_v1/services/specialist_pool_service/pagers.py b/google/cloud/aiplatform_v1/services/specialist_pool_service/pagers.py
new file mode 100644
index 0000000000..b55e53169e
--- /dev/null
+++ b/google/cloud/aiplatform_v1/services/specialist_pool_service/pagers.py
@@ -0,0 +1,153 @@
+# -*- coding: utf-8 -*-
+
+# Copyright 2020 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+from typing import Any, AsyncIterable, Awaitable, Callable, Iterable, Sequence, Tuple
+
+from google.cloud.aiplatform_v1.types import specialist_pool
+from google.cloud.aiplatform_v1.types import specialist_pool_service
+
+
+class ListSpecialistPoolsPager:
+ """A pager for iterating through ``list_specialist_pools`` requests.
+
+ This class thinly wraps an initial
+ :class:`google.cloud.aiplatform_v1.types.ListSpecialistPoolsResponse` object, and
+ provides an ``__iter__`` method to iterate through its
+ ``specialist_pools`` field.
+
+ If there are more pages, the ``__iter__`` method will make additional
+ ``ListSpecialistPools`` requests and continue to iterate
+ through the ``specialist_pools`` field on the
+ corresponding responses.
+
+ All the usual :class:`google.cloud.aiplatform_v1.types.ListSpecialistPoolsResponse`
+ attributes are available on the pager. If multiple requests are made, only
+ the most recent response is retained, and thus used for attribute lookup.
+ """
+
+ def __init__(
+ self,
+ method: Callable[..., specialist_pool_service.ListSpecialistPoolsResponse],
+ request: specialist_pool_service.ListSpecialistPoolsRequest,
+ response: specialist_pool_service.ListSpecialistPoolsResponse,
+ *,
+ metadata: Sequence[Tuple[str, str]] = ()
+ ):
+ """Instantiate the pager.
+
+ Args:
+ method (Callable): The method that was originally called, and
+ which instantiated this pager.
+ request (google.cloud.aiplatform_v1.types.ListSpecialistPoolsRequest):
+ The initial request object.
+ response (google.cloud.aiplatform_v1.types.ListSpecialistPoolsResponse):
+ The initial response object.
+ metadata (Sequence[Tuple[str, str]]): Strings which should be
+ sent along with the request as metadata.
+ """
+ self._method = method
+ self._request = specialist_pool_service.ListSpecialistPoolsRequest(request)
+ self._response = response
+ self._metadata = metadata
+
+ def __getattr__(self, name: str) -> Any:
+ return getattr(self._response, name)
+
+ @property
+ def pages(self) -> Iterable[specialist_pool_service.ListSpecialistPoolsResponse]:
+ yield self._response
+ while self._response.next_page_token:
+ self._request.page_token = self._response.next_page_token
+ self._response = self._method(self._request, metadata=self._metadata)
+ yield self._response
+
+ def __iter__(self) -> Iterable[specialist_pool.SpecialistPool]:
+ for page in self.pages:
+ yield from page.specialist_pools
+
+ def __repr__(self) -> str:
+ return "{0}<{1!r}>".format(self.__class__.__name__, self._response)
+
+
+class ListSpecialistPoolsAsyncPager:
+ """A pager for iterating through ``list_specialist_pools`` requests.
+
+ This class thinly wraps an initial
+ :class:`google.cloud.aiplatform_v1.types.ListSpecialistPoolsResponse` object, and
+ provides an ``__aiter__`` method to iterate through its
+ ``specialist_pools`` field.
+
+ If there are more pages, the ``__aiter__`` method will make additional
+ ``ListSpecialistPools`` requests and continue to iterate
+ through the ``specialist_pools`` field on the
+ corresponding responses.
+
+ All the usual :class:`google.cloud.aiplatform_v1.types.ListSpecialistPoolsResponse`
+ attributes are available on the pager. If multiple requests are made, only
+ the most recent response is retained, and thus used for attribute lookup.
+ """
+
+ def __init__(
+ self,
+ method: Callable[
+ ..., Awaitable[specialist_pool_service.ListSpecialistPoolsResponse]
+ ],
+ request: specialist_pool_service.ListSpecialistPoolsRequest,
+ response: specialist_pool_service.ListSpecialistPoolsResponse,
+ *,
+ metadata: Sequence[Tuple[str, str]] = ()
+ ):
+ """Instantiate the pager.
+
+ Args:
+ method (Callable): The method that was originally called, and
+ which instantiated this pager.
+ request (google.cloud.aiplatform_v1.types.ListSpecialistPoolsRequest):
+ The initial request object.
+ response (google.cloud.aiplatform_v1.types.ListSpecialistPoolsResponse):
+ The initial response object.
+ metadata (Sequence[Tuple[str, str]]): Strings which should be
+ sent along with the request as metadata.
+ """
+ self._method = method
+ self._request = specialist_pool_service.ListSpecialistPoolsRequest(request)
+ self._response = response
+ self._metadata = metadata
+
+ def __getattr__(self, name: str) -> Any:
+ return getattr(self._response, name)
+
+ @property
+ async def pages(
+ self,
+ ) -> AsyncIterable[specialist_pool_service.ListSpecialistPoolsResponse]:
+ yield self._response
+ while self._response.next_page_token:
+ self._request.page_token = self._response.next_page_token
+ self._response = await self._method(self._request, metadata=self._metadata)
+ yield self._response
+
+ def __aiter__(self) -> AsyncIterable[specialist_pool.SpecialistPool]:
+ async def async_generator():
+ async for page in self.pages:
+ for response in page.specialist_pools:
+ yield response
+
+ return async_generator()
+
+ def __repr__(self) -> str:
+ return "{0}<{1!r}>".format(self.__class__.__name__, self._response)
diff --git a/google/cloud/aiplatform_v1/services/specialist_pool_service/transports/__init__.py b/google/cloud/aiplatform_v1/services/specialist_pool_service/transports/__init__.py
new file mode 100644
index 0000000000..1bb2fbf22a
--- /dev/null
+++ b/google/cloud/aiplatform_v1/services/specialist_pool_service/transports/__init__.py
@@ -0,0 +1,37 @@
+# -*- coding: utf-8 -*-
+
+# Copyright 2020 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+from collections import OrderedDict
+from typing import Dict, Type
+
+from .base import SpecialistPoolServiceTransport
+from .grpc import SpecialistPoolServiceGrpcTransport
+from .grpc_asyncio import SpecialistPoolServiceGrpcAsyncIOTransport
+
+
+# Compile a registry of transports.
+_transport_registry = (
+ OrderedDict()
+) # type: Dict[str, Type[SpecialistPoolServiceTransport]]
+_transport_registry["grpc"] = SpecialistPoolServiceGrpcTransport
+_transport_registry["grpc_asyncio"] = SpecialistPoolServiceGrpcAsyncIOTransport
+
+__all__ = (
+ "SpecialistPoolServiceTransport",
+ "SpecialistPoolServiceGrpcTransport",
+ "SpecialistPoolServiceGrpcAsyncIOTransport",
+)
diff --git a/google/cloud/aiplatform_v1/services/specialist_pool_service/transports/base.py b/google/cloud/aiplatform_v1/services/specialist_pool_service/transports/base.py
new file mode 100644
index 0000000000..56de21b988
--- /dev/null
+++ b/google/cloud/aiplatform_v1/services/specialist_pool_service/transports/base.py
@@ -0,0 +1,194 @@
+# -*- coding: utf-8 -*-
+
+# Copyright 2020 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+import abc
+import typing
+import pkg_resources
+
+from google import auth # type: ignore
+from google.api_core import exceptions # type: ignore
+from google.api_core import gapic_v1 # type: ignore
+from google.api_core import retry as retries # type: ignore
+from google.api_core import operations_v1 # type: ignore
+from google.auth import credentials # type: ignore
+
+from google.cloud.aiplatform_v1.types import specialist_pool
+from google.cloud.aiplatform_v1.types import specialist_pool_service
+from google.longrunning import operations_pb2 as operations # type: ignore
+
+
+try:
+ DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo(
+ gapic_version=pkg_resources.get_distribution(
+ "google-cloud-aiplatform",
+ ).version,
+ )
+except pkg_resources.DistributionNotFound:
+ DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo()
+
+
+class SpecialistPoolServiceTransport(abc.ABC):
+ """Abstract transport class for SpecialistPoolService."""
+
+ AUTH_SCOPES = ("https://www.googleapis.com/auth/cloud-platform",)
+
+ def __init__(
+ self,
+ *,
+ host: str = "aiplatform.googleapis.com",
+ credentials: credentials.Credentials = None,
+ credentials_file: typing.Optional[str] = None,
+ scopes: typing.Optional[typing.Sequence[str]] = AUTH_SCOPES,
+ quota_project_id: typing.Optional[str] = None,
+ client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
+ **kwargs,
+ ) -> None:
+ """Instantiate the transport.
+
+ Args:
+ host (Optional[str]): The hostname to connect to.
+ credentials (Optional[google.auth.credentials.Credentials]): The
+ authorization credentials to attach to requests. These
+ credentials identify the application to the service; if none
+ are specified, the client will attempt to ascertain the
+ credentials from the environment.
+ credentials_file (Optional[str]): A file with credentials that can
+ be loaded with :func:`google.auth.load_credentials_from_file`.
+ This argument is mutually exclusive with credentials.
+ scopes (Optional[Sequence[str]]): A list of scopes.
+ quota_project_id (Optional[str]): An optional project to use for billing
+ and quota.
+ client_info (google.api_core.gapic_v1.client_info.ClientInfo):
+ The client info used to send a user-agent string along with
+ API requests. If ``None``, then default info will be used.
+ Generally, you only need to set this if you're developing
+ your own client library.
+ """
+ # Save the hostname. Default to port 443 (HTTPS) if none is specified.
+ if ":" not in host:
+ host += ":443"
+ self._host = host
+
+ # If no credentials are provided, then determine the appropriate
+ # defaults.
+ if credentials and credentials_file:
+ raise exceptions.DuplicateCredentialArgs(
+ "'credentials_file' and 'credentials' are mutually exclusive"
+ )
+
+ if credentials_file is not None:
+ credentials, _ = auth.load_credentials_from_file(
+ credentials_file, scopes=scopes, quota_project_id=quota_project_id
+ )
+
+ elif credentials is None:
+ credentials, _ = auth.default(
+ scopes=scopes, quota_project_id=quota_project_id
+ )
+
+ # Save the credentials.
+ self._credentials = credentials
+
+ # Lifted into its own function so it can be stubbed out during tests.
+ self._prep_wrapped_messages(client_info)
+
+ def _prep_wrapped_messages(self, client_info):
+ # Precompute the wrapped methods.
+ self._wrapped_methods = {
+ self.create_specialist_pool: gapic_v1.method.wrap_method(
+ self.create_specialist_pool,
+ default_timeout=None,
+ client_info=client_info,
+ ),
+ self.get_specialist_pool: gapic_v1.method.wrap_method(
+ self.get_specialist_pool, default_timeout=None, client_info=client_info,
+ ),
+ self.list_specialist_pools: gapic_v1.method.wrap_method(
+ self.list_specialist_pools,
+ default_timeout=None,
+ client_info=client_info,
+ ),
+ self.delete_specialist_pool: gapic_v1.method.wrap_method(
+ self.delete_specialist_pool,
+ default_timeout=None,
+ client_info=client_info,
+ ),
+ self.update_specialist_pool: gapic_v1.method.wrap_method(
+ self.update_specialist_pool,
+ default_timeout=None,
+ client_info=client_info,
+ ),
+ }
+
+ @property
+ def operations_client(self) -> operations_v1.OperationsClient:
+ """Return the client designed to process long-running operations."""
+ raise NotImplementedError()
+
+ @property
+ def create_specialist_pool(
+ self,
+ ) -> typing.Callable[
+ [specialist_pool_service.CreateSpecialistPoolRequest],
+ typing.Union[operations.Operation, typing.Awaitable[operations.Operation]],
+ ]:
+ raise NotImplementedError()
+
+ @property
+ def get_specialist_pool(
+ self,
+ ) -> typing.Callable[
+ [specialist_pool_service.GetSpecialistPoolRequest],
+ typing.Union[
+ specialist_pool.SpecialistPool,
+ typing.Awaitable[specialist_pool.SpecialistPool],
+ ],
+ ]:
+ raise NotImplementedError()
+
+ @property
+ def list_specialist_pools(
+ self,
+ ) -> typing.Callable[
+ [specialist_pool_service.ListSpecialistPoolsRequest],
+ typing.Union[
+ specialist_pool_service.ListSpecialistPoolsResponse,
+ typing.Awaitable[specialist_pool_service.ListSpecialistPoolsResponse],
+ ],
+ ]:
+ raise NotImplementedError()
+
+ @property
+ def delete_specialist_pool(
+ self,
+ ) -> typing.Callable[
+ [specialist_pool_service.DeleteSpecialistPoolRequest],
+ typing.Union[operations.Operation, typing.Awaitable[operations.Operation]],
+ ]:
+ raise NotImplementedError()
+
+ @property
+ def update_specialist_pool(
+ self,
+ ) -> typing.Callable[
+ [specialist_pool_service.UpdateSpecialistPoolRequest],
+ typing.Union[operations.Operation, typing.Awaitable[operations.Operation]],
+ ]:
+ raise NotImplementedError()
+
+
+__all__ = ("SpecialistPoolServiceTransport",)
diff --git a/google/cloud/aiplatform_v1/services/specialist_pool_service/transports/grpc.py b/google/cloud/aiplatform_v1/services/specialist_pool_service/transports/grpc.py
new file mode 100644
index 0000000000..c9895648d2
--- /dev/null
+++ b/google/cloud/aiplatform_v1/services/specialist_pool_service/transports/grpc.py
@@ -0,0 +1,418 @@
+# -*- coding: utf-8 -*-
+
+# Copyright 2020 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+import warnings
+from typing import Callable, Dict, Optional, Sequence, Tuple
+
+from google.api_core import grpc_helpers # type: ignore
+from google.api_core import operations_v1 # type: ignore
+from google.api_core import gapic_v1 # type: ignore
+from google import auth # type: ignore
+from google.auth import credentials # type: ignore
+from google.auth.transport.grpc import SslCredentials # type: ignore
+
+import grpc # type: ignore
+
+from google.cloud.aiplatform_v1.types import specialist_pool
+from google.cloud.aiplatform_v1.types import specialist_pool_service
+from google.longrunning import operations_pb2 as operations # type: ignore
+
+from .base import SpecialistPoolServiceTransport, DEFAULT_CLIENT_INFO
+
+
+class SpecialistPoolServiceGrpcTransport(SpecialistPoolServiceTransport):
+ """gRPC backend transport for SpecialistPoolService.
+
+ A service for creating and managing Customer SpecialistPools.
+ When customers start Data Labeling jobs, they can reuse/create
+ Specialist Pools to bring their own Specialists to label the
+ data. Customers can add/remove Managers for the Specialist Pool
+ on Cloud console, then Managers will get email notifications to
+ manage Specialists and tasks on CrowdCompute console.
+
+ This class defines the same methods as the primary client, so the
+ primary client can load the underlying transport implementation
+ and call it.
+
+ It sends protocol buffers over the wire using gRPC (which is built on
+ top of HTTP/2); the ``grpcio`` package must be installed.
+ """
+
+ _stubs: Dict[str, Callable]
+
+ def __init__(
+ self,
+ *,
+ host: str = "aiplatform.googleapis.com",
+ credentials: credentials.Credentials = None,
+ credentials_file: str = None,
+ scopes: Sequence[str] = None,
+ channel: grpc.Channel = None,
+ api_mtls_endpoint: str = None,
+ client_cert_source: Callable[[], Tuple[bytes, bytes]] = None,
+ ssl_channel_credentials: grpc.ChannelCredentials = None,
+ client_cert_source_for_mtls: Callable[[], Tuple[bytes, bytes]] = None,
+ quota_project_id: Optional[str] = None,
+ client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
+ ) -> None:
+ """Instantiate the transport.
+
+ Args:
+ host (Optional[str]): The hostname to connect to.
+ credentials (Optional[google.auth.credentials.Credentials]): The
+ authorization credentials to attach to requests. These
+ credentials identify the application to the service; if none
+ are specified, the client will attempt to ascertain the
+ credentials from the environment.
+ This argument is ignored if ``channel`` is provided.
+ credentials_file (Optional[str]): A file with credentials that can
+ be loaded with :func:`google.auth.load_credentials_from_file`.
+ This argument is ignored if ``channel`` is provided.
+ scopes (Optional[Sequence[str]]): A list of scopes. This argument is
+ ignored if ``channel`` is provided.
+ channel (Optional[grpc.Channel]): A ``Channel`` instance through
+ which to make calls.
+ api_mtls_endpoint (Optional[str]): Deprecated. The mutual TLS endpoint.
+ If provided, it overrides the ``host`` argument and tries to create
+ a mutual TLS channel with client SSL credentials from
+ ``client_cert_source`` or application default SSL credentials.
+ client_cert_source (Optional[Callable[[], Tuple[bytes, bytes]]]):
+ Deprecated. A callback to provide client SSL certificate bytes and
+ private key bytes, both in PEM format. It is ignored if
+ ``api_mtls_endpoint`` is None.
+ ssl_channel_credentials (grpc.ChannelCredentials): SSL credentials
+ for grpc channel. It is ignored if ``channel`` is provided.
+ client_cert_source_for_mtls (Optional[Callable[[], Tuple[bytes, bytes]]]):
+ A callback to provide client certificate bytes and private key bytes,
+ both in PEM format. It is used to configure mutual TLS channel. It is
+ ignored if ``channel`` or ``ssl_channel_credentials`` is provided.
+ quota_project_id (Optional[str]): An optional project to use for billing
+ and quota.
+ client_info (google.api_core.gapic_v1.client_info.ClientInfo):
+ The client info used to send a user-agent string along with
+ API requests. If ``None``, then default info will be used.
+ Generally, you only need to set this if you're developing
+ your own client library.
+
+ Raises:
+ google.auth.exceptions.MutualTLSChannelError: If mutual TLS transport
+ creation failed for any reason.
+ google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials``
+ and ``credentials_file`` are passed.
+ """
+ self._ssl_channel_credentials = ssl_channel_credentials
+
+ if api_mtls_endpoint:
+ warnings.warn("api_mtls_endpoint is deprecated", DeprecationWarning)
+ if client_cert_source:
+ warnings.warn("client_cert_source is deprecated", DeprecationWarning)
+
+ if channel:
+ # Sanity check: Ensure that channel and credentials are not both
+ # provided.
+ credentials = False
+
+ # If a channel was explicitly provided, set it.
+ self._grpc_channel = channel
+ self._ssl_channel_credentials = None
+ elif api_mtls_endpoint:
+ host = (
+ api_mtls_endpoint
+ if ":" in api_mtls_endpoint
+ else api_mtls_endpoint + ":443"
+ )
+
+ if credentials is None:
+ credentials, _ = auth.default(
+ scopes=self.AUTH_SCOPES, quota_project_id=quota_project_id
+ )
+
+ # Create SSL credentials with client_cert_source or application
+ # default SSL credentials.
+ if client_cert_source:
+ cert, key = client_cert_source()
+ ssl_credentials = grpc.ssl_channel_credentials(
+ certificate_chain=cert, private_key=key
+ )
+ else:
+ ssl_credentials = SslCredentials().ssl_credentials
+
+ # create a new channel. The provided one is ignored.
+ self._grpc_channel = type(self).create_channel(
+ host,
+ credentials=credentials,
+ credentials_file=credentials_file,
+ ssl_credentials=ssl_credentials,
+ scopes=scopes or self.AUTH_SCOPES,
+ quota_project_id=quota_project_id,
+ options=[
+ ("grpc.max_send_message_length", -1),
+ ("grpc.max_receive_message_length", -1),
+ ],
+ )
+ self._ssl_channel_credentials = ssl_credentials
+ else:
+ host = host if ":" in host else host + ":443"
+
+ if credentials is None:
+ credentials, _ = auth.default(
+ scopes=self.AUTH_SCOPES, quota_project_id=quota_project_id
+ )
+
+ if client_cert_source_for_mtls and not ssl_channel_credentials:
+ cert, key = client_cert_source_for_mtls()
+ self._ssl_channel_credentials = grpc.ssl_channel_credentials(
+ certificate_chain=cert, private_key=key
+ )
+
+ # create a new channel. The provided one is ignored.
+ self._grpc_channel = type(self).create_channel(
+ host,
+ credentials=credentials,
+ credentials_file=credentials_file,
+ ssl_credentials=self._ssl_channel_credentials,
+ scopes=scopes or self.AUTH_SCOPES,
+ quota_project_id=quota_project_id,
+ options=[
+ ("grpc.max_send_message_length", -1),
+ ("grpc.max_receive_message_length", -1),
+ ],
+ )
+
+ self._stubs = {} # type: Dict[str, Callable]
+ self._operations_client = None
+
+ # Run the base constructor.
+ super().__init__(
+ host=host,
+ credentials=credentials,
+ credentials_file=credentials_file,
+ scopes=scopes or self.AUTH_SCOPES,
+ quota_project_id=quota_project_id,
+ client_info=client_info,
+ )
+
+ @classmethod
+ def create_channel(
+ cls,
+ host: str = "aiplatform.googleapis.com",
+ credentials: credentials.Credentials = None,
+ credentials_file: str = None,
+ scopes: Optional[Sequence[str]] = None,
+ quota_project_id: Optional[str] = None,
+ **kwargs,
+ ) -> grpc.Channel:
+ """Create and return a gRPC channel object.
+ Args:
+ host (Optional[str]): The host for the channel to use.
+ credentials (Optional[~.Credentials]): The
+ authorization credentials to attach to requests. These
+ credentials identify this application to the service. If
+ none are specified, the client will attempt to ascertain
+ the credentials from the environment.
+ credentials_file (Optional[str]): A file with credentials that can
+ be loaded with :func:`google.auth.load_credentials_from_file`.
+ This argument is mutually exclusive with credentials.
+ scopes (Optional[Sequence[str]]): An optional list of scopes needed for this
+ service. These are only used when credentials are not specified and
+ are passed to :func:`google.auth.default`.
+ quota_project_id (Optional[str]): An optional project to use for billing
+ and quota.
+ kwargs (Optional[dict]): Keyword arguments, which are passed to the
+ channel creation.
+ Returns:
+ grpc.Channel: A gRPC channel object.
+
+ Raises:
+ google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials``
+ and ``credentials_file`` are passed.
+ """
+ scopes = scopes or cls.AUTH_SCOPES
+ return grpc_helpers.create_channel(
+ host,
+ credentials=credentials,
+ credentials_file=credentials_file,
+ scopes=scopes,
+ quota_project_id=quota_project_id,
+ **kwargs,
+ )
+
+ @property
+ def grpc_channel(self) -> grpc.Channel:
+ """Return the channel designed to connect to this service.
+ """
+ return self._grpc_channel
+
+ @property
+ def operations_client(self) -> operations_v1.OperationsClient:
+ """Create the client designed to process long-running operations.
+
+ This property caches on the instance; repeated calls return the same
+ client.
+ """
+ # Sanity check: Only create a new client if we do not already have one.
+ if self._operations_client is None:
+ self._operations_client = operations_v1.OperationsClient(self.grpc_channel)
+
+ # Return the client from cache.
+ return self._operations_client
+
+ @property
+ def create_specialist_pool(
+ self,
+ ) -> Callable[
+ [specialist_pool_service.CreateSpecialistPoolRequest], operations.Operation
+ ]:
+ r"""Return a callable for the create specialist pool method over gRPC.
+
+ Creates a SpecialistPool.
+
+ Returns:
+ Callable[[~.CreateSpecialistPoolRequest],
+ ~.Operation]:
+ A function that, when called, will call the underlying RPC
+ on the server.
+ """
+ # Generate a "stub function" on-the-fly which will actually make
+ # the request.
+ # gRPC handles serialization and deserialization, so we just need
+ # to pass in the functions for each.
+ if "create_specialist_pool" not in self._stubs:
+ self._stubs["create_specialist_pool"] = self.grpc_channel.unary_unary(
+ "/google.cloud.aiplatform.v1.SpecialistPoolService/CreateSpecialistPool",
+ request_serializer=specialist_pool_service.CreateSpecialistPoolRequest.serialize,
+ response_deserializer=operations.Operation.FromString,
+ )
+ return self._stubs["create_specialist_pool"]
+
+ @property
+ def get_specialist_pool(
+ self,
+ ) -> Callable[
+ [specialist_pool_service.GetSpecialistPoolRequest],
+ specialist_pool.SpecialistPool,
+ ]:
+ r"""Return a callable for the get specialist pool method over gRPC.
+
+ Gets a SpecialistPool.
+
+ Returns:
+ Callable[[~.GetSpecialistPoolRequest],
+ ~.SpecialistPool]:
+ A function that, when called, will call the underlying RPC
+ on the server.
+ """
+ # Generate a "stub function" on-the-fly which will actually make
+ # the request.
+ # gRPC handles serialization and deserialization, so we just need
+ # to pass in the functions for each.
+ if "get_specialist_pool" not in self._stubs:
+ self._stubs["get_specialist_pool"] = self.grpc_channel.unary_unary(
+ "/google.cloud.aiplatform.v1.SpecialistPoolService/GetSpecialistPool",
+ request_serializer=specialist_pool_service.GetSpecialistPoolRequest.serialize,
+ response_deserializer=specialist_pool.SpecialistPool.deserialize,
+ )
+ return self._stubs["get_specialist_pool"]
+
+ @property
+ def list_specialist_pools(
+ self,
+ ) -> Callable[
+ [specialist_pool_service.ListSpecialistPoolsRequest],
+ specialist_pool_service.ListSpecialistPoolsResponse,
+ ]:
+ r"""Return a callable for the list specialist pools method over gRPC.
+
+ Lists SpecialistPools in a Location.
+
+ Returns:
+ Callable[[~.ListSpecialistPoolsRequest],
+ ~.ListSpecialistPoolsResponse]:
+ A function that, when called, will call the underlying RPC
+ on the server.
+ """
+ # Generate a "stub function" on-the-fly which will actually make
+ # the request.
+ # gRPC handles serialization and deserialization, so we just need
+ # to pass in the functions for each.
+ if "list_specialist_pools" not in self._stubs:
+ self._stubs["list_specialist_pools"] = self.grpc_channel.unary_unary(
+ "/google.cloud.aiplatform.v1.SpecialistPoolService/ListSpecialistPools",
+ request_serializer=specialist_pool_service.ListSpecialistPoolsRequest.serialize,
+ response_deserializer=specialist_pool_service.ListSpecialistPoolsResponse.deserialize,
+ )
+ return self._stubs["list_specialist_pools"]
+
+ @property
+ def delete_specialist_pool(
+ self,
+ ) -> Callable[
+ [specialist_pool_service.DeleteSpecialistPoolRequest], operations.Operation
+ ]:
+ r"""Return a callable for the delete specialist pool method over gRPC.
+
+ Deletes a SpecialistPool as well as all Specialists
+ in the pool.
+
+ Returns:
+ Callable[[~.DeleteSpecialistPoolRequest],
+ ~.Operation]:
+ A function that, when called, will call the underlying RPC
+ on the server.
+ """
+ # Generate a "stub function" on-the-fly which will actually make
+ # the request.
+ # gRPC handles serialization and deserialization, so we just need
+ # to pass in the functions for each.
+ if "delete_specialist_pool" not in self._stubs:
+ self._stubs["delete_specialist_pool"] = self.grpc_channel.unary_unary(
+ "/google.cloud.aiplatform.v1.SpecialistPoolService/DeleteSpecialistPool",
+ request_serializer=specialist_pool_service.DeleteSpecialistPoolRequest.serialize,
+ response_deserializer=operations.Operation.FromString,
+ )
+ return self._stubs["delete_specialist_pool"]
+
+ @property
+ def update_specialist_pool(
+ self,
+ ) -> Callable[
+ [specialist_pool_service.UpdateSpecialistPoolRequest], operations.Operation
+ ]:
+ r"""Return a callable for the update specialist pool method over gRPC.
+
+ Updates a SpecialistPool.
+
+ Returns:
+ Callable[[~.UpdateSpecialistPoolRequest],
+ ~.Operation]:
+ A function that, when called, will call the underlying RPC
+ on the server.
+ """
+ # Generate a "stub function" on-the-fly which will actually make
+ # the request.
+ # gRPC handles serialization and deserialization, so we just need
+ # to pass in the functions for each.
+ if "update_specialist_pool" not in self._stubs:
+ self._stubs["update_specialist_pool"] = self.grpc_channel.unary_unary(
+ "/google.cloud.aiplatform.v1.SpecialistPoolService/UpdateSpecialistPool",
+ request_serializer=specialist_pool_service.UpdateSpecialistPoolRequest.serialize,
+ response_deserializer=operations.Operation.FromString,
+ )
+ return self._stubs["update_specialist_pool"]
+
+
+__all__ = ("SpecialistPoolServiceGrpcTransport",)
diff --git a/google/cloud/aiplatform_v1/services/specialist_pool_service/transports/grpc_asyncio.py b/google/cloud/aiplatform_v1/services/specialist_pool_service/transports/grpc_asyncio.py
new file mode 100644
index 0000000000..566d0b022b
--- /dev/null
+++ b/google/cloud/aiplatform_v1/services/specialist_pool_service/transports/grpc_asyncio.py
@@ -0,0 +1,427 @@
+# -*- coding: utf-8 -*-
+
+# Copyright 2020 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+import warnings
+from typing import Awaitable, Callable, Dict, Optional, Sequence, Tuple
+
+from google.api_core import gapic_v1 # type: ignore
+from google.api_core import grpc_helpers_async # type: ignore
+from google.api_core import operations_v1 # type: ignore
+from google import auth # type: ignore
+from google.auth import credentials # type: ignore
+from google.auth.transport.grpc import SslCredentials # type: ignore
+
+import grpc # type: ignore
+from grpc.experimental import aio # type: ignore
+
+from google.cloud.aiplatform_v1.types import specialist_pool
+from google.cloud.aiplatform_v1.types import specialist_pool_service
+from google.longrunning import operations_pb2 as operations # type: ignore
+
+from .base import SpecialistPoolServiceTransport, DEFAULT_CLIENT_INFO
+from .grpc import SpecialistPoolServiceGrpcTransport
+
+
+class SpecialistPoolServiceGrpcAsyncIOTransport(SpecialistPoolServiceTransport):
+ """gRPC AsyncIO backend transport for SpecialistPoolService.
+
+ A service for creating and managing Customer SpecialistPools.
+ When customers start Data Labeling jobs, they can reuse/create
+ Specialist Pools to bring their own Specialists to label the
+ data. Customers can add/remove Managers for the Specialist Pool
+ on Cloud console, then Managers will get email notifications to
+ manage Specialists and tasks on CrowdCompute console.
+
+ This class defines the same methods as the primary client, so the
+ primary client can load the underlying transport implementation
+ and call it.
+
+ It sends protocol buffers over the wire using gRPC (which is built on
+ top of HTTP/2); the ``grpcio`` package must be installed.
+ """
+
+ _grpc_channel: aio.Channel
+ _stubs: Dict[str, Callable] = {}
+
+ @classmethod
+ def create_channel(
+ cls,
+ host: str = "aiplatform.googleapis.com",
+ credentials: credentials.Credentials = None,
+ credentials_file: Optional[str] = None,
+ scopes: Optional[Sequence[str]] = None,
+ quota_project_id: Optional[str] = None,
+ **kwargs,
+ ) -> aio.Channel:
+ """Create and return a gRPC AsyncIO channel object.
+ Args:
+ host (Optional[str]): The host for the channel to use.
+ credentials (Optional[~.Credentials]): The
+ authorization credentials to attach to requests. These
+ credentials identify this application to the service. If
+ none are specified, the client will attempt to ascertain
+ the credentials from the environment.
+ credentials_file (Optional[str]): A file with credentials that can
+ be loaded with :func:`google.auth.load_credentials_from_file`.
+ This argument is ignored if ``channel`` is provided.
+ scopes (Optional[Sequence[str]]): An optional list of scopes needed for this
+ service. These are only used when credentials are not specified and
+ are passed to :func:`google.auth.default`.
+ quota_project_id (Optional[str]): An optional project to use for billing
+ and quota.
+ kwargs (Optional[dict]): Keyword arguments, which are passed to the
+ channel creation.
+ Returns:
+ aio.Channel: A gRPC AsyncIO channel object.
+ """
+ scopes = scopes or cls.AUTH_SCOPES
+ return grpc_helpers_async.create_channel(
+ host,
+ credentials=credentials,
+ credentials_file=credentials_file,
+ scopes=scopes,
+ quota_project_id=quota_project_id,
+ **kwargs,
+ )
+
+ def __init__(
+ self,
+ *,
+ host: str = "aiplatform.googleapis.com",
+ credentials: credentials.Credentials = None,
+ credentials_file: Optional[str] = None,
+ scopes: Optional[Sequence[str]] = None,
+ channel: aio.Channel = None,
+ api_mtls_endpoint: str = None,
+ client_cert_source: Callable[[], Tuple[bytes, bytes]] = None,
+ ssl_channel_credentials: grpc.ChannelCredentials = None,
+ client_cert_source_for_mtls: Callable[[], Tuple[bytes, bytes]] = None,
+ quota_project_id=None,
+ client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
+ ) -> None:
+ """Instantiate the transport.
+
+ Args:
+ host (Optional[str]): The hostname to connect to.
+ credentials (Optional[google.auth.credentials.Credentials]): The
+ authorization credentials to attach to requests. These
+ credentials identify the application to the service; if none
+ are specified, the client will attempt to ascertain the
+ credentials from the environment.
+ This argument is ignored if ``channel`` is provided.
+ credentials_file (Optional[str]): A file with credentials that can
+ be loaded with :func:`google.auth.load_credentials_from_file`.
+ This argument is ignored if ``channel`` is provided.
+ scopes (Optional[Sequence[str]]): An optional list of scopes needed for this
+ service. These are only used when credentials are not specified and
+ are passed to :func:`google.auth.default`.
+ channel (Optional[aio.Channel]): A ``Channel`` instance through
+ which to make calls.
+ api_mtls_endpoint (Optional[str]): Deprecated. The mutual TLS endpoint.
+ If provided, it overrides the ``host`` argument and tries to create
+ a mutual TLS channel with client SSL credentials from
+ ``client_cert_source`` or application default SSL credentials.
+ client_cert_source (Optional[Callable[[], Tuple[bytes, bytes]]]):
+ Deprecated. A callback to provide client SSL certificate bytes and
+ private key bytes, both in PEM format. It is ignored if
+ ``api_mtls_endpoint`` is None.
+ ssl_channel_credentials (grpc.ChannelCredentials): SSL credentials
+ for grpc channel. It is ignored if ``channel`` is provided.
+ client_cert_source_for_mtls (Optional[Callable[[], Tuple[bytes, bytes]]]):
+ A callback to provide client certificate bytes and private key bytes,
+ both in PEM format. It is used to configure mutual TLS channel. It is
+ ignored if ``channel`` or ``ssl_channel_credentials`` is provided.
+ quota_project_id (Optional[str]): An optional project to use for billing
+ and quota.
+ client_info (google.api_core.gapic_v1.client_info.ClientInfo):
+ The client info used to send a user-agent string along with
+ API requests. If ``None``, then default info will be used.
+ Generally, you only need to set this if you're developing
+ your own client library.
+
+ Raises:
+ google.auth.exceptions.MutualTLSChannelError: If mutual TLS transport
+ creation failed for any reason.
+ google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials``
+ and ``credentials_file`` are passed.
+ """
+ self._ssl_channel_credentials = ssl_channel_credentials
+
+ if api_mtls_endpoint:
+ warnings.warn("api_mtls_endpoint is deprecated", DeprecationWarning)
+ if client_cert_source:
+ warnings.warn("client_cert_source is deprecated", DeprecationWarning)
+
+ if channel:
+ # Sanity check: Ensure that channel and credentials are not both
+ # provided.
+ credentials = False
+
+ # If a channel was explicitly provided, set it.
+ self._grpc_channel = channel
+ self._ssl_channel_credentials = None
+ elif api_mtls_endpoint:
+ host = (
+ api_mtls_endpoint
+ if ":" in api_mtls_endpoint
+ else api_mtls_endpoint + ":443"
+ )
+
+ if credentials is None:
+ credentials, _ = auth.default(
+ scopes=self.AUTH_SCOPES, quota_project_id=quota_project_id
+ )
+
+ # Create SSL credentials with client_cert_source or application
+ # default SSL credentials.
+ if client_cert_source:
+ cert, key = client_cert_source()
+ ssl_credentials = grpc.ssl_channel_credentials(
+ certificate_chain=cert, private_key=key
+ )
+ else:
+ ssl_credentials = SslCredentials().ssl_credentials
+
+ # create a new channel. The provided one is ignored.
+ self._grpc_channel = type(self).create_channel(
+ host,
+ credentials=credentials,
+ credentials_file=credentials_file,
+ ssl_credentials=ssl_credentials,
+ scopes=scopes or self.AUTH_SCOPES,
+ quota_project_id=quota_project_id,
+ options=[
+ ("grpc.max_send_message_length", -1),
+ ("grpc.max_receive_message_length", -1),
+ ],
+ )
+ self._ssl_channel_credentials = ssl_credentials
+ else:
+ host = host if ":" in host else host + ":443"
+
+ if credentials is None:
+ credentials, _ = auth.default(
+ scopes=self.AUTH_SCOPES, quota_project_id=quota_project_id
+ )
+
+ if client_cert_source_for_mtls and not ssl_channel_credentials:
+ cert, key = client_cert_source_for_mtls()
+ self._ssl_channel_credentials = grpc.ssl_channel_credentials(
+ certificate_chain=cert, private_key=key
+ )
+
+ # create a new channel. The provided one is ignored.
+ self._grpc_channel = type(self).create_channel(
+ host,
+ credentials=credentials,
+ credentials_file=credentials_file,
+ ssl_credentials=self._ssl_channel_credentials,
+ scopes=scopes or self.AUTH_SCOPES,
+ quota_project_id=quota_project_id,
+ options=[
+ ("grpc.max_send_message_length", -1),
+ ("grpc.max_receive_message_length", -1),
+ ],
+ )
+
+ # Run the base constructor.
+ super().__init__(
+ host=host,
+ credentials=credentials,
+ credentials_file=credentials_file,
+ scopes=scopes or self.AUTH_SCOPES,
+ quota_project_id=quota_project_id,
+ client_info=client_info,
+ )
+
+ self._stubs = {}
+ self._operations_client = None
+
+ @property
+ def grpc_channel(self) -> aio.Channel:
+ """Create the channel designed to connect to this service.
+
+ This property caches on the instance; repeated calls return
+ the same channel.
+ """
+ # Return the channel from cache.
+ return self._grpc_channel
+
+ @property
+ def operations_client(self) -> operations_v1.OperationsAsyncClient:
+ """Create the client designed to process long-running operations.
+
+ This property caches on the instance; repeated calls return the same
+ client.
+ """
+ # Sanity check: Only create a new client if we do not already have one.
+ if self._operations_client is None:
+ self._operations_client = operations_v1.OperationsAsyncClient(
+ self.grpc_channel
+ )
+
+ # Return the client from cache.
+ return self._operations_client
+
+ @property
+ def create_specialist_pool(
+ self,
+ ) -> Callable[
+ [specialist_pool_service.CreateSpecialistPoolRequest],
+ Awaitable[operations.Operation],
+ ]:
+ r"""Return a callable for the create specialist pool method over gRPC.
+
+ Creates a SpecialistPool.
+
+ Returns:
+ Callable[[~.CreateSpecialistPoolRequest],
+ Awaitable[~.Operation]]:
+ A function that, when called, will call the underlying RPC
+ on the server.
+ """
+ # Generate a "stub function" on-the-fly which will actually make
+ # the request.
+ # gRPC handles serialization and deserialization, so we just need
+ # to pass in the functions for each.
+ if "create_specialist_pool" not in self._stubs:
+ self._stubs["create_specialist_pool"] = self.grpc_channel.unary_unary(
+ "/google.cloud.aiplatform.v1.SpecialistPoolService/CreateSpecialistPool",
+ request_serializer=specialist_pool_service.CreateSpecialistPoolRequest.serialize,
+ response_deserializer=operations.Operation.FromString,
+ )
+ return self._stubs["create_specialist_pool"]
+
+ @property
+ def get_specialist_pool(
+ self,
+ ) -> Callable[
+ [specialist_pool_service.GetSpecialistPoolRequest],
+ Awaitable[specialist_pool.SpecialistPool],
+ ]:
+ r"""Return a callable for the get specialist pool method over gRPC.
+
+ Gets a SpecialistPool.
+
+ Returns:
+ Callable[[~.GetSpecialistPoolRequest],
+ Awaitable[~.SpecialistPool]]:
+ A function that, when called, will call the underlying RPC
+ on the server.
+ """
+ # Generate a "stub function" on-the-fly which will actually make
+ # the request.
+ # gRPC handles serialization and deserialization, so we just need
+ # to pass in the functions for each.
+ if "get_specialist_pool" not in self._stubs:
+ self._stubs["get_specialist_pool"] = self.grpc_channel.unary_unary(
+ "/google.cloud.aiplatform.v1.SpecialistPoolService/GetSpecialistPool",
+ request_serializer=specialist_pool_service.GetSpecialistPoolRequest.serialize,
+ response_deserializer=specialist_pool.SpecialistPool.deserialize,
+ )
+ return self._stubs["get_specialist_pool"]
+
+ @property
+ def list_specialist_pools(
+ self,
+ ) -> Callable[
+ [specialist_pool_service.ListSpecialistPoolsRequest],
+ Awaitable[specialist_pool_service.ListSpecialistPoolsResponse],
+ ]:
+ r"""Return a callable for the list specialist pools method over gRPC.
+
+ Lists SpecialistPools in a Location.
+
+ Returns:
+ Callable[[~.ListSpecialistPoolsRequest],
+ Awaitable[~.ListSpecialistPoolsResponse]]:
+ A function that, when called, will call the underlying RPC
+ on the server.
+ """
+ # Generate a "stub function" on-the-fly which will actually make
+ # the request.
+ # gRPC handles serialization and deserialization, so we just need
+ # to pass in the functions for each.
+ if "list_specialist_pools" not in self._stubs:
+ self._stubs["list_specialist_pools"] = self.grpc_channel.unary_unary(
+ "/google.cloud.aiplatform.v1.SpecialistPoolService/ListSpecialistPools",
+ request_serializer=specialist_pool_service.ListSpecialistPoolsRequest.serialize,
+ response_deserializer=specialist_pool_service.ListSpecialistPoolsResponse.deserialize,
+ )
+ return self._stubs["list_specialist_pools"]
+
+ @property
+ def delete_specialist_pool(
+ self,
+ ) -> Callable[
+ [specialist_pool_service.DeleteSpecialistPoolRequest],
+ Awaitable[operations.Operation],
+ ]:
+ r"""Return a callable for the delete specialist pool method over gRPC.
+
+ Deletes a SpecialistPool as well as all Specialists
+ in the pool.
+
+ Returns:
+ Callable[[~.DeleteSpecialistPoolRequest],
+ Awaitable[~.Operation]]:
+ A function that, when called, will call the underlying RPC
+ on the server.
+ """
+ # Generate a "stub function" on-the-fly which will actually make
+ # the request.
+ # gRPC handles serialization and deserialization, so we just need
+ # to pass in the functions for each.
+ if "delete_specialist_pool" not in self._stubs:
+ self._stubs["delete_specialist_pool"] = self.grpc_channel.unary_unary(
+ "/google.cloud.aiplatform.v1.SpecialistPoolService/DeleteSpecialistPool",
+ request_serializer=specialist_pool_service.DeleteSpecialistPoolRequest.serialize,
+ response_deserializer=operations.Operation.FromString,
+ )
+ return self._stubs["delete_specialist_pool"]
+
+ @property
+ def update_specialist_pool(
+ self,
+ ) -> Callable[
+ [specialist_pool_service.UpdateSpecialistPoolRequest],
+ Awaitable[operations.Operation],
+ ]:
+ r"""Return a callable for the update specialist pool method over gRPC.
+
+ Updates a SpecialistPool.
+
+ Returns:
+ Callable[[~.UpdateSpecialistPoolRequest],
+ Awaitable[~.Operation]]:
+ A function that, when called, will call the underlying RPC
+ on the server.
+ """
+ # Generate a "stub function" on-the-fly which will actually make
+ # the request.
+ # gRPC handles serialization and deserialization, so we just need
+ # to pass in the functions for each.
+ if "update_specialist_pool" not in self._stubs:
+ self._stubs["update_specialist_pool"] = self.grpc_channel.unary_unary(
+ "/google.cloud.aiplatform.v1.SpecialistPoolService/UpdateSpecialistPool",
+ request_serializer=specialist_pool_service.UpdateSpecialistPoolRequest.serialize,
+ response_deserializer=operations.Operation.FromString,
+ )
+ return self._stubs["update_specialist_pool"]
+
+
+__all__ = ("SpecialistPoolServiceGrpcAsyncIOTransport",)
diff --git a/google/cloud/aiplatform_v1/types/__init__.py b/google/cloud/aiplatform_v1/types/__init__.py
new file mode 100644
index 0000000000..f073d451fe
--- /dev/null
+++ b/google/cloud/aiplatform_v1/types/__init__.py
@@ -0,0 +1,361 @@
+# -*- coding: utf-8 -*-
+
+# Copyright 2020 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+from .accelerator_type import AcceleratorType
+from .user_action_reference import UserActionReference
+from .annotation import Annotation
+from .annotation_spec import AnnotationSpec
+from .completion_stats import CompletionStats
+from .encryption_spec import EncryptionSpec
+from .io import (
+ GcsSource,
+ GcsDestination,
+ BigQuerySource,
+ BigQueryDestination,
+ ContainerRegistryDestination,
+)
+from .job_state import JobState
+from .machine_resources import (
+ MachineSpec,
+ DedicatedResources,
+ AutomaticResources,
+ BatchDedicatedResources,
+ ResourcesConsumed,
+ DiskSpec,
+)
+from .manual_batch_tuning_parameters import ManualBatchTuningParameters
+from .batch_prediction_job import BatchPredictionJob
+from .env_var import EnvVar
+from .custom_job import (
+ CustomJob,
+ CustomJobSpec,
+ WorkerPoolSpec,
+ ContainerSpec,
+ PythonPackageSpec,
+ Scheduling,
+)
+from .data_item import DataItem
+from .specialist_pool import SpecialistPool
+from .data_labeling_job import (
+ DataLabelingJob,
+ ActiveLearningConfig,
+ SampleConfig,
+ TrainingConfig,
+)
+from .dataset import (
+ Dataset,
+ ImportDataConfig,
+ ExportDataConfig,
+)
+from .operation import (
+ GenericOperationMetadata,
+ DeleteOperationMetadata,
+)
+from .deployed_model_ref import DeployedModelRef
+from .model import (
+ Model,
+ PredictSchemata,
+ ModelContainerSpec,
+ Port,
+)
+from .pipeline_state import PipelineState
+from .training_pipeline import (
+ TrainingPipeline,
+ InputDataConfig,
+ FractionSplit,
+ FilterSplit,
+ PredefinedSplit,
+ TimestampSplit,
+)
+from .dataset_service import (
+ CreateDatasetRequest,
+ CreateDatasetOperationMetadata,
+ GetDatasetRequest,
+ UpdateDatasetRequest,
+ ListDatasetsRequest,
+ ListDatasetsResponse,
+ DeleteDatasetRequest,
+ ImportDataRequest,
+ ImportDataResponse,
+ ImportDataOperationMetadata,
+ ExportDataRequest,
+ ExportDataResponse,
+ ExportDataOperationMetadata,
+ ListDataItemsRequest,
+ ListDataItemsResponse,
+ GetAnnotationSpecRequest,
+ ListAnnotationsRequest,
+ ListAnnotationsResponse,
+)
+from .endpoint import (
+ Endpoint,
+ DeployedModel,
+)
+from .endpoint_service import (
+ CreateEndpointRequest,
+ CreateEndpointOperationMetadata,
+ GetEndpointRequest,
+ ListEndpointsRequest,
+ ListEndpointsResponse,
+ UpdateEndpointRequest,
+ DeleteEndpointRequest,
+ DeployModelRequest,
+ DeployModelResponse,
+ DeployModelOperationMetadata,
+ UndeployModelRequest,
+ UndeployModelResponse,
+ UndeployModelOperationMetadata,
+)
+from .study import (
+ Trial,
+ StudySpec,
+ Measurement,
+)
+from .hyperparameter_tuning_job import HyperparameterTuningJob
+from .job_service import (
+ CreateCustomJobRequest,
+ GetCustomJobRequest,
+ ListCustomJobsRequest,
+ ListCustomJobsResponse,
+ DeleteCustomJobRequest,
+ CancelCustomJobRequest,
+ CreateDataLabelingJobRequest,
+ GetDataLabelingJobRequest,
+ ListDataLabelingJobsRequest,
+ ListDataLabelingJobsResponse,
+ DeleteDataLabelingJobRequest,
+ CancelDataLabelingJobRequest,
+ CreateHyperparameterTuningJobRequest,
+ GetHyperparameterTuningJobRequest,
+ ListHyperparameterTuningJobsRequest,
+ ListHyperparameterTuningJobsResponse,
+ DeleteHyperparameterTuningJobRequest,
+ CancelHyperparameterTuningJobRequest,
+ CreateBatchPredictionJobRequest,
+ GetBatchPredictionJobRequest,
+ ListBatchPredictionJobsRequest,
+ ListBatchPredictionJobsResponse,
+ DeleteBatchPredictionJobRequest,
+ CancelBatchPredictionJobRequest,
+)
+from .migratable_resource import MigratableResource
+from .migration_service import (
+ SearchMigratableResourcesRequest,
+ SearchMigratableResourcesResponse,
+ BatchMigrateResourcesRequest,
+ MigrateResourceRequest,
+ BatchMigrateResourcesResponse,
+ MigrateResourceResponse,
+ BatchMigrateResourcesOperationMetadata,
+)
+from .model_evaluation import ModelEvaluation
+from .model_evaluation_slice import ModelEvaluationSlice
+from .model_service import (
+ UploadModelRequest,
+ UploadModelOperationMetadata,
+ UploadModelResponse,
+ GetModelRequest,
+ ListModelsRequest,
+ ListModelsResponse,
+ UpdateModelRequest,
+ DeleteModelRequest,
+ ExportModelRequest,
+ ExportModelOperationMetadata,
+ ExportModelResponse,
+ GetModelEvaluationRequest,
+ ListModelEvaluationsRequest,
+ ListModelEvaluationsResponse,
+ GetModelEvaluationSliceRequest,
+ ListModelEvaluationSlicesRequest,
+ ListModelEvaluationSlicesResponse,
+)
+from .pipeline_service import (
+ CreateTrainingPipelineRequest,
+ GetTrainingPipelineRequest,
+ ListTrainingPipelinesRequest,
+ ListTrainingPipelinesResponse,
+ DeleteTrainingPipelineRequest,
+ CancelTrainingPipelineRequest,
+)
+from .prediction_service import (
+ PredictRequest,
+ PredictResponse,
+)
+from .specialist_pool_service import (
+ CreateSpecialistPoolRequest,
+ CreateSpecialistPoolOperationMetadata,
+ GetSpecialistPoolRequest,
+ ListSpecialistPoolsRequest,
+ ListSpecialistPoolsResponse,
+ DeleteSpecialistPoolRequest,
+ UpdateSpecialistPoolRequest,
+ UpdateSpecialistPoolOperationMetadata,
+)
+
+__all__ = (
+ "AcceleratorType",
+ "UserActionReference",
+ "Annotation",
+ "AnnotationSpec",
+ "CompletionStats",
+ "EncryptionSpec",
+ "GcsSource",
+ "GcsDestination",
+ "BigQuerySource",
+ "BigQueryDestination",
+ "ContainerRegistryDestination",
+ "JobState",
+ "MachineSpec",
+ "DedicatedResources",
+ "AutomaticResources",
+ "BatchDedicatedResources",
+ "ResourcesConsumed",
+ "DiskSpec",
+ "ManualBatchTuningParameters",
+ "BatchPredictionJob",
+ "EnvVar",
+ "CustomJob",
+ "CustomJobSpec",
+ "WorkerPoolSpec",
+ "ContainerSpec",
+ "PythonPackageSpec",
+ "Scheduling",
+ "DataItem",
+ "SpecialistPool",
+ "DataLabelingJob",
+ "ActiveLearningConfig",
+ "SampleConfig",
+ "TrainingConfig",
+ "Dataset",
+ "ImportDataConfig",
+ "ExportDataConfig",
+ "GenericOperationMetadata",
+ "DeleteOperationMetadata",
+ "DeployedModelRef",
+ "Model",
+ "PredictSchemata",
+ "ModelContainerSpec",
+ "Port",
+ "PipelineState",
+ "TrainingPipeline",
+ "InputDataConfig",
+ "FractionSplit",
+ "FilterSplit",
+ "PredefinedSplit",
+ "TimestampSplit",
+ "CreateDatasetRequest",
+ "CreateDatasetOperationMetadata",
+ "GetDatasetRequest",
+ "UpdateDatasetRequest",
+ "ListDatasetsRequest",
+ "ListDatasetsResponse",
+ "DeleteDatasetRequest",
+ "ImportDataRequest",
+ "ImportDataResponse",
+ "ImportDataOperationMetadata",
+ "ExportDataRequest",
+ "ExportDataResponse",
+ "ExportDataOperationMetadata",
+ "ListDataItemsRequest",
+ "ListDataItemsResponse",
+ "GetAnnotationSpecRequest",
+ "ListAnnotationsRequest",
+ "ListAnnotationsResponse",
+ "Endpoint",
+ "DeployedModel",
+ "CreateEndpointRequest",
+ "CreateEndpointOperationMetadata",
+ "GetEndpointRequest",
+ "ListEndpointsRequest",
+ "ListEndpointsResponse",
+ "UpdateEndpointRequest",
+ "DeleteEndpointRequest",
+ "DeployModelRequest",
+ "DeployModelResponse",
+ "DeployModelOperationMetadata",
+ "UndeployModelRequest",
+ "UndeployModelResponse",
+ "UndeployModelOperationMetadata",
+ "Trial",
+ "StudySpec",
+ "Measurement",
+ "HyperparameterTuningJob",
+ "CreateCustomJobRequest",
+ "GetCustomJobRequest",
+ "ListCustomJobsRequest",
+ "ListCustomJobsResponse",
+ "DeleteCustomJobRequest",
+ "CancelCustomJobRequest",
+ "CreateDataLabelingJobRequest",
+ "GetDataLabelingJobRequest",
+ "ListDataLabelingJobsRequest",
+ "ListDataLabelingJobsResponse",
+ "DeleteDataLabelingJobRequest",
+ "CancelDataLabelingJobRequest",
+ "CreateHyperparameterTuningJobRequest",
+ "GetHyperparameterTuningJobRequest",
+ "ListHyperparameterTuningJobsRequest",
+ "ListHyperparameterTuningJobsResponse",
+ "DeleteHyperparameterTuningJobRequest",
+ "CancelHyperparameterTuningJobRequest",
+ "CreateBatchPredictionJobRequest",
+ "GetBatchPredictionJobRequest",
+ "ListBatchPredictionJobsRequest",
+ "ListBatchPredictionJobsResponse",
+ "DeleteBatchPredictionJobRequest",
+ "CancelBatchPredictionJobRequest",
+ "MigratableResource",
+ "SearchMigratableResourcesRequest",
+ "SearchMigratableResourcesResponse",
+ "BatchMigrateResourcesRequest",
+ "MigrateResourceRequest",
+ "BatchMigrateResourcesResponse",
+ "MigrateResourceResponse",
+ "BatchMigrateResourcesOperationMetadata",
+ "ModelEvaluation",
+ "ModelEvaluationSlice",
+ "UploadModelRequest",
+ "UploadModelOperationMetadata",
+ "UploadModelResponse",
+ "GetModelRequest",
+ "ListModelsRequest",
+ "ListModelsResponse",
+ "UpdateModelRequest",
+ "DeleteModelRequest",
+ "ExportModelRequest",
+ "ExportModelOperationMetadata",
+ "ExportModelResponse",
+ "GetModelEvaluationRequest",
+ "ListModelEvaluationsRequest",
+ "ListModelEvaluationsResponse",
+ "GetModelEvaluationSliceRequest",
+ "ListModelEvaluationSlicesRequest",
+ "ListModelEvaluationSlicesResponse",
+ "CreateTrainingPipelineRequest",
+ "GetTrainingPipelineRequest",
+ "ListTrainingPipelinesRequest",
+ "ListTrainingPipelinesResponse",
+ "DeleteTrainingPipelineRequest",
+ "CancelTrainingPipelineRequest",
+ "PredictRequest",
+ "PredictResponse",
+ "CreateSpecialistPoolRequest",
+ "CreateSpecialistPoolOperationMetadata",
+ "GetSpecialistPoolRequest",
+ "ListSpecialistPoolsRequest",
+ "ListSpecialistPoolsResponse",
+ "DeleteSpecialistPoolRequest",
+ "UpdateSpecialistPoolRequest",
+ "UpdateSpecialistPoolOperationMetadata",
+)
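As a quick illustration of what this aggregated namespace buys callers, a small sketch; the `parent` value is a placeholder, and `page_size` is assumed from the standard List-request shape rather than shown in this excerpt.

```python
# All generated message types can be imported from the single types namespace.
from google.cloud.aiplatform_v1.types import ListSpecialistPoolsRequest, SpecialistPool

request = ListSpecialistPoolsRequest(
    parent="projects/my-project/locations/us-central1",  # placeholder
    page_size=50,  # assumed standard List-request field
)
print(type(request).__module__, SpecialistPool.__name__)
```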
diff --git a/google/cloud/aiplatform_v1/types/accelerator_type.py b/google/cloud/aiplatform_v1/types/accelerator_type.py
new file mode 100644
index 0000000000..640436c38c
--- /dev/null
+++ b/google/cloud/aiplatform_v1/types/accelerator_type.py
@@ -0,0 +1,38 @@
+# -*- coding: utf-8 -*-
+
+# Copyright 2020 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+import proto # type: ignore
+
+
+__protobuf__ = proto.module(
+ package="google.cloud.aiplatform.v1", manifest={"AcceleratorType",},
+)
+
+
+class AcceleratorType(proto.Enum):
+ r"""Represents a hardware accelerator type."""
+ ACCELERATOR_TYPE_UNSPECIFIED = 0
+ NVIDIA_TESLA_K80 = 1
+ NVIDIA_TESLA_P100 = 2
+ NVIDIA_TESLA_V100 = 3
+ NVIDIA_TESLA_P4 = 4
+ NVIDIA_TESLA_T4 = 5
+ TPU_V2 = 6
+ TPU_V3 = 7
+
+
+__all__ = tuple(sorted(__protobuf__.manifest))
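Because `proto.Enum` subclasses `enum.IntEnum`, the generated accelerator constants can be compared, named, and round-tripped through their wire integers like ordinary Python enums; a brief sketch:

```python
from google.cloud.aiplatform_v1.types import accelerator_type

gpu = accelerator_type.AcceleratorType.NVIDIA_TESLA_T4
print(gpu.name, int(gpu))                    # NVIDIA_TESLA_T4 5
print(accelerator_type.AcceleratorType(3))   # AcceleratorType.NVIDIA_TESLA_V100
```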
diff --git a/google/cloud/aiplatform_v1/types/annotation.py b/google/cloud/aiplatform_v1/types/annotation.py
new file mode 100644
index 0000000000..000ca49dcb
--- /dev/null
+++ b/google/cloud/aiplatform_v1/types/annotation.py
@@ -0,0 +1,109 @@
+# -*- coding: utf-8 -*-
+
+# Copyright 2020 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+import proto # type: ignore
+
+
+from google.cloud.aiplatform_v1.types import user_action_reference
+from google.protobuf import struct_pb2 as struct # type: ignore
+from google.protobuf import timestamp_pb2 as timestamp # type: ignore
+
+
+__protobuf__ = proto.module(
+ package="google.cloud.aiplatform.v1", manifest={"Annotation",},
+)
+
+
+class Annotation(proto.Message):
+ r"""Used to assign specific AnnotationSpec to a particular area
+ of a DataItem or the whole part of the DataItem.
+
+ Attributes:
+ name (str):
+ Output only. Resource name of the Annotation.
+ payload_schema_uri (str):
+ Required. Google Cloud Storage URI points to a YAML file
+ describing
+ ``payload``.
+ The schema is defined as an `OpenAPI 3.0.2 Schema
+ Object `__. The schema files
+ that can be used here are found in
+ gs://google-cloud-aiplatform/schema/dataset/annotation/,
+ note that the chosen schema must be consistent with the
+ parent Dataset's
+ ``metadata``.
+ payload (google.protobuf.struct_pb2.Value):
+ Required. The schema of the payload can be found in
+ ``payload_schema``.
+ create_time (google.protobuf.timestamp_pb2.Timestamp):
+ Output only. Timestamp when this Annotation
+ was created.
+ update_time (google.protobuf.timestamp_pb2.Timestamp):
+ Output only. Timestamp when this Annotation
+ was last updated.
+ etag (str):
+ Optional. Used to perform consistent read-
+ odify-write updates. If not set, a blind
+ "overwrite" update happens.
+ annotation_source (google.cloud.aiplatform_v1.types.UserActionReference):
+ Output only. The source of the Annotation.
+ labels (Sequence[google.cloud.aiplatform_v1.types.Annotation.LabelsEntry]):
+ Optional. The labels with user-defined metadata to organize
+ your Annotations.
+
+ Label keys and values can be no longer than 64 characters
+ (Unicode codepoints), can only contain lowercase letters,
+ numeric characters, underscores and dashes. International
+ characters are allowed. No more than 64 user labels can be
+ associated with one Annotation(System labels are excluded).
+
+ See https://goo.gl/xmQnxf for more information and examples
+ of labels. System reserved label keys are prefixed with
+ "aiplatform.googleapis.com/" and are immutable. Following
+ system labels exist for each Annotation:
+
+ - "aiplatform.googleapis.com/annotation_set_name":
+ optional, name of the UI's annotation set this Annotation
+ belongs to. If not set, the Annotation is not visible in
+ the UI.
+
+ - "aiplatform.googleapis.com/payload_schema": output only,
+ its value is the
+ [payload_schema's][google.cloud.aiplatform.v1.Annotation.payload_schema_uri]
+ title.
+ """
+
+ name = proto.Field(proto.STRING, number=1)
+
+ payload_schema_uri = proto.Field(proto.STRING, number=2)
+
+ payload = proto.Field(proto.MESSAGE, number=3, message=struct.Value,)
+
+ create_time = proto.Field(proto.MESSAGE, number=4, message=timestamp.Timestamp,)
+
+ update_time = proto.Field(proto.MESSAGE, number=7, message=timestamp.Timestamp,)
+
+ etag = proto.Field(proto.STRING, number=8)
+
+ annotation_source = proto.Field(
+ proto.MESSAGE, number=5, message=user_action_reference.UserActionReference,
+ )
+
+ labels = proto.MapField(proto.STRING, proto.STRING, number=6)
+
+
+__all__ = tuple(sorted(__protobuf__.manifest))
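A minimal construction sketch for the proto-plus wrapper above; the schema URI, payload value, and label are illustrative only, and a real payload must match the schema referenced by `payload_schema_uri`.

```python
# Illustrative only: placeholder schema URI and payload.
from google.protobuf import struct_pb2

from google.cloud.aiplatform_v1.types import annotation

note = annotation.Annotation(
    payload_schema_uri="gs://google-cloud-aiplatform/schema/dataset/annotation/placeholder.yaml",
    payload=struct_pb2.Value(string_value="cat"),
    labels={"source": "manual-review"},  # user-defined label, map field above
)
print(note.payload_schema_uri, dict(note.labels))
```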
diff --git a/google/cloud/aiplatform_v1/types/annotation_spec.py b/google/cloud/aiplatform_v1/types/annotation_spec.py
new file mode 100644
index 0000000000..41f228ad72
--- /dev/null
+++ b/google/cloud/aiplatform_v1/types/annotation_spec.py
@@ -0,0 +1,65 @@
+# -*- coding: utf-8 -*-
+
+# Copyright 2020 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+import proto # type: ignore
+
+
+from google.protobuf import timestamp_pb2 as timestamp # type: ignore
+
+
+__protobuf__ = proto.module(
+ package="google.cloud.aiplatform.v1", manifest={"AnnotationSpec",},
+)
+
+
+class AnnotationSpec(proto.Message):
+ r"""Identifies a concept with which DataItems may be annotated
+ with.
+
+ Attributes:
+ name (str):
+ Output only. Resource name of the
+ AnnotationSpec.
+ display_name (str):
+ Required. The user-defined name of the
+ AnnotationSpec. The name can be up to 128
+ characters long and can consist of any UTF-8
+ characters.
+ create_time (google.protobuf.timestamp_pb2.Timestamp):
+ Output only. Timestamp when this
+ AnnotationSpec was created.
+ update_time (google.protobuf.timestamp_pb2.Timestamp):
+ Output only. Timestamp when AnnotationSpec
+ was last updated.
+ etag (str):
+ Optional. Used to perform consistent read-
+ modify-write updates. If not set, a blind
+ "overwrite" update happens.
+ """
+
+ name = proto.Field(proto.STRING, number=1)
+
+ display_name = proto.Field(proto.STRING, number=2)
+
+ create_time = proto.Field(proto.MESSAGE, number=3, message=timestamp.Timestamp,)
+
+ update_time = proto.Field(proto.MESSAGE, number=4, message=timestamp.Timestamp,)
+
+ etag = proto.Field(proto.STRING, number=5)
+
+
+__all__ = tuple(sorted(__protobuf__.manifest))
diff --git a/google/cloud/aiplatform_v1/types/batch_prediction_job.py b/google/cloud/aiplatform_v1/types/batch_prediction_job.py
new file mode 100644
index 0000000000..d2d8f02203
--- /dev/null
+++ b/google/cloud/aiplatform_v1/types/batch_prediction_job.py
@@ -0,0 +1,344 @@
+# -*- coding: utf-8 -*-
+
+# Copyright 2020 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+import proto # type: ignore
+
+
+from google.cloud.aiplatform_v1.types import completion_stats as gca_completion_stats
+from google.cloud.aiplatform_v1.types import encryption_spec as gca_encryption_spec
+from google.cloud.aiplatform_v1.types import io
+from google.cloud.aiplatform_v1.types import job_state
+from google.cloud.aiplatform_v1.types import machine_resources
+from google.cloud.aiplatform_v1.types import (
+ manual_batch_tuning_parameters as gca_manual_batch_tuning_parameters,
+)
+from google.protobuf import struct_pb2 as struct # type: ignore
+from google.protobuf import timestamp_pb2 as timestamp # type: ignore
+from google.rpc import status_pb2 as status # type: ignore
+
+
+__protobuf__ = proto.module(
+ package="google.cloud.aiplatform.v1", manifest={"BatchPredictionJob",},
+)
+
+
+class BatchPredictionJob(proto.Message):
+ r"""A job that uses a
+ ``Model`` to
+ produce predictions on multiple [input
+ instances][google.cloud.aiplatform.v1.BatchPredictionJob.input_config].
+ If predictions for a significant portion of the instances fail, the
+ job may finish without attempting predictions for all remaining
+ instances.
+
+ Attributes:
+ name (str):
+ Output only. Resource name of the
+ BatchPredictionJob.
+ display_name (str):
+ Required. The user-defined name of this
+ BatchPredictionJob.
+ model (str):
+ Required. The name of the Model that produces
+ the predictions via this job, must share the
+ same ancestor Location. Starting this job has no
+ impact on any existing deployments of the Model
+ and their resources.
+ input_config (google.cloud.aiplatform_v1.types.BatchPredictionJob.InputConfig):
+ Required. Input configuration of the instances on which
+ predictions are performed. The schema of any single instance
+ may be specified via the
+ [Model's][google.cloud.aiplatform.v1.BatchPredictionJob.model]
+ [PredictSchemata's][google.cloud.aiplatform.v1.Model.predict_schemata]
+ ``instance_schema_uri``.
+ model_parameters (google.protobuf.struct_pb2.Value):
+ The parameters that govern the predictions. The schema of
+ the parameters may be specified via the
+ [Model's][google.cloud.aiplatform.v1.BatchPredictionJob.model]
+ [PredictSchemata's][google.cloud.aiplatform.v1.Model.predict_schemata]
+ ``parameters_schema_uri``.
+ output_config (google.cloud.aiplatform_v1.types.BatchPredictionJob.OutputConfig):
+ Required. The Configuration specifying where output
+ predictions should be written. The schema of any single
+ prediction may be specified as a concatenation of
+ [Model's][google.cloud.aiplatform.v1.BatchPredictionJob.model]
+ [PredictSchemata's][google.cloud.aiplatform.v1.Model.predict_schemata]
+ ``instance_schema_uri``
+ and
+ ``prediction_schema_uri``.
+ dedicated_resources (google.cloud.aiplatform_v1.types.BatchDedicatedResources):
+ The config of resources used by the Model during the batch
+ prediction. If the Model
+ ``supports``
+ DEDICATED_RESOURCES this config may be provided (and the job
+ will use these resources); if the Model doesn't support
+ AUTOMATIC_RESOURCES, this config must be provided.
+ manual_batch_tuning_parameters (google.cloud.aiplatform_v1.types.ManualBatchTuningParameters):
+ Immutable. Parameters configuring the batch behavior.
+ Currently only applicable when
+ ``dedicated_resources``
+ are used (in other cases AI Platform does the tuning
+ itself).
+ output_info (google.cloud.aiplatform_v1.types.BatchPredictionJob.OutputInfo):
+ Output only. Information further describing
+ the output of this job.
+ state (google.cloud.aiplatform_v1.types.JobState):
+ Output only. The detailed state of the job.
+ error (google.rpc.status_pb2.Status):
+ Output only. Only populated when the job's state is
+ JOB_STATE_FAILED or JOB_STATE_CANCELLED.
+ partial_failures (Sequence[google.rpc.status_pb2.Status]):
+ Output only. Partial failures encountered.
+ For example, single files that can't be read.
+ This field never exceeds 20 entries.
+ Status details fields contain standard GCP error
+ details.
+ resources_consumed (google.cloud.aiplatform_v1.types.ResourcesConsumed):
+ Output only. Information about resources that
+ had been consumed by this job. Provided in real
+ time on a best effort basis, as well as a final
+ value once the job completes.
+
+ Note: This field currently may be not populated
+ for batch predictions that use AutoML Models.
+ completion_stats (google.cloud.aiplatform_v1.types.CompletionStats):
+ Output only. Statistics on completed and
+ failed prediction instances.
+ create_time (google.protobuf.timestamp_pb2.Timestamp):
+ Output only. Time when the BatchPredictionJob
+ was created.
+ start_time (google.protobuf.timestamp_pb2.Timestamp):
+ Output only. Time when the BatchPredictionJob for the first
+ time entered the ``JOB_STATE_RUNNING`` state.
+ end_time (google.protobuf.timestamp_pb2.Timestamp):
+ Output only. Time when the BatchPredictionJob entered any of
+ the following states: ``JOB_STATE_SUCCEEDED``,
+ ``JOB_STATE_FAILED``, ``JOB_STATE_CANCELLED``.
+ update_time (google.protobuf.timestamp_pb2.Timestamp):
+ Output only. Time when the BatchPredictionJob
+ was most recently updated.
+ labels (Sequence[google.cloud.aiplatform_v1.types.BatchPredictionJob.LabelsEntry]):
+ The labels with user-defined metadata to
+ organize BatchPredictionJobs.
+ Label keys and values can be no longer than 64
+ characters (Unicode codepoints), can only
+ contain lowercase letters, numeric characters,
+ underscores and dashes. International characters
+ are allowed.
+ See https://goo.gl/xmQnxf for more information
+ and examples of labels.
+ encryption_spec (google.cloud.aiplatform_v1.types.EncryptionSpec):
+ Customer-managed encryption key options for a
+ BatchPredictionJob. If this is set, then all
+ resources created by the BatchPredictionJob will
+ be encrypted with the provided encryption key.
+ """
+
+ class InputConfig(proto.Message):
+ r"""Configures the input to
+ ``BatchPredictionJob``.
+ See
+ ``Model.supported_input_storage_formats``
+ for Model's supported input formats, and how instances should be
+ expressed via any of them.
+
+ Attributes:
+ gcs_source (google.cloud.aiplatform_v1.types.GcsSource):
+ The Cloud Storage location for the input
+ instances.
+ bigquery_source (google.cloud.aiplatform_v1.types.BigQuerySource):
+ The BigQuery location of the input table.
+ The schema of the table should be in the format
+ described by the given context OpenAPI Schema,
+ if one is provided. The table may contain
+ additional columns that are not described by the
+ schema, and they will be ignored.
+ instances_format (str):
+ Required. The format in which instances are given, must be
+ one of the
+ [Model's][google.cloud.aiplatform.v1.BatchPredictionJob.model]
+ ``supported_input_storage_formats``.
+ """
+
+ gcs_source = proto.Field(
+ proto.MESSAGE, number=2, oneof="source", message=io.GcsSource,
+ )
+
+ bigquery_source = proto.Field(
+ proto.MESSAGE, number=3, oneof="source", message=io.BigQuerySource,
+ )
+
+ instances_format = proto.Field(proto.STRING, number=1)
+
+ class OutputConfig(proto.Message):
+ r"""Configures the output of
+ ``BatchPredictionJob``.
+ See
+ ``Model.supported_output_storage_formats``
+ for supported output formats, and how predictions are expressed via
+ any of them.
+
+ Attributes:
+ gcs_destination (google.cloud.aiplatform_v1.types.GcsDestination):
+ The Cloud Storage location of the directory where the output
+ is to be written to. In the given directory a new directory
+ is created. Its name is
+ ``prediction--``, where
+ timestamp is in YYYY-MM-DDThh:mm:ss.sssZ ISO-8601 format.
+ Inside of it files ``predictions_0001.``,
+ ``predictions_0002.``, ...,
+ ``predictions_N.`` are created where
+ ```` depends on chosen
+ ``predictions_format``,
+ and N may equal 0001 and depends on the total number of
+ successfully predicted instances. If the Model has both
+ ``instance``
+ and
+ ``prediction``
+ schemata defined then each such file contains predictions as
+ per the
+ ``predictions_format``.
+ If prediction for any instance failed (partially or
+ completely), then an additional ``errors_0001.``,
+ ``errors_0002.``,..., ``errors_N.``
+ files are created (N depends on total number of failed
+ predictions). These files contain the failed instances, as
+ per their schema, followed by an additional ``error`` field
+ which as value has ```google.rpc.Status`` `__
+ containing only ``code`` and ``message`` fields.
+ bigquery_destination (google.cloud.aiplatform_v1.types.BigQueryDestination):
+ The BigQuery project location where the output is to be
+ written to. In the given project a new dataset is created
+ with name
+ ``prediction__`` where
+ is made BigQuery-dataset-name compatible (for example, most
+ special characters become underscores), and timestamp is in
+ YYYY_MM_DDThh_mm_ss_sssZ "based on ISO-8601" format. In the
+ dataset two tables will be created, ``predictions``, and
+ ``errors``. If the Model has both
+ ``instance``
+ and
+ ``prediction``
+ schemata defined then the tables have columns as follows:
+ The ``predictions`` table contains instances for which the
+ prediction succeeded, it has columns as per a concatenation
+ of the Model's instance and prediction schemata. The
+ ``errors`` table contains rows for which the prediction has
+ failed, it has instance columns, as per the instance schema,
+ followed by a single "errors" column, which as values has
+ ```google.rpc.Status`` `__ represented as a STRUCT,
+ and containing only ``code`` and ``message``.
+ predictions_format (str):
+ Required. The format in which AI Platform gives the
+ predictions, must be one of the
+ [Model's][google.cloud.aiplatform.v1.BatchPredictionJob.model]
+
+ ``supported_output_storage_formats``.
+ """
+
+ gcs_destination = proto.Field(
+ proto.MESSAGE, number=2, oneof="destination", message=io.GcsDestination,
+ )
+
+ bigquery_destination = proto.Field(
+ proto.MESSAGE,
+ number=3,
+ oneof="destination",
+ message=io.BigQueryDestination,
+ )
+
+ predictions_format = proto.Field(proto.STRING, number=1)
+
+ class OutputInfo(proto.Message):
+ r"""Further describes this job's output. Supplements
+ ``output_config``.
+
+ Attributes:
+ gcs_output_directory (str):
+ Output only. The full path of the Cloud
+ Storage directory created, into which the
+ prediction output is written.
+ bigquery_output_dataset (str):
+ Output only. The path of the BigQuery dataset created, in
+ ``bq://projectId.bqDatasetId`` format, into which the
+ prediction output is written.
+ """
+
+ gcs_output_directory = proto.Field(
+ proto.STRING, number=1, oneof="output_location"
+ )
+
+ bigquery_output_dataset = proto.Field(
+ proto.STRING, number=2, oneof="output_location"
+ )
+
+ name = proto.Field(proto.STRING, number=1)
+
+ display_name = proto.Field(proto.STRING, number=2)
+
+ model = proto.Field(proto.STRING, number=3)
+
+ input_config = proto.Field(proto.MESSAGE, number=4, message=InputConfig,)
+
+ model_parameters = proto.Field(proto.MESSAGE, number=5, message=struct.Value,)
+
+ output_config = proto.Field(proto.MESSAGE, number=6, message=OutputConfig,)
+
+ dedicated_resources = proto.Field(
+ proto.MESSAGE, number=7, message=machine_resources.BatchDedicatedResources,
+ )
+
+ manual_batch_tuning_parameters = proto.Field(
+ proto.MESSAGE,
+ number=8,
+ message=gca_manual_batch_tuning_parameters.ManualBatchTuningParameters,
+ )
+
+ output_info = proto.Field(proto.MESSAGE, number=9, message=OutputInfo,)
+
+ state = proto.Field(proto.ENUM, number=10, enum=job_state.JobState,)
+
+ error = proto.Field(proto.MESSAGE, number=11, message=status.Status,)
+
+ partial_failures = proto.RepeatedField(
+ proto.MESSAGE, number=12, message=status.Status,
+ )
+
+ resources_consumed = proto.Field(
+ proto.MESSAGE, number=13, message=machine_resources.ResourcesConsumed,
+ )
+
+ completion_stats = proto.Field(
+ proto.MESSAGE, number=14, message=gca_completion_stats.CompletionStats,
+ )
+
+ create_time = proto.Field(proto.MESSAGE, number=15, message=timestamp.Timestamp,)
+
+ start_time = proto.Field(proto.MESSAGE, number=16, message=timestamp.Timestamp,)
+
+ end_time = proto.Field(proto.MESSAGE, number=17, message=timestamp.Timestamp,)
+
+ update_time = proto.Field(proto.MESSAGE, number=18, message=timestamp.Timestamp,)
+
+ labels = proto.MapField(proto.STRING, proto.STRING, number=19)
+
+ encryption_spec = proto.Field(
+ proto.MESSAGE, number=24, message=gca_encryption_spec.EncryptionSpec,
+ )
+
+
+__all__ = tuple(sorted(__protobuf__.manifest))
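A hedged construction sketch for the message above, showing how assigning `gcs_source` / `gcs_destination` selects the `source` / `destination` oneofs; the `GcsSource.uris` and `GcsDestination.output_uri_prefix` fields come from the v1 `io` module, which is not part of this excerpt, and all names are placeholders.

```python
from google.cloud.aiplatform_v1.types import batch_prediction_job, io

job = batch_prediction_job.BatchPredictionJob(
    display_name="nightly-scoring",
    model="projects/my-project/locations/us-central1/models/456",
    input_config=batch_prediction_job.BatchPredictionJob.InputConfig(
        instances_format="jsonl",
        # Assumed v1 io field: repeated list of Cloud Storage URIs.
        gcs_source=io.GcsSource(uris=["gs://my-bucket/instances.jsonl"]),
    ),
    output_config=batch_prediction_job.BatchPredictionJob.OutputConfig(
        predictions_format="jsonl",
        # Assumed v1 io field: output directory prefix.
        gcs_destination=io.GcsDestination(output_uri_prefix="gs://my-bucket/out/"),
    ),
)
print(job.input_config.instances_format, job.output_config.predictions_format)
```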
diff --git a/google/cloud/aiplatform_v1/types/completion_stats.py b/google/cloud/aiplatform_v1/types/completion_stats.py
new file mode 100644
index 0000000000..05648d82c4
--- /dev/null
+++ b/google/cloud/aiplatform_v1/types/completion_stats.py
@@ -0,0 +1,55 @@
+# -*- coding: utf-8 -*-
+
+# Copyright 2020 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+import proto # type: ignore
+
+
+__protobuf__ = proto.module(
+ package="google.cloud.aiplatform.v1", manifest={"CompletionStats",},
+)
+
+
+class CompletionStats(proto.Message):
+ r"""Success and error statistics of processing multiple entities
+ (for example, DataItems or structured data rows) in batch.
+
+ Attributes:
+ successful_count (int):
+ Output only. The number of entities that had
+ been processed successfully.
+ failed_count (int):
+ Output only. The number of entities for which
+ any error was encountered.
+ incomplete_count (int):
+ Output only. In cases when enough errors are
+ encountered a job, pipeline, or operation may be
+ failed as a whole. Below is the number of
+ entities for which the processing had not been
+ finished (either in successful or failed state).
+ Set to -1 if the number is unknown (for example,
+ the operation failed before the total entity
+ number could be collected).
+ """
+
+ successful_count = proto.Field(proto.INT64, number=1)
+
+ failed_count = proto.Field(proto.INT64, number=2)
+
+ incomplete_count = proto.Field(proto.INT64, number=3)
+
+
+__all__ = tuple(sorted(__protobuf__.manifest))
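A small sketch of reading these counters; they are output-only and populated by the service, so constructing the message locally here is purely illustrative.

```python
from google.cloud.aiplatform_v1.types import completion_stats

# Illustrative values; in practice these arrive on a job resource.
stats = completion_stats.CompletionStats(
    successful_count=980, failed_count=15, incomplete_count=5
)
total = stats.successful_count + stats.failed_count + stats.incomplete_count
print(f"{stats.successful_count}/{total} entities processed successfully")
```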
diff --git a/google/cloud/aiplatform_v1/types/custom_job.py b/google/cloud/aiplatform_v1/types/custom_job.py
new file mode 100644
index 0000000000..c97cba6d82
--- /dev/null
+++ b/google/cloud/aiplatform_v1/types/custom_job.py
@@ -0,0 +1,318 @@
+# -*- coding: utf-8 -*-
+
+# Copyright 2020 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+import proto # type: ignore
+
+
+from google.cloud.aiplatform_v1.types import encryption_spec as gca_encryption_spec
+from google.cloud.aiplatform_v1.types import env_var
+from google.cloud.aiplatform_v1.types import io
+from google.cloud.aiplatform_v1.types import job_state
+from google.cloud.aiplatform_v1.types import machine_resources
+from google.protobuf import duration_pb2 as duration # type: ignore
+from google.protobuf import timestamp_pb2 as timestamp # type: ignore
+from google.rpc import status_pb2 as status # type: ignore
+
+
+__protobuf__ = proto.module(
+ package="google.cloud.aiplatform.v1",
+ manifest={
+ "CustomJob",
+ "CustomJobSpec",
+ "WorkerPoolSpec",
+ "ContainerSpec",
+ "PythonPackageSpec",
+ "Scheduling",
+ },
+)
+
+
+class CustomJob(proto.Message):
+ r"""Represents a job that runs custom workloads such as a Docker
+ container or a Python package. A CustomJob can have multiple
+ worker pools and each worker pool can have its own machine and
+ input spec. A CustomJob will be cleaned up once the job enters
+ terminal state (failed or succeeded).
+
+ Attributes:
+ name (str):
+ Output only. Resource name of a CustomJob.
+ display_name (str):
+ Required. The display name of the CustomJob.
+ The name can be up to 128 characters long and
+            can consist of any UTF-8 characters.
+ job_spec (google.cloud.aiplatform_v1.types.CustomJobSpec):
+ Required. Job spec.
+ state (google.cloud.aiplatform_v1.types.JobState):
+ Output only. The detailed state of the job.
+ create_time (google.protobuf.timestamp_pb2.Timestamp):
+ Output only. Time when the CustomJob was
+ created.
+ start_time (google.protobuf.timestamp_pb2.Timestamp):
+ Output only. Time when the CustomJob for the first time
+ entered the ``JOB_STATE_RUNNING`` state.
+ end_time (google.protobuf.timestamp_pb2.Timestamp):
+ Output only. Time when the CustomJob entered any of the
+ following states: ``JOB_STATE_SUCCEEDED``,
+ ``JOB_STATE_FAILED``, ``JOB_STATE_CANCELLED``.
+ update_time (google.protobuf.timestamp_pb2.Timestamp):
+ Output only. Time when the CustomJob was most
+ recently updated.
+ error (google.rpc.status_pb2.Status):
+            Output only. Only populated when the job's state is
+ ``JOB_STATE_FAILED`` or ``JOB_STATE_CANCELLED``.
+ labels (Sequence[google.cloud.aiplatform_v1.types.CustomJob.LabelsEntry]):
+ The labels with user-defined metadata to
+ organize CustomJobs.
+ Label keys and values can be no longer than 64
+ characters (Unicode codepoints), can only
+ contain lowercase letters, numeric characters,
+ underscores and dashes. International characters
+ are allowed.
+ See https://goo.gl/xmQnxf for more information
+ and examples of labels.
+ encryption_spec (google.cloud.aiplatform_v1.types.EncryptionSpec):
+ Customer-managed encryption key options for a
+ CustomJob. If this is set, then all resources
+ created by the CustomJob will be encrypted with
+ the provided encryption key.
+ """
+
+ name = proto.Field(proto.STRING, number=1)
+
+ display_name = proto.Field(proto.STRING, number=2)
+
+ job_spec = proto.Field(proto.MESSAGE, number=4, message="CustomJobSpec",)
+
+ state = proto.Field(proto.ENUM, number=5, enum=job_state.JobState,)
+
+ create_time = proto.Field(proto.MESSAGE, number=6, message=timestamp.Timestamp,)
+
+ start_time = proto.Field(proto.MESSAGE, number=7, message=timestamp.Timestamp,)
+
+ end_time = proto.Field(proto.MESSAGE, number=8, message=timestamp.Timestamp,)
+
+ update_time = proto.Field(proto.MESSAGE, number=9, message=timestamp.Timestamp,)
+
+ error = proto.Field(proto.MESSAGE, number=10, message=status.Status,)
+
+ labels = proto.MapField(proto.STRING, proto.STRING, number=11)
+
+ encryption_spec = proto.Field(
+ proto.MESSAGE, number=12, message=gca_encryption_spec.EncryptionSpec,
+ )
+
+
+class CustomJobSpec(proto.Message):
+ r"""Represents the spec of a CustomJob.
+
+ Attributes:
+ worker_pool_specs (Sequence[google.cloud.aiplatform_v1.types.WorkerPoolSpec]):
+ Required. The spec of the worker pools
+ including machine type and Docker image.
+ scheduling (google.cloud.aiplatform_v1.types.Scheduling):
+ Scheduling options for a CustomJob.
+ service_account (str):
+ Specifies the service account for workload
+ run-as account. Users submitting jobs must have
+ act-as permission on this run-as account. If
+ unspecified, the AI Platform Custom Code Service
+ Agent for the CustomJob's project is used.
+ network (str):
+ The full name of the Compute Engine
+ `network `__
+ to which the Job should be peered. For example,
+ ``projects/12345/global/networks/myVPC``.
+ `Format `__
+ is of the form
+ ``projects/{project}/global/networks/{network}``. Where
+ {project} is a project number, as in ``12345``, and
+ {network} is a network name.
+
+ Private services access must already be configured for the
+ network. If left unspecified, the job is not peered with any
+ network.
+ base_output_directory (google.cloud.aiplatform_v1.types.GcsDestination):
+ The Cloud Storage location to store the output of this
+ CustomJob or HyperparameterTuningJob. For
+ HyperparameterTuningJob, the baseOutputDirectory of each
+ child CustomJob backing a Trial is set to a subdirectory of
+ name ``id`` under its
+ parent HyperparameterTuningJob's baseOutputDirectory.
+
+ The following AI Platform environment variables will be
+ passed to containers or python modules when this field is
+ set:
+
+ For CustomJob:
+
+ - AIP_MODEL_DIR = ``/model/``
+ - AIP_CHECKPOINT_DIR =
+ ``/checkpoints/``
+ - AIP_TENSORBOARD_LOG_DIR =
+ ``/logs/``
+
+ For CustomJob backing a Trial of HyperparameterTuningJob:
+
+ - AIP_MODEL_DIR =
+ ``//model/``
+ - AIP_CHECKPOINT_DIR =
+ ``//checkpoints/``
+ - AIP_TENSORBOARD_LOG_DIR =
+ ``//logs/``
+ """
+
+ worker_pool_specs = proto.RepeatedField(
+ proto.MESSAGE, number=1, message="WorkerPoolSpec",
+ )
+
+ scheduling = proto.Field(proto.MESSAGE, number=3, message="Scheduling",)
+
+ service_account = proto.Field(proto.STRING, number=4)
+
+ network = proto.Field(proto.STRING, number=5)
+
+ base_output_directory = proto.Field(
+ proto.MESSAGE, number=6, message=io.GcsDestination,
+ )
+
+
+class WorkerPoolSpec(proto.Message):
+ r"""Represents the spec of a worker pool in a job.
+
+ Attributes:
+ container_spec (google.cloud.aiplatform_v1.types.ContainerSpec):
+ The custom container task.
+ python_package_spec (google.cloud.aiplatform_v1.types.PythonPackageSpec):
+ The Python packaged task.
+ machine_spec (google.cloud.aiplatform_v1.types.MachineSpec):
+ Optional. Immutable. The specification of a
+ single machine.
+ replica_count (int):
+ Optional. The number of worker replicas to
+ use for this worker pool.
+ disk_spec (google.cloud.aiplatform_v1.types.DiskSpec):
+ Disk spec.
+ """
+
+ container_spec = proto.Field(
+ proto.MESSAGE, number=6, oneof="task", message="ContainerSpec",
+ )
+
+ python_package_spec = proto.Field(
+ proto.MESSAGE, number=7, oneof="task", message="PythonPackageSpec",
+ )
+
+ machine_spec = proto.Field(
+ proto.MESSAGE, number=1, message=machine_resources.MachineSpec,
+ )
+
+ replica_count = proto.Field(proto.INT64, number=2)
+
+ disk_spec = proto.Field(
+ proto.MESSAGE, number=5, message=machine_resources.DiskSpec,
+ )
+
+
+class ContainerSpec(proto.Message):
+ r"""The spec of a Container.
+
+ Attributes:
+ image_uri (str):
+ Required. The URI of a container image in the
+ Container Registry that is to be run on each
+ worker replica.
+ command (Sequence[str]):
+ The command to be invoked when the container
+ is started. It overrides the entrypoint
+            instruction in the Dockerfile when provided.
+ args (Sequence[str]):
+ The arguments to be passed when starting the
+ container.
+ env (Sequence[google.cloud.aiplatform_v1.types.EnvVar]):
+ Environment variables to be passed to the
+ container.
+ """
+
+ image_uri = proto.Field(proto.STRING, number=1)
+
+ command = proto.RepeatedField(proto.STRING, number=2)
+
+ args = proto.RepeatedField(proto.STRING, number=3)
+
+ env = proto.RepeatedField(proto.MESSAGE, number=4, message=env_var.EnvVar,)
+
+
+class PythonPackageSpec(proto.Message):
+ r"""The spec of a Python packaged code.
+
+ Attributes:
+ executor_image_uri (str):
+ Required. The URI of a container image in the
+ Container Registry that will run the provided
+            python package. AI Platform provides a wide range
+ of executor images with pre-installed packages
+ to meet users' various use cases. Only one of
+ the provided images can be set here.
+ package_uris (Sequence[str]):
+ Required. The Google Cloud Storage location
+ of the Python package files which are the
+ training program and its dependent packages. The
+ maximum number of package URIs is 100.
+ python_module (str):
+ Required. The Python module name to run after
+ installing the packages.
+ args (Sequence[str]):
+ Command line arguments to be passed to the
+ Python task.
+ env (Sequence[google.cloud.aiplatform_v1.types.EnvVar]):
+ Environment variables to be passed to the
+ python module.
+ """
+
+ executor_image_uri = proto.Field(proto.STRING, number=1)
+
+ package_uris = proto.RepeatedField(proto.STRING, number=2)
+
+ python_module = proto.Field(proto.STRING, number=3)
+
+ args = proto.RepeatedField(proto.STRING, number=4)
+
+ env = proto.RepeatedField(proto.MESSAGE, number=5, message=env_var.EnvVar,)
+
+
+class Scheduling(proto.Message):
+ r"""All parameters related to queuing and scheduling of custom
+ jobs.
+
+ Attributes:
+ timeout (google.protobuf.duration_pb2.Duration):
+ The maximum job running time. The default is
+ 7 days.
+ restart_job_on_worker_restart (bool):
+ Restarts the entire CustomJob if a worker
+ gets restarted. This feature can be used by
+ distributed training jobs that are not resilient
+ to workers leaving and joining a job.
+ """
+
+ timeout = proto.Field(proto.MESSAGE, number=1, message=duration.Duration,)
+
+ restart_job_on_worker_restart = proto.Field(proto.BOOL, number=3)
+
+
+__all__ = tuple(sorted(__protobuf__.manifest))
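
Because ``WorkerPoolSpec`` keeps ``container_spec`` and ``python_package_spec`` in the ``task`` oneof, only one of them can be populated per pool. A minimal construction sketch (the display name, machine type, image URI, and arguments are illustrative placeholders):

    from google.cloud.aiplatform_v1.types import custom_job, machine_resources

    job = custom_job.CustomJob(
        display_name="example-training-job",
        job_spec=custom_job.CustomJobSpec(
            worker_pool_specs=[
                custom_job.WorkerPoolSpec(
                    machine_spec=machine_resources.MachineSpec(
                        machine_type="n1-standard-4",
                    ),
                    replica_count=1,
                    # Setting container_spec selects the "task" oneof;
                    # assigning python_package_spec instead would clear it.
                    container_spec=custom_job.ContainerSpec(
                        image_uri="gcr.io/example-project/trainer:latest",
                        args=["--epochs", "10"],
                    ),
                )
            ],
        ),
    )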
diff --git a/google/cloud/aiplatform_v1/types/data_item.py b/google/cloud/aiplatform_v1/types/data_item.py
new file mode 100644
index 0000000000..20ff14a0d8
--- /dev/null
+++ b/google/cloud/aiplatform_v1/types/data_item.py
@@ -0,0 +1,84 @@
+# -*- coding: utf-8 -*-
+
+# Copyright 2020 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+import proto # type: ignore
+
+
+from google.protobuf import struct_pb2 as struct # type: ignore
+from google.protobuf import timestamp_pb2 as timestamp # type: ignore
+
+
+__protobuf__ = proto.module(
+ package="google.cloud.aiplatform.v1", manifest={"DataItem",},
+)
+
+
+class DataItem(proto.Message):
+ r"""A piece of data in a Dataset. Could be an image, a video, a
+ document or plain text.
+
+ Attributes:
+ name (str):
+ Output only. The resource name of the
+ DataItem.
+ create_time (google.protobuf.timestamp_pb2.Timestamp):
+ Output only. Timestamp when this DataItem was
+ created.
+ update_time (google.protobuf.timestamp_pb2.Timestamp):
+ Output only. Timestamp when this DataItem was
+ last updated.
+ labels (Sequence[google.cloud.aiplatform_v1.types.DataItem.LabelsEntry]):
+ Optional. The labels with user-defined
+ metadata to organize your DataItems.
+ Label keys and values can be no longer than 64
+ characters (Unicode codepoints), can only
+ contain lowercase letters, numeric characters,
+ underscores and dashes. International characters
+ are allowed. No more than 64 user labels can be
+            associated with one DataItem (System labels are
+ excluded).
+
+ See https://goo.gl/xmQnxf for more information
+ and examples of labels. System reserved label
+ keys are prefixed with
+ "aiplatform.googleapis.com/" and are immutable.
+ payload (google.protobuf.struct_pb2.Value):
+ Required. The data that the DataItem represents (for
+ example, an image or a text snippet). The schema of the
+ payload is stored in the parent Dataset's [metadata
+ schema's][google.cloud.aiplatform.v1.Dataset.metadata_schema_uri]
+ dataItemSchemaUri field.
+ etag (str):
+            Optional. Used to perform consistent read-
+            modify-write updates. If not set, a blind
+ "overwrite" update happens.
+ """
+
+ name = proto.Field(proto.STRING, number=1)
+
+ create_time = proto.Field(proto.MESSAGE, number=2, message=timestamp.Timestamp,)
+
+ update_time = proto.Field(proto.MESSAGE, number=6, message=timestamp.Timestamp,)
+
+ labels = proto.MapField(proto.STRING, proto.STRING, number=3)
+
+ payload = proto.Field(proto.MESSAGE, number=4, message=struct.Value,)
+
+ etag = proto.Field(proto.STRING, number=7)
+
+
+__all__ = tuple(sorted(__protobuf__.manifest))
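
A minimal construction sketch of ``DataItem`` (the label and payload below are placeholders; it is assumed here that proto-plus accepts a raw ``struct_pb2.Value`` for the ``payload`` field):

    from google.protobuf import struct_pb2
    from google.cloud.aiplatform_v1.types import data_item

    # The payload schema is defined by the parent Dataset's dataItemSchemaUri;
    # a plain string value is used here only for illustration.
    item = data_item.DataItem(
        labels={"split": "train"},
        payload=struct_pb2.Value(string_value="a text snippet"),
    )
    print(item.labels["split"])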
diff --git a/google/cloud/aiplatform_v1/types/data_labeling_job.py b/google/cloud/aiplatform_v1/types/data_labeling_job.py
new file mode 100644
index 0000000000..e1058737bf
--- /dev/null
+++ b/google/cloud/aiplatform_v1/types/data_labeling_job.py
@@ -0,0 +1,275 @@
+# -*- coding: utf-8 -*-
+
+# Copyright 2020 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+import proto # type: ignore
+
+
+from google.cloud.aiplatform_v1.types import encryption_spec as gca_encryption_spec
+from google.cloud.aiplatform_v1.types import job_state
+from google.protobuf import struct_pb2 as struct # type: ignore
+from google.protobuf import timestamp_pb2 as timestamp # type: ignore
+from google.rpc import status_pb2 as status # type: ignore
+from google.type import money_pb2 as money # type: ignore
+
+
+__protobuf__ = proto.module(
+ package="google.cloud.aiplatform.v1",
+ manifest={
+ "DataLabelingJob",
+ "ActiveLearningConfig",
+ "SampleConfig",
+ "TrainingConfig",
+ },
+)
+
+
+class DataLabelingJob(proto.Message):
+ r"""DataLabelingJob is used to trigger a human labeling job on
+    unlabeled data from the specified Dataset.
+
+ Attributes:
+ name (str):
+ Output only. Resource name of the
+ DataLabelingJob.
+ display_name (str):
+ Required. The user-defined name of the
+ DataLabelingJob. The name can be up to 128
+            characters long and can consist of any UTF-8
+ characters.
+ Display name of a DataLabelingJob.
+ datasets (Sequence[str]):
+ Required. Dataset resource names. Right now we only support
+ labeling from a single Dataset. Format:
+ ``projects/{project}/locations/{location}/datasets/{dataset}``
+ annotation_labels (Sequence[google.cloud.aiplatform_v1.types.DataLabelingJob.AnnotationLabelsEntry]):
+ Labels to assign to annotations generated by
+ this DataLabelingJob.
+ Label keys and values can be no longer than 64
+ characters (Unicode codepoints), can only
+ contain lowercase letters, numeric characters,
+ underscores and dashes. International characters
+ are allowed. See https://goo.gl/xmQnxf for more
+ information and examples of labels. System
+ reserved label keys are prefixed with
+ "aiplatform.googleapis.com/" and are immutable.
+ labeler_count (int):
+ Required. Number of labelers to work on each
+ DataItem.
+ instruction_uri (str):
+ Required. The Google Cloud Storage location
+ of the instruction pdf. This pdf is shared with
+            labelers, and provides a detailed description of
+ how to label DataItems in Datasets.
+ inputs_schema_uri (str):
+ Required. Points to a YAML file stored on
+ Google Cloud Storage describing the config for a
+ specific type of DataLabelingJob. The schema
+ files that can be used here are found in the
+ https://storage.googleapis.com/google-cloud-
+ aiplatform bucket in the
+ /schema/datalabelingjob/inputs/ folder.
+ inputs (google.protobuf.struct_pb2.Value):
+ Required. Input config parameters for the
+ DataLabelingJob.
+ state (google.cloud.aiplatform_v1.types.JobState):
+ Output only. The detailed state of the job.
+ labeling_progress (int):
+ Output only. Current labeling job progress percentage scaled
+ in interval [0, 100], indicating the percentage of DataItems
+            that have been finished.
+ current_spend (google.type.money_pb2.Money):
+            Output only. Estimated cost (in US dollars)
+ that the DataLabelingJob has incurred to date.
+ create_time (google.protobuf.timestamp_pb2.Timestamp):
+ Output only. Timestamp when this
+ DataLabelingJob was created.
+ update_time (google.protobuf.timestamp_pb2.Timestamp):
+ Output only. Timestamp when this
+ DataLabelingJob was updated most recently.
+ error (google.rpc.status_pb2.Status):
+ Output only. DataLabelingJob errors. It is only populated
+            when the job's state is ``JOB_STATE_FAILED`` or
+ ``JOB_STATE_CANCELLED``.
+ labels (Sequence[google.cloud.aiplatform_v1.types.DataLabelingJob.LabelsEntry]):
+ The labels with user-defined metadata to organize your
+ DataLabelingJobs.
+
+ Label keys and values can be no longer than 64 characters
+ (Unicode codepoints), can only contain lowercase letters,
+ numeric characters, underscores and dashes. International
+ characters are allowed.
+
+ See https://goo.gl/xmQnxf for more information and examples
+ of labels. System reserved label keys are prefixed with
+ "aiplatform.googleapis.com/" and are immutable. Following
+ system labels exist for each DataLabelingJob:
+
+ - "aiplatform.googleapis.com/schema": output only, its
+ value is the
+ ``inputs_schema``'s
+ title.
+ specialist_pools (Sequence[str]):
+ The SpecialistPools' resource names
+ associated with this job.
+ encryption_spec (google.cloud.aiplatform_v1.types.EncryptionSpec):
+ Customer-managed encryption key spec for a
+ DataLabelingJob. If set, this DataLabelingJob
+ will be secured by this key.
+ Note: Annotations created in the DataLabelingJob
+ are associated with the EncryptionSpec of the
+ Dataset they are exported to.
+ active_learning_config (google.cloud.aiplatform_v1.types.ActiveLearningConfig):
+ Parameters that configure the active learning
+ pipeline. Active learning will label the data
+ incrementally via several iterations. For every
+ iteration, it will select a batch of data based
+ on the sampling strategy.
+ """
+
+ name = proto.Field(proto.STRING, number=1)
+
+ display_name = proto.Field(proto.STRING, number=2)
+
+ datasets = proto.RepeatedField(proto.STRING, number=3)
+
+ annotation_labels = proto.MapField(proto.STRING, proto.STRING, number=12)
+
+ labeler_count = proto.Field(proto.INT32, number=4)
+
+ instruction_uri = proto.Field(proto.STRING, number=5)
+
+ inputs_schema_uri = proto.Field(proto.STRING, number=6)
+
+ inputs = proto.Field(proto.MESSAGE, number=7, message=struct.Value,)
+
+ state = proto.Field(proto.ENUM, number=8, enum=job_state.JobState,)
+
+ labeling_progress = proto.Field(proto.INT32, number=13)
+
+ current_spend = proto.Field(proto.MESSAGE, number=14, message=money.Money,)
+
+ create_time = proto.Field(proto.MESSAGE, number=9, message=timestamp.Timestamp,)
+
+ update_time = proto.Field(proto.MESSAGE, number=10, message=timestamp.Timestamp,)
+
+ error = proto.Field(proto.MESSAGE, number=22, message=status.Status,)
+
+ labels = proto.MapField(proto.STRING, proto.STRING, number=11)
+
+ specialist_pools = proto.RepeatedField(proto.STRING, number=16)
+
+ encryption_spec = proto.Field(
+ proto.MESSAGE, number=20, message=gca_encryption_spec.EncryptionSpec,
+ )
+
+ active_learning_config = proto.Field(
+ proto.MESSAGE, number=21, message="ActiveLearningConfig",
+ )
+
+
+class ActiveLearningConfig(proto.Message):
+ r"""Parameters that configure the active learning pipeline.
+ Active learning will label the data incrementally by several
+ iterations. For every iteration, it will select a batch of data
+ based on the sampling strategy.
+
+ Attributes:
+ max_data_item_count (int):
+ Max number of human labeled DataItems.
+ max_data_item_percentage (int):
+ Max percent of total DataItems for human
+ labeling.
+ sample_config (google.cloud.aiplatform_v1.types.SampleConfig):
+ Active learning data sampling config. For
+ every active learning labeling iteration, it
+ will select a batch of data based on the
+ sampling strategy.
+ training_config (google.cloud.aiplatform_v1.types.TrainingConfig):
+ CMLE training config. For every active
+            learning labeling iteration, the system will
+            train a machine learning model on CMLE. The trained
+            model will be used by the data sampling algorithm to
+ select DataItems.
+ """
+
+ max_data_item_count = proto.Field(
+ proto.INT64, number=1, oneof="human_labeling_budget"
+ )
+
+ max_data_item_percentage = proto.Field(
+ proto.INT32, number=2, oneof="human_labeling_budget"
+ )
+
+ sample_config = proto.Field(proto.MESSAGE, number=3, message="SampleConfig",)
+
+ training_config = proto.Field(proto.MESSAGE, number=4, message="TrainingConfig",)
+
+
+class SampleConfig(proto.Message):
+ r"""Active learning data sampling config. For every active
+ learning labeling iteration, it will select a batch of data
+ based on the sampling strategy.
+
+ Attributes:
+ initial_batch_sample_percentage (int):
+ The percentage of data needed to be labeled
+ in the first batch.
+ following_batch_sample_percentage (int):
+ The percentage of data needed to be labeled
+ in each following batch (except the first
+ batch).
+ sample_strategy (google.cloud.aiplatform_v1.types.SampleConfig.SampleStrategy):
+ Field to choose sampling strategy. Sampling
+ strategy will decide which data should be
+ selected for human labeling in every batch.
+ """
+
+ class SampleStrategy(proto.Enum):
+ r"""Sample strategy decides which subset of DataItems should be
+ selected for human labeling in every batch.
+ """
+ SAMPLE_STRATEGY_UNSPECIFIED = 0
+ UNCERTAINTY = 1
+
+ initial_batch_sample_percentage = proto.Field(
+ proto.INT32, number=1, oneof="initial_batch_sample_size"
+ )
+
+ following_batch_sample_percentage = proto.Field(
+ proto.INT32, number=3, oneof="following_batch_sample_size"
+ )
+
+ sample_strategy = proto.Field(proto.ENUM, number=5, enum=SampleStrategy,)
+
+
+class TrainingConfig(proto.Message):
+ r"""CMLE training config. For every active learning labeling
+    iteration, the system will train a machine learning model on CMLE.
+    The trained model will be used by the data sampling algorithm to
+ select DataItems.
+
+ Attributes:
+ timeout_training_milli_hours (int):
+ The timeout hours for the CMLE training job,
+            expressed in milli hours, i.e. a value of 1,000
+            in this field means 1 hour.
+ """
+
+ timeout_training_milli_hours = proto.Field(proto.INT64, number=1)
+
+
+__all__ = tuple(sorted(__protobuf__.manifest))
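
The labeling budget in ``ActiveLearningConfig`` is a oneof, so a caller picks either an absolute count or a percentage, not both. A minimal sketch with placeholder numbers:

    from google.cloud.aiplatform_v1.types import data_labeling_job

    config = data_labeling_job.ActiveLearningConfig(
        # "human_labeling_budget" is a oneof: setting max_data_item_count
        # instead would clear max_data_item_percentage, and vice versa.
        max_data_item_percentage=20,
        sample_config=data_labeling_job.SampleConfig(
            initial_batch_sample_percentage=10,
            following_batch_sample_percentage=5,
            sample_strategy=data_labeling_job.SampleConfig.SampleStrategy.UNCERTAINTY,
        ),
        training_config=data_labeling_job.TrainingConfig(
            # 1,000 milli hours == 1 hour of training time per iteration.
            timeout_training_milli_hours=1000,
        ),
    )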
diff --git a/google/cloud/aiplatform_v1/types/dataset.py b/google/cloud/aiplatform_v1/types/dataset.py
new file mode 100644
index 0000000000..2f75dce0d5
--- /dev/null
+++ b/google/cloud/aiplatform_v1/types/dataset.py
@@ -0,0 +1,185 @@
+# -*- coding: utf-8 -*-
+
+# Copyright 2020 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+import proto # type: ignore
+
+
+from google.cloud.aiplatform_v1.types import encryption_spec as gca_encryption_spec
+from google.cloud.aiplatform_v1.types import io
+from google.protobuf import struct_pb2 as struct # type: ignore
+from google.protobuf import timestamp_pb2 as timestamp # type: ignore
+
+
+__protobuf__ = proto.module(
+ package="google.cloud.aiplatform.v1",
+ manifest={"Dataset", "ImportDataConfig", "ExportDataConfig",},
+)
+
+
+class Dataset(proto.Message):
+ r"""A collection of DataItems and Annotations on them.
+
+ Attributes:
+ name (str):
+ Output only. The resource name of the
+ Dataset.
+ display_name (str):
+ Required. The user-defined name of the
+ Dataset. The name can be up to 128 characters
+            long and can consist of any UTF-8 characters.
+ metadata_schema_uri (str):
+ Required. Points to a YAML file stored on
+ Google Cloud Storage describing additional
+ information about the Dataset. The schema is
+ defined as an OpenAPI 3.0.2 Schema Object. The
+ schema files that can be used here are found in
+ gs://google-cloud-
+ aiplatform/schema/dataset/metadata/.
+ metadata (google.protobuf.struct_pb2.Value):
+ Required. Additional information about the
+ Dataset.
+ create_time (google.protobuf.timestamp_pb2.Timestamp):
+ Output only. Timestamp when this Dataset was
+ created.
+ update_time (google.protobuf.timestamp_pb2.Timestamp):
+ Output only. Timestamp when this Dataset was
+ last updated.
+ etag (str):
+ Used to perform consistent read-modify-write
+ updates. If not set, a blind "overwrite" update
+ happens.
+ labels (Sequence[google.cloud.aiplatform_v1.types.Dataset.LabelsEntry]):
+ The labels with user-defined metadata to organize your
+ Datasets.
+
+ Label keys and values can be no longer than 64 characters
+ (Unicode codepoints), can only contain lowercase letters,
+ numeric characters, underscores and dashes. International
+ characters are allowed. No more than 64 user labels can be
+ associated with one Dataset (System labels are excluded).
+
+ See https://goo.gl/xmQnxf for more information and examples
+ of labels. System reserved label keys are prefixed with
+ "aiplatform.googleapis.com/" and are immutable. Following
+ system labels exist for each Dataset:
+
+ - "aiplatform.googleapis.com/dataset_metadata_schema":
+ output only, its value is the
+ [metadata_schema's][google.cloud.aiplatform.v1.Dataset.metadata_schema_uri]
+ title.
+ encryption_spec (google.cloud.aiplatform_v1.types.EncryptionSpec):
+ Customer-managed encryption key spec for a
+ Dataset. If set, this Dataset and all sub-
+ resources of this Dataset will be secured by
+ this key.
+ """
+
+ name = proto.Field(proto.STRING, number=1)
+
+ display_name = proto.Field(proto.STRING, number=2)
+
+ metadata_schema_uri = proto.Field(proto.STRING, number=3)
+
+ metadata = proto.Field(proto.MESSAGE, number=8, message=struct.Value,)
+
+ create_time = proto.Field(proto.MESSAGE, number=4, message=timestamp.Timestamp,)
+
+ update_time = proto.Field(proto.MESSAGE, number=5, message=timestamp.Timestamp,)
+
+ etag = proto.Field(proto.STRING, number=6)
+
+ labels = proto.MapField(proto.STRING, proto.STRING, number=7)
+
+ encryption_spec = proto.Field(
+ proto.MESSAGE, number=11, message=gca_encryption_spec.EncryptionSpec,
+ )
+
+
+class ImportDataConfig(proto.Message):
+ r"""Describes the location from where we import data into a
+ Dataset, together with the labels that will be applied to the
+ DataItems and the Annotations.
+
+ Attributes:
+ gcs_source (google.cloud.aiplatform_v1.types.GcsSource):
+ The Google Cloud Storage location for the
+ input content.
+ data_item_labels (Sequence[google.cloud.aiplatform_v1.types.ImportDataConfig.DataItemLabelsEntry]):
+ Labels that will be applied to newly imported DataItems. If
+            a DataItem identical to one being imported already exists
+            in the Dataset, then these labels will be appended to those
+            of the already existing one, and if a label with an
+            identical key was imported before, the old label value will
+            be overwritten. If two DataItems are identical in the same
+            import data operation, their labels will be combined and if
+            a key collision happens in this case, one of the values will
+ be picked randomly. Two DataItems are considered identical
+ if their content bytes are identical (e.g. image bytes or
+ pdf bytes). These labels will be overridden by Annotation
+            labels specified inside the index file referenced by
+            ``import_schema_uri``,
+            e.g. a jsonl file.
+ import_schema_uri (str):
+ Required. Points to a YAML file stored on Google Cloud
+ Storage describing the import format. Validation will be
+ done against the schema. The schema is defined as an
+ `OpenAPI 3.0.2 Schema
+ Object `__.
+ """
+
+ gcs_source = proto.Field(
+ proto.MESSAGE, number=1, oneof="source", message=io.GcsSource,
+ )
+
+ data_item_labels = proto.MapField(proto.STRING, proto.STRING, number=2)
+
+ import_schema_uri = proto.Field(proto.STRING, number=4)
+
+
+class ExportDataConfig(proto.Message):
+ r"""Describes what part of the Dataset is to be exported, the
+ destination of the export and how to export.
+
+ Attributes:
+ gcs_destination (google.cloud.aiplatform_v1.types.GcsDestination):
+ The Google Cloud Storage location where the output is to be
+ written to. In the given directory a new directory will be
+ created with name:
+ ``export-data--``
+ where timestamp is in YYYY-MM-DDThh:mm:ss.sssZ ISO-8601
+ format. All export output will be written into that
+ directory. Inside that directory, annotations with the same
+ schema will be grouped into sub directories which are named
+ with the corresponding annotations' schema title. Inside
+ these sub directories, a schema.yaml will be created to
+ describe the output format.
+ annotations_filter (str):
+ A filter on Annotations of the Dataset. Only Annotations on
+ to-be-exported DataItems(specified by [data_items_filter][])
+ that match this filter will be exported. The filter syntax
+ is the same as in
+ ``ListAnnotations``.
+ """
+
+ gcs_destination = proto.Field(
+ proto.MESSAGE, number=1, oneof="destination", message=io.GcsDestination,
+ )
+
+ annotations_filter = proto.Field(proto.STRING, number=2)
+
+
+__all__ = tuple(sorted(__protobuf__.manifest))
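
``ImportDataConfig`` keeps its input location in the ``source`` oneof, currently exposed as ``gcs_source``. A minimal sketch (the bucket path, labels, and schema URI are placeholders):

    from google.cloud.aiplatform_v1.types import dataset, io

    import_config = dataset.ImportDataConfig(
        # "source" is a oneof; gcs_source is the only variant defined above.
        gcs_source=io.GcsSource(uris=["gs://example-bucket/import/data.jsonl"]),
        data_item_labels={"source": "manual-upload"},
        import_schema_uri="gs://example-bucket/import-schema.yaml",
    )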
diff --git a/google/cloud/aiplatform_v1/types/dataset_service.py b/google/cloud/aiplatform_v1/types/dataset_service.py
new file mode 100644
index 0000000000..ccc8cce600
--- /dev/null
+++ b/google/cloud/aiplatform_v1/types/dataset_service.py
@@ -0,0 +1,446 @@
+# -*- coding: utf-8 -*-
+
+# Copyright 2020 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+import proto # type: ignore
+
+
+from google.cloud.aiplatform_v1.types import annotation
+from google.cloud.aiplatform_v1.types import data_item
+from google.cloud.aiplatform_v1.types import dataset as gca_dataset
+from google.cloud.aiplatform_v1.types import operation
+from google.protobuf import field_mask_pb2 as field_mask # type: ignore
+
+
+__protobuf__ = proto.module(
+ package="google.cloud.aiplatform.v1",
+ manifest={
+ "CreateDatasetRequest",
+ "CreateDatasetOperationMetadata",
+ "GetDatasetRequest",
+ "UpdateDatasetRequest",
+ "ListDatasetsRequest",
+ "ListDatasetsResponse",
+ "DeleteDatasetRequest",
+ "ImportDataRequest",
+ "ImportDataResponse",
+ "ImportDataOperationMetadata",
+ "ExportDataRequest",
+ "ExportDataResponse",
+ "ExportDataOperationMetadata",
+ "ListDataItemsRequest",
+ "ListDataItemsResponse",
+ "GetAnnotationSpecRequest",
+ "ListAnnotationsRequest",
+ "ListAnnotationsResponse",
+ },
+)
+
+
+class CreateDatasetRequest(proto.Message):
+ r"""Request message for
+ ``DatasetService.CreateDataset``.
+
+ Attributes:
+ parent (str):
+ Required. The resource name of the Location to create the
+ Dataset in. Format:
+ ``projects/{project}/locations/{location}``
+ dataset (google.cloud.aiplatform_v1.types.Dataset):
+ Required. The Dataset to create.
+ """
+
+ parent = proto.Field(proto.STRING, number=1)
+
+ dataset = proto.Field(proto.MESSAGE, number=2, message=gca_dataset.Dataset,)
+
+
+class CreateDatasetOperationMetadata(proto.Message):
+ r"""Runtime operation information for
+ ``DatasetService.CreateDataset``.
+
+ Attributes:
+ generic_metadata (google.cloud.aiplatform_v1.types.GenericOperationMetadata):
+ The operation generic information.
+ """
+
+ generic_metadata = proto.Field(
+ proto.MESSAGE, number=1, message=operation.GenericOperationMetadata,
+ )
+
+
+class GetDatasetRequest(proto.Message):
+ r"""Request message for
+ ``DatasetService.GetDataset``.
+
+ Attributes:
+ name (str):
+ Required. The name of the Dataset resource.
+ read_mask (google.protobuf.field_mask_pb2.FieldMask):
+ Mask specifying which fields to read.
+ """
+
+ name = proto.Field(proto.STRING, number=1)
+
+ read_mask = proto.Field(proto.MESSAGE, number=2, message=field_mask.FieldMask,)
+
+
+class UpdateDatasetRequest(proto.Message):
+ r"""Request message for
+ ``DatasetService.UpdateDataset``.
+
+ Attributes:
+ dataset (google.cloud.aiplatform_v1.types.Dataset):
+ Required. The Dataset which replaces the
+ resource on the server.
+ update_mask (google.protobuf.field_mask_pb2.FieldMask):
+ Required. The update mask applies to the resource. For the
+ ``FieldMask`` definition, see
+ `FieldMask `__.
+ Updatable fields:
+
+ - ``display_name``
+ - ``description``
+ - ``labels``
+ """
+
+ dataset = proto.Field(proto.MESSAGE, number=1, message=gca_dataset.Dataset,)
+
+ update_mask = proto.Field(proto.MESSAGE, number=2, message=field_mask.FieldMask,)
+
+
+class ListDatasetsRequest(proto.Message):
+ r"""Request message for
+ ``DatasetService.ListDatasets``.
+
+ Attributes:
+ parent (str):
+ Required. The name of the Dataset's parent resource. Format:
+ ``projects/{project}/locations/{location}``
+ filter (str):
+ An expression for filtering the results of the request. For
+ field names both snake_case and camelCase are supported.
+
+ - ``display_name``: supports = and !=
+ - ``metadata_schema_uri``: supports = and !=
+ - ``labels`` supports general map functions that is:
+
+ - ``labels.key=value`` - key:value equality
+ - \`labels.key:\* or labels:key - key existence
+ - A key including a space must be quoted.
+ ``labels."a key"``.
+
+ Some examples:
+
+ - ``displayName="myDisplayName"``
+ - ``labels.myKey="myValue"``
+ page_size (int):
+ The standard list page size.
+ page_token (str):
+ The standard list page token.
+ read_mask (google.protobuf.field_mask_pb2.FieldMask):
+ Mask specifying which fields to read.
+ order_by (str):
+ A comma-separated list of fields to order by, sorted in
+ ascending order. Use "desc" after a field name for
+ descending. Supported fields:
+
+ - ``display_name``
+ - ``create_time``
+ - ``update_time``
+ """
+
+ parent = proto.Field(proto.STRING, number=1)
+
+ filter = proto.Field(proto.STRING, number=2)
+
+ page_size = proto.Field(proto.INT32, number=3)
+
+ page_token = proto.Field(proto.STRING, number=4)
+
+ read_mask = proto.Field(proto.MESSAGE, number=5, message=field_mask.FieldMask,)
+
+ order_by = proto.Field(proto.STRING, number=6)
+
+
+class ListDatasetsResponse(proto.Message):
+ r"""Response message for
+ ``DatasetService.ListDatasets``.
+
+ Attributes:
+ datasets (Sequence[google.cloud.aiplatform_v1.types.Dataset]):
+ A list of Datasets that matches the specified
+ filter in the request.
+ next_page_token (str):
+ The standard List next-page token.
+ """
+
+ @property
+ def raw_page(self):
+ return self
+
+ datasets = proto.RepeatedField(
+ proto.MESSAGE, number=1, message=gca_dataset.Dataset,
+ )
+
+ next_page_token = proto.Field(proto.STRING, number=2)
+
+
+class DeleteDatasetRequest(proto.Message):
+ r"""Request message for
+ ``DatasetService.DeleteDataset``.
+
+ Attributes:
+ name (str):
+ Required. The resource name of the Dataset to delete.
+ Format:
+ ``projects/{project}/locations/{location}/datasets/{dataset}``
+ """
+
+ name = proto.Field(proto.STRING, number=1)
+
+
+class ImportDataRequest(proto.Message):
+ r"""Request message for
+ ``DatasetService.ImportData``.
+
+ Attributes:
+ name (str):
+ Required. The name of the Dataset resource. Format:
+ ``projects/{project}/locations/{location}/datasets/{dataset}``
+ import_configs (Sequence[google.cloud.aiplatform_v1.types.ImportDataConfig]):
+ Required. The desired input locations. The
+ contents of all input locations will be imported
+ in one batch.
+ """
+
+ name = proto.Field(proto.STRING, number=1)
+
+ import_configs = proto.RepeatedField(
+ proto.MESSAGE, number=2, message=gca_dataset.ImportDataConfig,
+ )
+
+
+class ImportDataResponse(proto.Message):
+ r"""Response message for
+ ``DatasetService.ImportData``.
+ """
+
+
+class ImportDataOperationMetadata(proto.Message):
+ r"""Runtime operation information for
+ ``DatasetService.ImportData``.
+
+ Attributes:
+ generic_metadata (google.cloud.aiplatform_v1.types.GenericOperationMetadata):
+ The common part of the operation metadata.
+ """
+
+ generic_metadata = proto.Field(
+ proto.MESSAGE, number=1, message=operation.GenericOperationMetadata,
+ )
+
+
+class ExportDataRequest(proto.Message):
+ r"""Request message for
+ ``DatasetService.ExportData``.
+
+ Attributes:
+ name (str):
+ Required. The name of the Dataset resource. Format:
+ ``projects/{project}/locations/{location}/datasets/{dataset}``
+ export_config (google.cloud.aiplatform_v1.types.ExportDataConfig):
+ Required. The desired output location.
+ """
+
+ name = proto.Field(proto.STRING, number=1)
+
+ export_config = proto.Field(
+ proto.MESSAGE, number=2, message=gca_dataset.ExportDataConfig,
+ )
+
+
+class ExportDataResponse(proto.Message):
+ r"""Response message for
+ ``DatasetService.ExportData``.
+
+ Attributes:
+ exported_files (Sequence[str]):
+ All of the files that are exported in this
+ export operation.
+ """
+
+ exported_files = proto.RepeatedField(proto.STRING, number=1)
+
+
+class ExportDataOperationMetadata(proto.Message):
+ r"""Runtime operation information for
+ ``DatasetService.ExportData``.
+
+ Attributes:
+ generic_metadata (google.cloud.aiplatform_v1.types.GenericOperationMetadata):
+ The common part of the operation metadata.
+ gcs_output_directory (str):
+            A Google Cloud Storage directory whose path
+ ends with '/'. The exported data is stored in
+ the directory.
+ """
+
+ generic_metadata = proto.Field(
+ proto.MESSAGE, number=1, message=operation.GenericOperationMetadata,
+ )
+
+ gcs_output_directory = proto.Field(proto.STRING, number=2)
+
+
+class ListDataItemsRequest(proto.Message):
+ r"""Request message for
+ ``DatasetService.ListDataItems``.
+
+ Attributes:
+ parent (str):
+ Required. The resource name of the Dataset to list DataItems
+ from. Format:
+ ``projects/{project}/locations/{location}/datasets/{dataset}``
+ filter (str):
+ The standard list filter.
+ page_size (int):
+ The standard list page size.
+ page_token (str):
+ The standard list page token.
+ read_mask (google.protobuf.field_mask_pb2.FieldMask):
+ Mask specifying which fields to read.
+ order_by (str):
+ A comma-separated list of fields to order by,
+ sorted in ascending order. Use "desc" after a
+ field name for descending.
+ """
+
+ parent = proto.Field(proto.STRING, number=1)
+
+ filter = proto.Field(proto.STRING, number=2)
+
+ page_size = proto.Field(proto.INT32, number=3)
+
+ page_token = proto.Field(proto.STRING, number=4)
+
+ read_mask = proto.Field(proto.MESSAGE, number=5, message=field_mask.FieldMask,)
+
+ order_by = proto.Field(proto.STRING, number=6)
+
+
+class ListDataItemsResponse(proto.Message):
+ r"""Response message for
+ ``DatasetService.ListDataItems``.
+
+ Attributes:
+ data_items (Sequence[google.cloud.aiplatform_v1.types.DataItem]):
+ A list of DataItems that matches the
+ specified filter in the request.
+ next_page_token (str):
+ The standard List next-page token.
+ """
+
+ @property
+ def raw_page(self):
+ return self
+
+ data_items = proto.RepeatedField(
+ proto.MESSAGE, number=1, message=data_item.DataItem,
+ )
+
+ next_page_token = proto.Field(proto.STRING, number=2)
+
+
+class GetAnnotationSpecRequest(proto.Message):
+ r"""Request message for
+ ``DatasetService.GetAnnotationSpec``.
+
+ Attributes:
+ name (str):
+ Required. The name of the AnnotationSpec resource. Format:
+
+ ``projects/{project}/locations/{location}/datasets/{dataset}/annotationSpecs/{annotation_spec}``
+ read_mask (google.protobuf.field_mask_pb2.FieldMask):
+ Mask specifying which fields to read.
+ """
+
+ name = proto.Field(proto.STRING, number=1)
+
+ read_mask = proto.Field(proto.MESSAGE, number=2, message=field_mask.FieldMask,)
+
+
+class ListAnnotationsRequest(proto.Message):
+ r"""Request message for
+ ``DatasetService.ListAnnotations``.
+
+ Attributes:
+ parent (str):
+ Required. The resource name of the DataItem to list
+ Annotations from. Format:
+
+ ``projects/{project}/locations/{location}/datasets/{dataset}/dataItems/{data_item}``
+ filter (str):
+ The standard list filter.
+ page_size (int):
+ The standard list page size.
+ page_token (str):
+ The standard list page token.
+ read_mask (google.protobuf.field_mask_pb2.FieldMask):
+ Mask specifying which fields to read.
+ order_by (str):
+ A comma-separated list of fields to order by,
+ sorted in ascending order. Use "desc" after a
+ field name for descending.
+ """
+
+ parent = proto.Field(proto.STRING, number=1)
+
+ filter = proto.Field(proto.STRING, number=2)
+
+ page_size = proto.Field(proto.INT32, number=3)
+
+ page_token = proto.Field(proto.STRING, number=4)
+
+ read_mask = proto.Field(proto.MESSAGE, number=5, message=field_mask.FieldMask,)
+
+ order_by = proto.Field(proto.STRING, number=6)
+
+
+class ListAnnotationsResponse(proto.Message):
+ r"""Response message for
+ ``DatasetService.ListAnnotations``.
+
+ Attributes:
+ annotations (Sequence[google.cloud.aiplatform_v1.types.Annotation]):
+ A list of Annotations that matches the
+ specified filter in the request.
+ next_page_token (str):
+ The standard List next-page token.
+ """
+
+ @property
+ def raw_page(self):
+ return self
+
+ annotations = proto.RepeatedField(
+ proto.MESSAGE, number=1, message=annotation.Annotation,
+ )
+
+ next_page_token = proto.Field(proto.STRING, number=2)
+
+
+__all__ = tuple(sorted(__protobuf__.manifest))
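
The list requests above share the same paging fields, and the ``raw_page`` property on the responses is what the generated pagers use to walk ``next_page_token``. A minimal sketch of building a filtered, ordered list request (the project, location, and filter value are placeholders; such a request would typically be passed to the generated DatasetService client):

    from google.cloud.aiplatform_v1.types import dataset_service

    request = dataset_service.ListDatasetsRequest(
        parent="projects/example-project/locations/us-central1",
        filter='displayName="myDisplayName"',
        order_by="create_time desc",
        page_size=50,
    )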
diff --git a/google/cloud/aiplatform_v1/types/deployed_model_ref.py b/google/cloud/aiplatform_v1/types/deployed_model_ref.py
new file mode 100644
index 0000000000..2d53610ed5
--- /dev/null
+++ b/google/cloud/aiplatform_v1/types/deployed_model_ref.py
@@ -0,0 +1,42 @@
+# -*- coding: utf-8 -*-
+
+# Copyright 2020 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+import proto # type: ignore
+
+
+__protobuf__ = proto.module(
+ package="google.cloud.aiplatform.v1", manifest={"DeployedModelRef",},
+)
+
+
+class DeployedModelRef(proto.Message):
+ r"""Points to a DeployedModel.
+
+ Attributes:
+ endpoint (str):
+ Immutable. A resource name of an Endpoint.
+ deployed_model_id (str):
+ Immutable. An ID of a DeployedModel in the
+ above Endpoint.
+ """
+
+ endpoint = proto.Field(proto.STRING, number=1)
+
+ deployed_model_id = proto.Field(proto.STRING, number=2)
+
+
+__all__ = tuple(sorted(__protobuf__.manifest))
diff --git a/google/cloud/aiplatform_v1/types/encryption_spec.py b/google/cloud/aiplatform_v1/types/encryption_spec.py
new file mode 100644
index 0000000000..ae908d4b72
--- /dev/null
+++ b/google/cloud/aiplatform_v1/types/encryption_spec.py
@@ -0,0 +1,43 @@
+# -*- coding: utf-8 -*-
+
+# Copyright 2020 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+import proto # type: ignore
+
+
+__protobuf__ = proto.module(
+ package="google.cloud.aiplatform.v1", manifest={"EncryptionSpec",},
+)
+
+
+class EncryptionSpec(proto.Message):
+ r"""Represents a customer-managed encryption key spec that can be
+ applied to a top-level resource.
+
+ Attributes:
+ kms_key_name (str):
+ Required. The Cloud KMS resource identifier of the customer
+ managed encryption key used to protect a resource. Has the
+ form:
+ ``projects/my-project/locations/my-region/keyRings/my-kr/cryptoKeys/my-key``.
+ The key needs to be in the same region as where the compute
+ resource is created.
+ """
+
+ kms_key_name = proto.Field(proto.STRING, number=1)
+
+
+__all__ = tuple(sorted(__protobuf__.manifest))
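
A minimal sketch of attaching an ``EncryptionSpec`` to a resource, here a ``Dataset`` (the project, key ring, key, and schema URI are placeholders):

    from google.cloud.aiplatform_v1.types import dataset, encryption_spec

    ds = dataset.Dataset(
        display_name="cmek-protected-dataset",
        metadata_schema_uri="gs://example-bucket/metadata-schema.yaml",
        encryption_spec=encryption_spec.EncryptionSpec(
            # The key must live in the same region as the resource it protects.
            kms_key_name=(
                "projects/example-project/locations/us-central1/"
                "keyRings/example-kr/cryptoKeys/example-key"
            ),
        ),
    )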
diff --git a/google/cloud/aiplatform_v1/types/endpoint.py b/google/cloud/aiplatform_v1/types/endpoint.py
new file mode 100644
index 0000000000..5cbe3c1b1d
--- /dev/null
+++ b/google/cloud/aiplatform_v1/types/endpoint.py
@@ -0,0 +1,200 @@
+# -*- coding: utf-8 -*-
+
+# Copyright 2020 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+import proto # type: ignore
+
+
+from google.cloud.aiplatform_v1.types import encryption_spec as gca_encryption_spec
+from google.cloud.aiplatform_v1.types import machine_resources
+from google.protobuf import timestamp_pb2 as timestamp # type: ignore
+
+
+__protobuf__ = proto.module(
+ package="google.cloud.aiplatform.v1", manifest={"Endpoint", "DeployedModel",},
+)
+
+
+class Endpoint(proto.Message):
+ r"""Models are deployed into it, and afterwards Endpoint is
+ called to obtain predictions and explanations.
+
+ Attributes:
+ name (str):
+ Output only. The resource name of the
+ Endpoint.
+ display_name (str):
+ Required. The display name of the Endpoint.
+ The name can be up to 128 characters long and
+            can consist of any UTF-8 characters.
+ description (str):
+ The description of the Endpoint.
+ deployed_models (Sequence[google.cloud.aiplatform_v1.types.DeployedModel]):
+ Output only. The models deployed in this Endpoint. To add or
+ remove DeployedModels use
+ ``EndpointService.DeployModel``
+ and
+ ``EndpointService.UndeployModel``
+ respectively.
+ traffic_split (Sequence[google.cloud.aiplatform_v1.types.Endpoint.TrafficSplitEntry]):
+ A map from a DeployedModel's ID to the
+ percentage of this Endpoint's traffic that
+ should be forwarded to that DeployedModel.
+ If a DeployedModel's ID is not listed in this
+ map, then it receives no traffic.
+
+ The traffic percentage values must add up to
+            100, or the map must be empty if the Endpoint is
+            not to accept any traffic at the moment.
+ etag (str):
+ Used to perform consistent read-modify-write
+ updates. If not set, a blind "overwrite" update
+ happens.
+ labels (Sequence[google.cloud.aiplatform_v1.types.Endpoint.LabelsEntry]):
+ The labels with user-defined metadata to
+ organize your Endpoints.
+ Label keys and values can be no longer than 64
+ characters (Unicode codepoints), can only
+ contain lowercase letters, numeric characters,
+ underscores and dashes. International characters
+ are allowed.
+ See https://goo.gl/xmQnxf for more information
+ and examples of labels.
+ create_time (google.protobuf.timestamp_pb2.Timestamp):
+ Output only. Timestamp when this Endpoint was
+ created.
+ update_time (google.protobuf.timestamp_pb2.Timestamp):
+ Output only. Timestamp when this Endpoint was
+ last updated.
+ encryption_spec (google.cloud.aiplatform_v1.types.EncryptionSpec):
+ Customer-managed encryption key spec for an
+ Endpoint. If set, this Endpoint and all sub-
+ resources of this Endpoint will be secured by
+ this key.
+ """
+
+ name = proto.Field(proto.STRING, number=1)
+
+ display_name = proto.Field(proto.STRING, number=2)
+
+ description = proto.Field(proto.STRING, number=3)
+
+ deployed_models = proto.RepeatedField(
+ proto.MESSAGE, number=4, message="DeployedModel",
+ )
+
+ traffic_split = proto.MapField(proto.STRING, proto.INT32, number=5)
+
+ etag = proto.Field(proto.STRING, number=6)
+
+ labels = proto.MapField(proto.STRING, proto.STRING, number=7)
+
+ create_time = proto.Field(proto.MESSAGE, number=8, message=timestamp.Timestamp,)
+
+ update_time = proto.Field(proto.MESSAGE, number=9, message=timestamp.Timestamp,)
+
+ encryption_spec = proto.Field(
+ proto.MESSAGE, number=10, message=gca_encryption_spec.EncryptionSpec,
+ )
+
+
+class DeployedModel(proto.Message):
+ r"""A deployment of a Model. Endpoints contain one or more
+ DeployedModels.
+
+ Attributes:
+ dedicated_resources (google.cloud.aiplatform_v1.types.DedicatedResources):
+ A description of resources that are dedicated
+ to the DeployedModel, and that need a higher
+ degree of manual configuration.
+ automatic_resources (google.cloud.aiplatform_v1.types.AutomaticResources):
+            A description of resources that to a large
+ degree are decided by AI Platform, and require
+ only a modest additional configuration.
+ id (str):
+ Output only. The ID of the DeployedModel.
+ model (str):
+ Required. The name of the Model that this is
+ the deployment of. Note that the Model may be in
+ a different location than the DeployedModel's
+ Endpoint.
+ display_name (str):
+ The display name of the DeployedModel. If not provided upon
+ creation, the Model's display_name is used.
+ create_time (google.protobuf.timestamp_pb2.Timestamp):
+ Output only. Timestamp when the DeployedModel
+ was created.
+ service_account (str):
+ The service account that the DeployedModel's container runs
+ as. Specify the email address of the service account. If
+ this service account is not specified, the container runs as
+ a service account that doesn't have access to the resource
+ project.
+
+ Users deploying the Model must have the
+ ``iam.serviceAccounts.actAs`` permission on this service
+ account.
+ disable_container_logging (bool):
+ For custom-trained Models and AutoML Tabular Models, the
+ container of the DeployedModel instances will send
+ ``stderr`` and ``stdout`` streams to Stackdriver Logging by
+            default. Please note that the logs incur costs, which are
+ subject to `Cloud Logging
+ pricing `__.
+
+ User can disable container logging by setting this flag to
+ true.
+ enable_access_logging (bool):
+ These logs are like standard server access
+ logs, containing information like timestamp and
+ latency for each prediction request.
+ Note that Stackdriver logs may incur a cost,
+ especially if your project receives prediction
+ requests at a high queries per second rate
+ (QPS). Estimate your costs before enabling this
+ option.
+ """
+
+ dedicated_resources = proto.Field(
+ proto.MESSAGE,
+ number=7,
+ oneof="prediction_resources",
+ message=machine_resources.DedicatedResources,
+ )
+
+ automatic_resources = proto.Field(
+ proto.MESSAGE,
+ number=8,
+ oneof="prediction_resources",
+ message=machine_resources.AutomaticResources,
+ )
+
+ id = proto.Field(proto.STRING, number=1)
+
+ model = proto.Field(proto.STRING, number=2)
+
+ display_name = proto.Field(proto.STRING, number=3)
+
+ create_time = proto.Field(proto.MESSAGE, number=6, message=timestamp.Timestamp,)
+
+ service_account = proto.Field(proto.STRING, number=11)
+
+ disable_container_logging = proto.Field(proto.BOOL, number=15)
+
+ enable_access_logging = proto.Field(proto.BOOL, number=13)
+
+
+__all__ = tuple(sorted(__protobuf__.manifest))
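
``DeployedModel`` chooses its compute through the ``prediction_resources`` oneof, and ``Endpoint.traffic_split`` is a plain string-to-int map keyed by DeployedModel ID. A minimal local sketch (the model name, replica counts, and split key are placeholders; ``deployed_models`` and ``id`` are output-only in the API and are populated here only to illustrate the message shape):

    from google.cloud.aiplatform_v1.types import endpoint, machine_resources

    deployed = endpoint.DeployedModel(
        model="projects/example-project/locations/us-central1/models/123",
        display_name="v1",
        # "prediction_resources" is a oneof: dedicated_resources and
        # automatic_resources are mutually exclusive.
        automatic_resources=machine_resources.AutomaticResources(
            min_replica_count=1, max_replica_count=2,
        ),
    )

    ep = endpoint.Endpoint(
        display_name="example-endpoint", deployed_models=[deployed],
    )
    # Map values are percentages and should sum to 100 across deployed models.
    ep.traffic_split["0"] = 100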
diff --git a/google/cloud/aiplatform_v1/types/endpoint_service.py b/google/cloud/aiplatform_v1/types/endpoint_service.py
new file mode 100644
index 0000000000..24e00bd486
--- /dev/null
+++ b/google/cloud/aiplatform_v1/types/endpoint_service.py
@@ -0,0 +1,337 @@
+# -*- coding: utf-8 -*-
+
+# Copyright 2020 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+import proto # type: ignore
+
+
+from google.cloud.aiplatform_v1.types import endpoint as gca_endpoint
+from google.cloud.aiplatform_v1.types import operation
+from google.protobuf import field_mask_pb2 as field_mask # type: ignore
+
+
+__protobuf__ = proto.module(
+ package="google.cloud.aiplatform.v1",
+ manifest={
+ "CreateEndpointRequest",
+ "CreateEndpointOperationMetadata",
+ "GetEndpointRequest",
+ "ListEndpointsRequest",
+ "ListEndpointsResponse",
+ "UpdateEndpointRequest",
+ "DeleteEndpointRequest",
+ "DeployModelRequest",
+ "DeployModelResponse",
+ "DeployModelOperationMetadata",
+ "UndeployModelRequest",
+ "UndeployModelResponse",
+ "UndeployModelOperationMetadata",
+ },
+)
+
+
+class CreateEndpointRequest(proto.Message):
+ r"""Request message for
+ ``EndpointService.CreateEndpoint``.
+
+ Attributes:
+ parent (str):
+ Required. The resource name of the Location to create the
+ Endpoint in. Format:
+ ``projects/{project}/locations/{location}``
+ endpoint (google.cloud.aiplatform_v1.types.Endpoint):
+ Required. The Endpoint to create.
+ """
+
+ parent = proto.Field(proto.STRING, number=1)
+
+ endpoint = proto.Field(proto.MESSAGE, number=2, message=gca_endpoint.Endpoint,)
+
+
+class CreateEndpointOperationMetadata(proto.Message):
+ r"""Runtime operation information for
+ ``EndpointService.CreateEndpoint``.
+
+ Attributes:
+ generic_metadata (google.cloud.aiplatform_v1.types.GenericOperationMetadata):
+ The operation generic information.
+ """
+
+ generic_metadata = proto.Field(
+ proto.MESSAGE, number=1, message=operation.GenericOperationMetadata,
+ )
+
+
+class GetEndpointRequest(proto.Message):
+ r"""Request message for
+ ``EndpointService.GetEndpoint``
+
+ Attributes:
+ name (str):
+ Required. The name of the Endpoint resource. Format:
+ ``projects/{project}/locations/{location}/endpoints/{endpoint}``
+ """
+
+ name = proto.Field(proto.STRING, number=1)
+
+
+class ListEndpointsRequest(proto.Message):
+ r"""Request message for
+ ``EndpointService.ListEndpoints``.
+
+ Attributes:
+ parent (str):
+ Required. The resource name of the Location from which to
+ list the Endpoints. Format:
+ ``projects/{project}/locations/{location}``
+ filter (str):
+ Optional. An expression for filtering the results of the
+ request. For field names both snake_case and camelCase are
+ supported.
+
+ - ``endpoint`` supports = and !=. ``endpoint`` represents
+ the Endpoint ID, i.e. the last segment of the Endpoint's
+ [resource
+ name][google.cloud.aiplatform.v1.Endpoint.name].
+ - ``display_name`` supports = and !=
+ - ``labels`` supports general map functions, that is:
+
+ - ``labels.key=value`` - key:value equality
+ - ``labels.key:*`` or ``labels:key`` - key existence
+ - A key including a space must be quoted.
+ ``labels."a key"``.
+
+ Some examples:
+
+ - ``endpoint=1``
+ - ``displayName="myDisplayName"``
+ - ``labels.myKey="myValue"``
+ page_size (int):
+ Optional. The standard list page size.
+ page_token (str):
+ Optional. The standard list page token. Typically obtained
+ via
+ ``ListEndpointsResponse.next_page_token``
+ of the previous
+ ``EndpointService.ListEndpoints``
+ call.
+ read_mask (google.protobuf.field_mask_pb2.FieldMask):
+ Optional. Mask specifying which fields to
+ read.
+ order_by (str):
+ A comma-separated list of fields to order by, sorted in
+ ascending order. Use "desc" after a field name for
+ descending. Supported fields:
+
+ - ``display_name``
+ - ``create_time``
+ - ``update_time``
+
+ Example: ``display_name, create_time desc``.
+ """
+
+ parent = proto.Field(proto.STRING, number=1)
+
+ filter = proto.Field(proto.STRING, number=2)
+
+ page_size = proto.Field(proto.INT32, number=3)
+
+ page_token = proto.Field(proto.STRING, number=4)
+
+ read_mask = proto.Field(proto.MESSAGE, number=5, message=field_mask.FieldMask,)
+
+ order_by = proto.Field(proto.STRING, number=6)
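+
+# Illustrative usage sketch: a ListEndpointsRequest combining a filter with
+# paging and ordering. All values shown are placeholders.
+#
+#     from google.cloud.aiplatform_v1.types import endpoint_service
+#
+#     request = endpoint_service.ListEndpointsRequest(
+#         parent="projects/my-project/locations/us-central1",
+#         filter='labels.myKey="myValue"',
+#         page_size=50,
+#         order_by="create_time desc",
+#     )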
+
+
+class ListEndpointsResponse(proto.Message):
+ r"""Response message for
+ ``EndpointService.ListEndpoints``.
+
+ Attributes:
+ endpoints (Sequence[google.cloud.aiplatform_v1.types.Endpoint]):
+ List of Endpoints in the requested page.
+ next_page_token (str):
+ A token to retrieve the next page of results. Pass to
+ ``ListEndpointsRequest.page_token``
+ to obtain that page.
+ """
+
+ @property
+ def raw_page(self):
+ return self
+
+ endpoints = proto.RepeatedField(
+ proto.MESSAGE, number=1, message=gca_endpoint.Endpoint,
+ )
+
+ next_page_token = proto.Field(proto.STRING, number=2)
+
+
+class UpdateEndpointRequest(proto.Message):
+ r"""Request message for
+ ``EndpointService.UpdateEndpoint``.
+
+ Attributes:
+ endpoint (google.cloud.aiplatform_v1.types.Endpoint):
+ Required. The Endpoint which replaces the
+ resource on the server.
+ update_mask (google.protobuf.field_mask_pb2.FieldMask):
+ Required. The update mask applies to the resource. See
+ `FieldMask `__.
+ """
+
+ endpoint = proto.Field(proto.MESSAGE, number=1, message=gca_endpoint.Endpoint,)
+
+ update_mask = proto.Field(proto.MESSAGE, number=2, message=field_mask.FieldMask,)
+
+
+class DeleteEndpointRequest(proto.Message):
+ r"""Request message for
+ ``EndpointService.DeleteEndpoint``.
+
+ Attributes:
+ name (str):
+ Required. The name of the Endpoint resource to be deleted.
+ Format:
+ ``projects/{project}/locations/{location}/endpoints/{endpoint}``
+ """
+
+ name = proto.Field(proto.STRING, number=1)
+
+
+class DeployModelRequest(proto.Message):
+ r"""Request message for
+ ``EndpointService.DeployModel``.
+
+ Attributes:
+ endpoint (str):
+ Required. The name of the Endpoint resource into which to
+ deploy a Model. Format:
+ ``projects/{project}/locations/{location}/endpoints/{endpoint}``
+ deployed_model (google.cloud.aiplatform_v1.types.DeployedModel):
+ Required. The DeployedModel to be created within the
+ Endpoint. Note that
+ ``Endpoint.traffic_split``
+ must be updated for the DeployedModel to start receiving
+ traffic, either as part of this call, or via
+ ``EndpointService.UpdateEndpoint``.
+ traffic_split (Sequence[google.cloud.aiplatform_v1.types.DeployModelRequest.TrafficSplitEntry]):
+ A map from a DeployedModel's ID to the percentage of this
+ Endpoint's traffic that should be forwarded to that
+ DeployedModel.
+
+ If this field is non-empty, then the Endpoint's
+ ``traffic_split``
+ will be overwritten with it. To refer to the ID of the Model
+ being deployed by this request, use "0"; the actual ID of the
+ new DeployedModel will be filled in its place by this method.
+ The traffic percentage values must add up to 100.
+
+ If this field is empty, then the Endpoint's
+ ``traffic_split``
+ is not updated.
+ """
+
+ endpoint = proto.Field(proto.STRING, number=1)
+
+ deployed_model = proto.Field(
+ proto.MESSAGE, number=2, message=gca_endpoint.DeployedModel,
+ )
+
+ traffic_split = proto.MapField(proto.STRING, proto.INT32, number=3)
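+
+# Illustrative usage sketch: deploying a model and routing all traffic to it.
+# The "0" key refers to the DeployedModel being created by this request;
+# resource names and machine type below are placeholders.
+#
+#     from google.cloud.aiplatform_v1.types import endpoint as gca_endpoint
+#     from google.cloud.aiplatform_v1.types import endpoint_service
+#     from google.cloud.aiplatform_v1.types import machine_resources
+#
+#     request = endpoint_service.DeployModelRequest(
+#         endpoint="projects/my-project/locations/us-central1/endpoints/123",
+#         deployed_model=gca_endpoint.DeployedModel(
+#             model="projects/my-project/locations/us-central1/models/456",
+#             dedicated_resources=machine_resources.DedicatedResources(
+#                 machine_spec=machine_resources.MachineSpec(machine_type="n1-standard-2"),
+#                 min_replica_count=1,
+#             ),
+#         ),
+#         traffic_split={"0": 100},
+#     )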
+
+
+class DeployModelResponse(proto.Message):
+ r"""Response message for
+ ``EndpointService.DeployModel``.
+
+ Attributes:
+ deployed_model (google.cloud.aiplatform_v1.types.DeployedModel):
+ The DeployedModel that has been deployed in
+ the Endpoint.
+ """
+
+ deployed_model = proto.Field(
+ proto.MESSAGE, number=1, message=gca_endpoint.DeployedModel,
+ )
+
+
+class DeployModelOperationMetadata(proto.Message):
+ r"""Runtime operation information for
+ ``EndpointService.DeployModel``.
+
+ Attributes:
+ generic_metadata (google.cloud.aiplatform_v1.types.GenericOperationMetadata):
+ The operation generic information.
+ """
+
+ generic_metadata = proto.Field(
+ proto.MESSAGE, number=1, message=operation.GenericOperationMetadata,
+ )
+
+
+class UndeployModelRequest(proto.Message):
+ r"""Request message for
+ ``EndpointService.UndeployModel``.
+
+ Attributes:
+ endpoint (str):
+ Required. The name of the Endpoint resource from which to
+ undeploy a Model. Format:
+ ``projects/{project}/locations/{location}/endpoints/{endpoint}``
+ deployed_model_id (str):
+ Required. The ID of the DeployedModel to be
+ undeployed from the Endpoint.
+ traffic_split (Sequence[google.cloud.aiplatform_v1.types.UndeployModelRequest.TrafficSplitEntry]):
+ If this field is provided, then the Endpoint's
+ ``traffic_split``
+ will be overwritten with it. If the last DeployedModel is being
+ undeployed from the Endpoint, the [Endpoint.traffic_split]
+ will always end up empty when this call returns. A
+ DeployedModel will be successfully undeployed only if it
+ doesn't have any traffic assigned to it when this method
+ executes, or if this field unassigns any traffic to it.
+ """
+
+ endpoint = proto.Field(proto.STRING, number=1)
+
+ deployed_model_id = proto.Field(proto.STRING, number=2)
+
+ traffic_split = proto.MapField(proto.STRING, proto.INT32, number=3)
+
+
+class UndeployModelResponse(proto.Message):
+ r"""Response message for
+ ``EndpointService.UndeployModel``.
+ """
+
+
+class UndeployModelOperationMetadata(proto.Message):
+ r"""Runtime operation information for
+ ``EndpointService.UndeployModel``.
+
+ Attributes:
+ generic_metadata (google.cloud.aiplatform_v1.types.GenericOperationMetadata):
+ The operation generic information.
+ """
+
+ generic_metadata = proto.Field(
+ proto.MESSAGE, number=1, message=operation.GenericOperationMetadata,
+ )
+
+
+__all__ = tuple(sorted(__protobuf__.manifest))
diff --git a/google/cloud/aiplatform_v1/types/env_var.py b/google/cloud/aiplatform_v1/types/env_var.py
new file mode 100644
index 0000000000..f456c15808
--- /dev/null
+++ b/google/cloud/aiplatform_v1/types/env_var.py
@@ -0,0 +1,48 @@
+# -*- coding: utf-8 -*-
+
+# Copyright 2020 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+import proto # type: ignore
+
+
+__protobuf__ = proto.module(package="google.cloud.aiplatform.v1", manifest={"EnvVar",},)
+
+
+class EnvVar(proto.Message):
+ r"""Represents an environment variable present in a Container or
+ Python Module.
+
+ Attributes:
+ name (str):
+ Required. Name of the environment variable.
+ Must be a valid C identifier.
+ value (str):
+ Required. Variables that reference a $(VAR_NAME) are
+ expanded using the previous defined environment variables in
+ the container and any service environment variables. If a
+ variable cannot be resolved, the reference in the input
+ string will be unchanged. The $(VAR_NAME) syntax can be
+ escaped with a double $$, ie: $$(VAR_NAME). Escaped
+ references will never be expanded, regardless of whether the
+ variable exists or not.
+ """
+
+ name = proto.Field(proto.STRING, number=1)
+
+ value = proto.Field(proto.STRING, number=2)
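+
+# Illustrative usage sketch: an EnvVar whose value references another variable
+# via the $(VAR_NAME) syntax described above. Names and paths are placeholders.
+#
+#     from google.cloud.aiplatform_v1.types import env_var
+#
+#     data_dir = env_var.EnvVar(name="DATA_DIR", value="/mnt/data")
+#     model_dir = env_var.EnvVar(name="MODEL_DIR", value="$(DATA_DIR)/models")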
+
+
+__all__ = tuple(sorted(__protobuf__.manifest))
diff --git a/google/cloud/aiplatform_v1/types/hyperparameter_tuning_job.py b/google/cloud/aiplatform_v1/types/hyperparameter_tuning_job.py
new file mode 100644
index 0000000000..63290ff9b4
--- /dev/null
+++ b/google/cloud/aiplatform_v1/types/hyperparameter_tuning_job.py
@@ -0,0 +1,142 @@
+# -*- coding: utf-8 -*-
+
+# Copyright 2020 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+import proto # type: ignore
+
+
+from google.cloud.aiplatform_v1.types import custom_job
+from google.cloud.aiplatform_v1.types import encryption_spec as gca_encryption_spec
+from google.cloud.aiplatform_v1.types import job_state
+from google.cloud.aiplatform_v1.types import study
+from google.protobuf import timestamp_pb2 as timestamp # type: ignore
+from google.rpc import status_pb2 as status # type: ignore
+
+
+__protobuf__ = proto.module(
+ package="google.cloud.aiplatform.v1", manifest={"HyperparameterTuningJob",},
+)
+
+
+class HyperparameterTuningJob(proto.Message):
+ r"""Represents a HyperparameterTuningJob. A
+ HyperparameterTuningJob has a Study specification and multiple
+ CustomJobs with identical CustomJob specification.
+
+ Attributes:
+ name (str):
+ Output only. Resource name of the
+ HyperparameterTuningJob.
+ display_name (str):
+ Required. The display name of the
+ HyperparameterTuningJob. The name can be up to
+ 128 characters long and can consist of any
+ UTF-8 characters.
+ study_spec (google.cloud.aiplatform_v1.types.StudySpec):
+ Required. Study configuration of the
+ HyperparameterTuningJob.
+ max_trial_count (int):
+ Required. The desired total number of Trials.
+ parallel_trial_count (int):
+ Required. The desired number of Trials to run
+ in parallel.
+ max_failed_trial_count (int):
+ The number of failed Trials that need to be
+ seen before failing the HyperparameterTuningJob.
+ If set to 0, AI Platform decides how many Trials
+ must fail before the whole job fails.
+ trial_job_spec (google.cloud.aiplatform_v1.types.CustomJobSpec):
+ Required. The spec of a trial job. The same
+ spec applies to the CustomJobs created in all
+ the trials.
+ trials (Sequence[google.cloud.aiplatform_v1.types.Trial]):
+ Output only. Trials of the
+ HyperparameterTuningJob.
+ state (google.cloud.aiplatform_v1.types.JobState):
+ Output only. The detailed state of the job.
+ create_time (google.protobuf.timestamp_pb2.Timestamp):
+ Output only. Time when the
+ HyperparameterTuningJob was created.
+ start_time (google.protobuf.timestamp_pb2.Timestamp):
+ Output only. Time when the HyperparameterTuningJob for the
+ first time entered the ``JOB_STATE_RUNNING`` state.
+ end_time (google.protobuf.timestamp_pb2.Timestamp):
+ Output only. Time when the HyperparameterTuningJob entered
+ any of the following states: ``JOB_STATE_SUCCEEDED``,
+ ``JOB_STATE_FAILED``, ``JOB_STATE_CANCELLED``.
+ update_time (google.protobuf.timestamp_pb2.Timestamp):
+ Output only. Time when the
+ HyperparameterTuningJob was most recently
+ updated.
+ error (google.rpc.status_pb2.Status):
+ Output only. Only populated when the job's state is
+ JOB_STATE_FAILED or JOB_STATE_CANCELLED.
+ labels (Sequence[google.cloud.aiplatform_v1.types.HyperparameterTuningJob.LabelsEntry]):
+ The labels with user-defined metadata to
+ organize HyperparameterTuningJobs.
+ Label keys and values can be no longer than 64
+ characters (Unicode codepoints), can only
+ contain lowercase letters, numeric characters,
+ underscores and dashes. International characters
+ are allowed.
+ See https://goo.gl/xmQnxf for more information
+ and examples of labels.
+ encryption_spec (google.cloud.aiplatform_v1.types.EncryptionSpec):
+ Customer-managed encryption key options for a
+ HyperparameterTuningJob. If this is set, then
+ all resources created by the
+ HyperparameterTuningJob will be encrypted with
+ the provided encryption key.
+ """
+
+ name = proto.Field(proto.STRING, number=1)
+
+ display_name = proto.Field(proto.STRING, number=2)
+
+ study_spec = proto.Field(proto.MESSAGE, number=4, message=study.StudySpec,)
+
+ max_trial_count = proto.Field(proto.INT32, number=5)
+
+ parallel_trial_count = proto.Field(proto.INT32, number=6)
+
+ max_failed_trial_count = proto.Field(proto.INT32, number=7)
+
+ trial_job_spec = proto.Field(
+ proto.MESSAGE, number=8, message=custom_job.CustomJobSpec,
+ )
+
+ trials = proto.RepeatedField(proto.MESSAGE, number=9, message=study.Trial,)
+
+ state = proto.Field(proto.ENUM, number=10, enum=job_state.JobState,)
+
+ create_time = proto.Field(proto.MESSAGE, number=11, message=timestamp.Timestamp,)
+
+ start_time = proto.Field(proto.MESSAGE, number=12, message=timestamp.Timestamp,)
+
+ end_time = proto.Field(proto.MESSAGE, number=13, message=timestamp.Timestamp,)
+
+ update_time = proto.Field(proto.MESSAGE, number=14, message=timestamp.Timestamp,)
+
+ error = proto.Field(proto.MESSAGE, number=15, message=status.Status,)
+
+ labels = proto.MapField(proto.STRING, proto.STRING, number=16)
+
+ encryption_spec = proto.Field(
+ proto.MESSAGE, number=17, message=gca_encryption_spec.EncryptionSpec,
+ )
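+
+# Illustrative usage sketch: a minimal HyperparameterTuningJob message. The
+# study_spec and trial_job_spec required by the service are omitted here for
+# brevity; all values shown are placeholders.
+#
+#     from google.cloud.aiplatform_v1.types import hyperparameter_tuning_job
+#
+#     job = hyperparameter_tuning_job.HyperparameterTuningJob(
+#         display_name="my-hp-tuning-job",
+#         max_trial_count=20,
+#         parallel_trial_count=5,
+#         max_failed_trial_count=3,
+#     )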
+
+
+__all__ = tuple(sorted(__protobuf__.manifest))
diff --git a/google/cloud/aiplatform_v1/types/io.py b/google/cloud/aiplatform_v1/types/io.py
new file mode 100644
index 0000000000..1a75ea33bc
--- /dev/null
+++ b/google/cloud/aiplatform_v1/types/io.py
@@ -0,0 +1,120 @@
+# -*- coding: utf-8 -*-
+
+# Copyright 2020 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+import proto # type: ignore
+
+
+__protobuf__ = proto.module(
+ package="google.cloud.aiplatform.v1",
+ manifest={
+ "GcsSource",
+ "GcsDestination",
+ "BigQuerySource",
+ "BigQueryDestination",
+ "ContainerRegistryDestination",
+ },
+)
+
+
+class GcsSource(proto.Message):
+ r"""The Google Cloud Storage location for the input content.
+
+ Attributes:
+ uris (Sequence[str]):
+ Required. Google Cloud Storage URI(-s) to the
+ input file(s). May contain wildcards. For more
+ information on wildcards, see
+ https://cloud.google.com/storage/docs/gsutil/addlhelp/WildcardNames.
+ """
+
+ uris = proto.RepeatedField(proto.STRING, number=1)
+
+
+class GcsDestination(proto.Message):
+ r"""The Google Cloud Storage location where the output is to be
+ written to.
+
+ Attributes:
+ output_uri_prefix (str):
+ Required. Google Cloud Storage URI to output
+ directory. If the URI doesn't end with '/', a
+ '/' will be automatically appended. The
+ directory is created if it doesn't exist.
+ """
+
+ output_uri_prefix = proto.Field(proto.STRING, number=1)
+
+
+class BigQuerySource(proto.Message):
+ r"""The BigQuery location for the input content.
+
+ Attributes:
+ input_uri (str):
+ Required. BigQuery URI to a table, up to 2000 characters
+ long. Accepted forms:
+
+ - BigQuery path. For example:
+ ``bq://projectId.bqDatasetId.bqTableId``.
+ """
+
+ input_uri = proto.Field(proto.STRING, number=1)
+
+
+class BigQueryDestination(proto.Message):
+ r"""The BigQuery location for the output content.
+
+ Attributes:
+ output_uri (str):
+ Required. BigQuery URI to a project or table, up to 2000
+ characters long.
+
+ When only the project is specified, the Dataset and Table
+ are created. When the full table reference is specified, the
+ Dataset must exist and the table must not exist.
+
+ Accepted forms:
+
+ - BigQuery path. For example: ``bq://projectId`` or
+ ``bq://projectId.bqDatasetId.bqTableId``.
+ """
+
+ output_uri = proto.Field(proto.STRING, number=1)
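+
+# Illustrative usage sketch: typical input and output locations. The bucket and
+# project URIs are placeholders.
+#
+#     from google.cloud.aiplatform_v1.types import io
+#
+#     source = io.GcsSource(uris=["gs://my-bucket/data/*.jsonl"])
+#     destination = io.BigQueryDestination(output_uri="bq://my-project")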
+
+
+class ContainerRegistryDestination(proto.Message):
+ r"""The Container Registry location for the container image.
+
+ Attributes:
+ output_uri (str):
+ Required. Container Registry URI of a container image. Only
+ Google Container Registry and Artifact Registry are
+ supported now. Accepted forms:
+
+ - Google Container Registry path. For example:
+ ``gcr.io/projectId/imageName:tag``.
+
+ - Artifact Registry path. For example:
+ ``us-central1-docker.pkg.dev/projectId/repoName/imageName:tag``.
+
+ If a tag is not specified, "latest" will be used as the
+ default tag.
+ """
+
+ output_uri = proto.Field(proto.STRING, number=1)
+
+
+__all__ = tuple(sorted(__protobuf__.manifest))
diff --git a/google/cloud/aiplatform_v1/types/job_service.py b/google/cloud/aiplatform_v1/types/job_service.py
new file mode 100644
index 0000000000..3a6d844ea7
--- /dev/null
+++ b/google/cloud/aiplatform_v1/types/job_service.py
@@ -0,0 +1,619 @@
+# -*- coding: utf-8 -*-
+
+# Copyright 2020 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+import proto # type: ignore
+
+
+from google.cloud.aiplatform_v1.types import (
+ batch_prediction_job as gca_batch_prediction_job,
+)
+from google.cloud.aiplatform_v1.types import custom_job as gca_custom_job
+from google.cloud.aiplatform_v1.types import data_labeling_job as gca_data_labeling_job
+from google.cloud.aiplatform_v1.types import (
+ hyperparameter_tuning_job as gca_hyperparameter_tuning_job,
+)
+from google.protobuf import field_mask_pb2 as field_mask # type: ignore
+
+
+__protobuf__ = proto.module(
+ package="google.cloud.aiplatform.v1",
+ manifest={
+ "CreateCustomJobRequest",
+ "GetCustomJobRequest",
+ "ListCustomJobsRequest",
+ "ListCustomJobsResponse",
+ "DeleteCustomJobRequest",
+ "CancelCustomJobRequest",
+ "CreateDataLabelingJobRequest",
+ "GetDataLabelingJobRequest",
+ "ListDataLabelingJobsRequest",
+ "ListDataLabelingJobsResponse",
+ "DeleteDataLabelingJobRequest",
+ "CancelDataLabelingJobRequest",
+ "CreateHyperparameterTuningJobRequest",
+ "GetHyperparameterTuningJobRequest",
+ "ListHyperparameterTuningJobsRequest",
+ "ListHyperparameterTuningJobsResponse",
+ "DeleteHyperparameterTuningJobRequest",
+ "CancelHyperparameterTuningJobRequest",
+ "CreateBatchPredictionJobRequest",
+ "GetBatchPredictionJobRequest",
+ "ListBatchPredictionJobsRequest",
+ "ListBatchPredictionJobsResponse",
+ "DeleteBatchPredictionJobRequest",
+ "CancelBatchPredictionJobRequest",
+ },
+)
+
+
+class CreateCustomJobRequest(proto.Message):
+ r"""Request message for
+ ``JobService.CreateCustomJob``.
+
+ Attributes:
+ parent (str):
+ Required. The resource name of the Location to create the
+ CustomJob in. Format:
+ ``projects/{project}/locations/{location}``
+ custom_job (google.cloud.aiplatform_v1.types.CustomJob):
+ Required. The CustomJob to create.
+ """
+
+ parent = proto.Field(proto.STRING, number=1)
+
+ custom_job = proto.Field(proto.MESSAGE, number=2, message=gca_custom_job.CustomJob,)
+
+
+class GetCustomJobRequest(proto.Message):
+ r"""Request message for
+ ``JobService.GetCustomJob``.
+
+ Attributes:
+ name (str):
+ Required. The name of the CustomJob resource. Format:
+ ``projects/{project}/locations/{location}/customJobs/{custom_job}``
+ """
+
+ name = proto.Field(proto.STRING, number=1)
+
+
+class ListCustomJobsRequest(proto.Message):
+ r"""Request message for
+ ``JobService.ListCustomJobs``.
+
+ Attributes:
+ parent (str):
+ Required. The resource name of the Location to list the
+ CustomJobs from. Format:
+ ``projects/{project}/locations/{location}``
+ filter (str):
+ The standard list filter.
+
+ Supported fields:
+
+ - ``display_name`` supports = and !=.
+
+ - ``state`` supports = and !=.
+
+ Some examples of using the filter are:
+
+ - ``state="JOB_STATE_SUCCEEDED" AND display_name="my_job"``
+
+ - ``state="JOB_STATE_RUNNING" OR display_name="my_job"``
+
+ - ``NOT display_name="my_job"``
+
+ - ``state="JOB_STATE_FAILED"``
+ page_size (int):
+ The standard list page size.
+ page_token (str):
+ The standard list page token. Typically obtained via
+ ``ListCustomJobsResponse.next_page_token``
+ of the previous
+ ``JobService.ListCustomJobs``
+ call.
+ read_mask (google.protobuf.field_mask_pb2.FieldMask):
+ Mask specifying which fields to read.
+ """
+
+ parent = proto.Field(proto.STRING, number=1)
+
+ filter = proto.Field(proto.STRING, number=2)
+
+ page_size = proto.Field(proto.INT32, number=3)
+
+ page_token = proto.Field(proto.STRING, number=4)
+
+ read_mask = proto.Field(proto.MESSAGE, number=5, message=field_mask.FieldMask,)
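+
+# Illustrative usage sketch: listing only failed CustomJobs, using the filter
+# grammar described above. The parent value is a placeholder.
+#
+#     from google.cloud.aiplatform_v1.types import job_service
+#
+#     request = job_service.ListCustomJobsRequest(
+#         parent="projects/my-project/locations/us-central1",
+#         filter='state="JOB_STATE_FAILED"',
+#         page_size=100,
+#     )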
+
+
+class ListCustomJobsResponse(proto.Message):
+ r"""Response message for
+ ``JobService.ListCustomJobs``
+
+ Attributes:
+ custom_jobs (Sequence[google.cloud.aiplatform_v1.types.CustomJob]):
+ List of CustomJobs in the requested page.
+ next_page_token (str):
+ A token to retrieve the next page of results. Pass to
+ ``ListCustomJobsRequest.page_token``
+ to obtain that page.
+ """
+
+ @property
+ def raw_page(self):
+ return self
+
+ custom_jobs = proto.RepeatedField(
+ proto.MESSAGE, number=1, message=gca_custom_job.CustomJob,
+ )
+
+ next_page_token = proto.Field(proto.STRING, number=2)
+
+
+class DeleteCustomJobRequest(proto.Message):
+ r"""Request message for
+ ``JobService.DeleteCustomJob``.
+
+ Attributes:
+ name (str):
+ Required. The name of the CustomJob resource to be deleted.
+ Format:
+ ``projects/{project}/locations/{location}/customJobs/{custom_job}``
+ """
+
+ name = proto.Field(proto.STRING, number=1)
+
+
+class CancelCustomJobRequest(proto.Message):
+ r"""Request message for
+ ``JobService.CancelCustomJob``.
+
+ Attributes:
+ name (str):
+ Required. The name of the CustomJob to cancel. Format:
+ ``projects/{project}/locations/{location}/customJobs/{custom_job}``
+ """
+
+ name = proto.Field(proto.STRING, number=1)
+
+
+class CreateDataLabelingJobRequest(proto.Message):
+ r"""Request message for
+ [DataLabelingJobService.CreateDataLabelingJob][].
+
+ Attributes:
+ parent (str):
+ Required. The parent of the DataLabelingJob. Format:
+ ``projects/{project}/locations/{location}``
+ data_labeling_job (google.cloud.aiplatform_v1.types.DataLabelingJob):
+ Required. The DataLabelingJob to create.
+ """
+
+ parent = proto.Field(proto.STRING, number=1)
+
+ data_labeling_job = proto.Field(
+ proto.MESSAGE, number=2, message=gca_data_labeling_job.DataLabelingJob,
+ )
+
+
+class GetDataLabelingJobRequest(proto.Message):
+ r"""Request message for [DataLabelingJobService.GetDataLabelingJob][].
+
+ Attributes:
+ name (str):
+ Required. The name of the DataLabelingJob. Format:
+
+ ``projects/{project}/locations/{location}/dataLabelingJobs/{data_labeling_job}``
+ """
+
+ name = proto.Field(proto.STRING, number=1)
+
+
+class ListDataLabelingJobsRequest(proto.Message):
+ r"""Request message for [DataLabelingJobService.ListDataLabelingJobs][].
+
+ Attributes:
+ parent (str):
+ Required. The parent of the DataLabelingJob. Format:
+ ``projects/{project}/locations/{location}``
+ filter (str):
+ The standard list filter.
+
+ Supported fields:
+
+ - ``display_name`` supports = and !=.
+
+ - ``state`` supports = and !=.
+
+ Some examples of using the filter are:
+
+ - ``state="JOB_STATE_SUCCEEDED" AND display_name="my_job"``
+
+ - ``state="JOB_STATE_RUNNING" OR display_name="my_job"``
+
+ - ``NOT display_name="my_job"``
+
+ - ``state="JOB_STATE_FAILED"``
+ page_size (int):
+ The standard list page size.
+ page_token (str):
+ The standard list page token.
+ read_mask (google.protobuf.field_mask_pb2.FieldMask):
+ Mask specifying which fields to read. FieldMask represents a
+ set of symbolic field paths. For example, the mask can be
+ ``paths: "name"``. The "name" here is a field in
+ DataLabelingJob. If this field is not set, all fields of the
+ DataLabelingJob are returned.
+ order_by (str):
+ A comma-separated list of fields to order by, sorted in
+ ascending order by default. Use ``desc`` after a field name
+ for descending.
+ """
+
+ parent = proto.Field(proto.STRING, number=1)
+
+ filter = proto.Field(proto.STRING, number=2)
+
+ page_size = proto.Field(proto.INT32, number=3)
+
+ page_token = proto.Field(proto.STRING, number=4)
+
+ read_mask = proto.Field(proto.MESSAGE, number=5, message=field_mask.FieldMask,)
+
+ order_by = proto.Field(proto.STRING, number=6)
+
+
+class ListDataLabelingJobsResponse(proto.Message):
+ r"""Response message for
+ ``JobService.ListDataLabelingJobs``.
+
+ Attributes:
+ data_labeling_jobs (Sequence[google.cloud.aiplatform_v1.types.DataLabelingJob]):
+ A list of DataLabelingJobs that matches the
+ specified filter in the request.
+ next_page_token (str):
+ The standard List next-page token.
+ """
+
+ @property
+ def raw_page(self):
+ return self
+
+ data_labeling_jobs = proto.RepeatedField(
+ proto.MESSAGE, number=1, message=gca_data_labeling_job.DataLabelingJob,
+ )
+
+ next_page_token = proto.Field(proto.STRING, number=2)
+
+
+class DeleteDataLabelingJobRequest(proto.Message):
+ r"""Request message for
+ ``JobService.DeleteDataLabelingJob``.
+
+ Attributes:
+ name (str):
+ Required. The name of the DataLabelingJob to be deleted.
+ Format:
+
+ ``projects/{project}/locations/{location}/dataLabelingJobs/{data_labeling_job}``
+ """
+
+ name = proto.Field(proto.STRING, number=1)
+
+
+class CancelDataLabelingJobRequest(proto.Message):
+ r"""Request message for
+ [DataLabelingJobService.CancelDataLabelingJob][].
+
+ Attributes:
+ name (str):
+ Required. The name of the DataLabelingJob. Format:
+
+ ``projects/{project}/locations/{location}/dataLabelingJobs/{data_labeling_job}``
+ """
+
+ name = proto.Field(proto.STRING, number=1)
+
+
+class CreateHyperparameterTuningJobRequest(proto.Message):
+ r"""Request message for
+ ``JobService.CreateHyperparameterTuningJob``.
+
+ Attributes:
+ parent (str):
+ Required. The resource name of the Location to create the
+ HyperparameterTuningJob in. Format:
+ ``projects/{project}/locations/{location}``
+ hyperparameter_tuning_job (google.cloud.aiplatform_v1.types.HyperparameterTuningJob):
+ Required. The HyperparameterTuningJob to
+ create.
+ """
+
+ parent = proto.Field(proto.STRING, number=1)
+
+ hyperparameter_tuning_job = proto.Field(
+ proto.MESSAGE,
+ number=2,
+ message=gca_hyperparameter_tuning_job.HyperparameterTuningJob,
+ )
+
+
+class GetHyperparameterTuningJobRequest(proto.Message):
+ r"""Request message for
+ ``JobService.GetHyperparameterTuningJob``.
+
+ Attributes:
+ name (str):
+ Required. The name of the HyperparameterTuningJob resource.
+ Format:
+
+ ``projects/{project}/locations/{location}/hyperparameterTuningJobs/{hyperparameter_tuning_job}``
+ """
+
+ name = proto.Field(proto.STRING, number=1)
+
+
+class ListHyperparameterTuningJobsRequest(proto.Message):
+ r"""Request message for
+ ``JobService.ListHyperparameterTuningJobs``.
+
+ Attributes:
+ parent (str):
+ Required. The resource name of the Location to list the
+ HyperparameterTuningJobs from. Format:
+ ``projects/{project}/locations/{location}``
+ filter (str):
+ The standard list filter.
+
+ Supported fields:
+
+ - ``display_name`` supports = and !=.
+
+ - ``state`` supports = and !=.
+
+ Some examples of using the filter are:
+
+ - ``state="JOB_STATE_SUCCEEDED" AND display_name="my_job"``
+
+ - ``state="JOB_STATE_RUNNING" OR display_name="my_job"``
+
+ - ``NOT display_name="my_job"``
+
+ - ``state="JOB_STATE_FAILED"``
+ page_size (int):
+ The standard list page size.
+ page_token (str):
+ The standard list page token. Typically obtained via
+ ``ListHyperparameterTuningJobsResponse.next_page_token``
+ of the previous
+ ``JobService.ListHyperparameterTuningJobs``
+ call.
+ read_mask (google.protobuf.field_mask_pb2.FieldMask):
+ Mask specifying which fields to read.
+ """
+
+ parent = proto.Field(proto.STRING, number=1)
+
+ filter = proto.Field(proto.STRING, number=2)
+
+ page_size = proto.Field(proto.INT32, number=3)
+
+ page_token = proto.Field(proto.STRING, number=4)
+
+ read_mask = proto.Field(proto.MESSAGE, number=5, message=field_mask.FieldMask,)
+
+
+class ListHyperparameterTuningJobsResponse(proto.Message):
+ r"""Response message for
+ ``JobService.ListHyperparameterTuningJobs``
+
+ Attributes:
+ hyperparameter_tuning_jobs (Sequence[google.cloud.aiplatform_v1.types.HyperparameterTuningJob]):
+ List of HyperparameterTuningJobs in the requested page.
+ ``HyperparameterTuningJob.trials``
+ of the jobs will not be returned.
+ next_page_token (str):
+ A token to retrieve the next page of results. Pass to
+ ``ListHyperparameterTuningJobsRequest.page_token``
+ to obtain that page.
+ """
+
+ @property
+ def raw_page(self):
+ return self
+
+ hyperparameter_tuning_jobs = proto.RepeatedField(
+ proto.MESSAGE,
+ number=1,
+ message=gca_hyperparameter_tuning_job.HyperparameterTuningJob,
+ )
+
+ next_page_token = proto.Field(proto.STRING, number=2)
+
+
+class DeleteHyperparameterTuningJobRequest(proto.Message):
+ r"""Request message for
+ ``JobService.DeleteHyperparameterTuningJob``.
+
+ Attributes:
+ name (str):
+ Required. The name of the HyperparameterTuningJob resource
+ to be deleted. Format:
+
+ ``projects/{project}/locations/{location}/hyperparameterTuningJobs/{hyperparameter_tuning_job}``
+ """
+
+ name = proto.Field(proto.STRING, number=1)
+
+
+class CancelHyperparameterTuningJobRequest(proto.Message):
+ r"""Request message for
+ ``JobService.CancelHyperparameterTuningJob``.
+
+ Attributes:
+ name (str):
+ Required. The name of the HyperparameterTuningJob to cancel.
+ Format:
+
+ ``projects/{project}/locations/{location}/hyperparameterTuningJobs/{hyperparameter_tuning_job}``
+ """
+
+ name = proto.Field(proto.STRING, number=1)
+
+
+class CreateBatchPredictionJobRequest(proto.Message):
+ r"""Request message for
+ ``JobService.CreateBatchPredictionJob``.
+
+ Attributes:
+ parent (str):
+ Required. The resource name of the Location to create the
+ BatchPredictionJob in. Format:
+ ``projects/{project}/locations/{location}``
+ batch_prediction_job (google.cloud.aiplatform_v1.types.BatchPredictionJob):
+ Required. The BatchPredictionJob to create.
+ """
+
+ parent = proto.Field(proto.STRING, number=1)
+
+ batch_prediction_job = proto.Field(
+ proto.MESSAGE, number=2, message=gca_batch_prediction_job.BatchPredictionJob,
+ )
+
+
+class GetBatchPredictionJobRequest(proto.Message):
+ r"""Request message for
+ ``JobService.GetBatchPredictionJob``.
+
+ Attributes:
+ name (str):
+ Required. The name of the BatchPredictionJob resource.
+ Format:
+
+ ``projects/{project}/locations/{location}/batchPredictionJobs/{batch_prediction_job}``
+ """
+
+ name = proto.Field(proto.STRING, number=1)
+
+
+class ListBatchPredictionJobsRequest(proto.Message):
+ r"""Request message for
+ ``JobService.ListBatchPredictionJobs``.
+
+ Attributes:
+ parent (str):
+ Required. The resource name of the Location to list the
+ BatchPredictionJobs from. Format:
+ ``projects/{project}/locations/{location}``
+ filter (str):
+ The standard list filter.
+
+ Supported fields:
+
+ - ``display_name`` supports = and !=.
+
+ - ``state`` supports = and !=.
+
+ Some examples of using the filter are:
+
+ - ``state="JOB_STATE_SUCCEEDED" AND display_name="my_job"``
+
+ - ``state="JOB_STATE_RUNNING" OR display_name="my_job"``
+
+ - ``NOT display_name="my_job"``
+
+ - ``state="JOB_STATE_FAILED"``
+ page_size (int):
+ The standard list page size.
+ page_token (str):
+ The standard list page token. Typically obtained via
+ ``ListBatchPredictionJobsResponse.next_page_token``
+ of the previous
+ ``JobService.ListBatchPredictionJobs``
+ call.
+ read_mask (google.protobuf.field_mask_pb2.FieldMask):
+ Mask specifying which fields to read.
+ """
+
+ parent = proto.Field(proto.STRING, number=1)
+
+ filter = proto.Field(proto.STRING, number=2)
+
+ page_size = proto.Field(proto.INT32, number=3)
+
+ page_token = proto.Field(proto.STRING, number=4)
+
+ read_mask = proto.Field(proto.MESSAGE, number=5, message=field_mask.FieldMask,)
+
+
+class ListBatchPredictionJobsResponse(proto.Message):
+ r"""Response message for
+ ``JobService.ListBatchPredictionJobs``
+
+ Attributes:
+ batch_prediction_jobs (Sequence[google.cloud.aiplatform_v1.types.BatchPredictionJob]):
+ List of BatchPredictionJobs in the requested
+ page.
+ next_page_token (str):
+ A token to retrieve the next page of results. Pass to
+ ``ListBatchPredictionJobsRequest.page_token``
+ to obtain that page.
+ """
+
+ @property
+ def raw_page(self):
+ return self
+
+ batch_prediction_jobs = proto.RepeatedField(
+ proto.MESSAGE, number=1, message=gca_batch_prediction_job.BatchPredictionJob,
+ )
+
+ next_page_token = proto.Field(proto.STRING, number=2)
+
+
+class DeleteBatchPredictionJobRequest(proto.Message):
+ r"""Request message for
+ ``JobService.DeleteBatchPredictionJob``.
+
+ Attributes:
+ name (str):
+ Required. The name of the BatchPredictionJob resource to be
+ deleted. Format:
+
+ ``projects/{project}/locations/{location}/batchPredictionJobs/{batch_prediction_job}``
+ """
+
+ name = proto.Field(proto.STRING, number=1)
+
+
+class CancelBatchPredictionJobRequest(proto.Message):
+ r"""Request message for
+ ``JobService.CancelBatchPredictionJob``.
+
+ Attributes:
+ name (str):
+ Required. The name of the BatchPredictionJob to cancel.
+ Format:
+
+ ``projects/{project}/locations/{location}/batchPredictionJobs/{batch_prediction_job}``
+ """
+
+ name = proto.Field(proto.STRING, number=1)
+
+
+__all__ = tuple(sorted(__protobuf__.manifest))
diff --git a/google/cloud/aiplatform_v1/types/job_state.py b/google/cloud/aiplatform_v1/types/job_state.py
new file mode 100644
index 0000000000..40b1694f86
--- /dev/null
+++ b/google/cloud/aiplatform_v1/types/job_state.py
@@ -0,0 +1,39 @@
+# -*- coding: utf-8 -*-
+
+# Copyright 2020 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+import proto # type: ignore
+
+
+__protobuf__ = proto.module(
+ package="google.cloud.aiplatform.v1", manifest={"JobState",},
+)
+
+
+class JobState(proto.Enum):
+ r"""Describes the state of a job."""
+ JOB_STATE_UNSPECIFIED = 0
+ JOB_STATE_QUEUED = 1
+ JOB_STATE_PENDING = 2
+ JOB_STATE_RUNNING = 3
+ JOB_STATE_SUCCEEDED = 4
+ JOB_STATE_FAILED = 5
+ JOB_STATE_CANCELLING = 6
+ JOB_STATE_CANCELLED = 7
+ JOB_STATE_PAUSED = 8
+
+
+__all__ = tuple(sorted(__protobuf__.manifest))
diff --git a/google/cloud/aiplatform_v1/types/machine_resources.py b/google/cloud/aiplatform_v1/types/machine_resources.py
new file mode 100644
index 0000000000..f6864eb798
--- /dev/null
+++ b/google/cloud/aiplatform_v1/types/machine_resources.py
@@ -0,0 +1,212 @@
+# -*- coding: utf-8 -*-
+
+# Copyright 2020 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+import proto # type: ignore
+
+
+from google.cloud.aiplatform_v1.types import accelerator_type as gca_accelerator_type
+
+
+__protobuf__ = proto.module(
+ package="google.cloud.aiplatform.v1",
+ manifest={
+ "MachineSpec",
+ "DedicatedResources",
+ "AutomaticResources",
+ "BatchDedicatedResources",
+ "ResourcesConsumed",
+ "DiskSpec",
+ },
+)
+
+
+class MachineSpec(proto.Message):
+ r"""Specification of a single machine.
+
+ Attributes:
+ machine_type (str):
+ Immutable. The type of the machine. For the machine types
+ supported for prediction, see
+ https://tinyurl.com/aip-docs/predictions/machine-types. For
+ machine types supported for creating a custom training job,
+ see https://tinyurl.com/aip-docs/training/configure-compute.
+
+ For
+ ``DeployedModel``
+ this field is optional, and the default value is
+ ``n1-standard-2``. For
+ ``BatchPredictionJob``
+ or as part of
+ ``WorkerPoolSpec``
+ this field is required.
+ accelerator_type (google.cloud.aiplatform_v1.types.AcceleratorType):
+ Immutable. The type of accelerator(s) that may be attached
+ to the machine as per
+ ``accelerator_count``.
+ accelerator_count (int):
+ The number of accelerators to attach to the
+ machine.
+ """
+
+ machine_type = proto.Field(proto.STRING, number=1)
+
+ accelerator_type = proto.Field(
+ proto.ENUM, number=2, enum=gca_accelerator_type.AcceleratorType,
+ )
+
+ accelerator_count = proto.Field(proto.INT32, number=3)
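+
+# Illustrative usage sketch: a GPU-backed MachineSpec. The machine and
+# accelerator types are placeholders; see the linked docs for supported
+# combinations.
+#
+#     from google.cloud.aiplatform_v1.types import accelerator_type
+#     from google.cloud.aiplatform_v1.types import machine_resources
+#
+#     spec = machine_resources.MachineSpec(
+#         machine_type="n1-standard-8",
+#         accelerator_type=accelerator_type.AcceleratorType.NVIDIA_TESLA_T4,
+#         accelerator_count=1,
+#     )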
+
+
+class DedicatedResources(proto.Message):
+ r"""A description of resources that are dedicated to a
+ DeployedModel, and that need a higher degree of manual
+ configuration.
+
+ Attributes:
+ machine_spec (google.cloud.aiplatform_v1.types.MachineSpec):
+ Required. Immutable. The specification of a
+ single machine used by the prediction.
+ min_replica_count (int):
+ Required. Immutable. The minimum number of machine replicas
+ this DeployedModel will be always deployed on. If traffic
+ against it increases, it may dynamically be deployed onto
+ more replicas, and as traffic decreases, some of these extra
+ replicas may be freed. Note: if
+ ``machine_spec.accelerator_count``
+ is above 0, currently the model will be always deployed
+ precisely on
+ ``min_replica_count``.
+ max_replica_count (int):
+ Immutable. The maximum number of replicas this DeployedModel
+ may be deployed on when the traffic against it increases. If
+ the requested value is too large, the deployment will error,
+ but if deployment succeeds then the ability to scale the
+ model to that many replicas is guaranteed (barring service
+ outages). If traffic against the DeployedModel increases
+ beyond what its replicas at maximum may handle, a portion of
+ the traffic will be dropped. If this value is not provided,
+ will use
+ ``min_replica_count``
+ as the default value.
+ """
+
+ machine_spec = proto.Field(proto.MESSAGE, number=1, message="MachineSpec",)
+
+ min_replica_count = proto.Field(proto.INT32, number=2)
+
+ max_replica_count = proto.Field(proto.INT32, number=3)
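+
+# Illustrative usage sketch: DedicatedResources that scale between one and
+# three replicas. Machine type and replica counts are placeholders.
+#
+#     from google.cloud.aiplatform_v1.types import machine_resources
+#
+#     resources = machine_resources.DedicatedResources(
+#         machine_spec=machine_resources.MachineSpec(machine_type="n1-standard-4"),
+#         min_replica_count=1,
+#         max_replica_count=3,
+#     )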
+
+
+class AutomaticResources(proto.Message):
+ r"""A description of resources that to large degree are decided
+ by AI Platform, and require only a modest additional
+ configuration. Each Model supporting these resources documents
+ its specific guidelines.
+
+ Attributes:
+ min_replica_count (int):
+ Immutable. The minimum number of replicas this DeployedModel
+ will be always deployed on. If traffic against it increases,
+ it may dynamically be deployed onto more replicas up to
+ ``max_replica_count``,
+ and as traffic decreases, some of these extra replicas may
+ be freed. If the requested value is too large, the
+ deployment will error.
+ max_replica_count (int):
+ Immutable. The maximum number of replicas
+ this DeployedModel may be deployed on when the
+ traffic against it increases. If the requested
+ value is too large, the deployment will error,
+ but if deployment succeeds then the ability to
+ scale the model to that many replicas is
+ guaranteed (barring service outages). If traffic
+ against the DeployedModel increases beyond what
+ its replicas at maximum may handle, a portion of
+ the traffic will be dropped. If this value is
+ not provided, a no upper bound for scaling under
+ heavy traffic will be assume, though AI Platform
+ may be unable to scale beyond certain replica
+ number.
+ """
+
+ min_replica_count = proto.Field(proto.INT32, number=1)
+
+ max_replica_count = proto.Field(proto.INT32, number=2)
+
+
+class BatchDedicatedResources(proto.Message):
+ r"""A description of resources that are used for performing batch
+ operations, are dedicated to a Model, and need manual
+ configuration.
+
+ Attributes:
+ machine_spec (google.cloud.aiplatform_v1.types.MachineSpec):
+ Required. Immutable. The specification of a
+ single machine.
+ starting_replica_count (int):
+ Immutable. The number of machine replicas used at the start
+ of the batch operation. If not set, AI Platform decides
+ the starting number, not greater than
+ ``max_replica_count``
+ max_replica_count (int):
+ Immutable. The maximum number of machine
+ replicas the batch operation may be scaled to.
+ The default value is 10.
+ """
+
+ machine_spec = proto.Field(proto.MESSAGE, number=1, message="MachineSpec",)
+
+ starting_replica_count = proto.Field(proto.INT32, number=2)
+
+ max_replica_count = proto.Field(proto.INT32, number=3)
+
+
+class ResourcesConsumed(proto.Message):
+ r"""Statistics information about resource consumption.
+
+ Attributes:
+ replica_hours (float):
+ Output only. The number of replica hours
+ used. Note that many replicas may run in
+ parallel, and additionally any given work may be
+ queued for some time. Therefore this value is
+ not strictly related to wall time.
+ """
+
+ replica_hours = proto.Field(proto.DOUBLE, number=1)
+
+
+class DiskSpec(proto.Message):
+ r"""Represents the spec of disk options.
+
+ Attributes:
+ boot_disk_type (str):
+ Type of the boot disk (default is "pd-ssd").
+ Valid values: "pd-ssd" (Persistent Disk Solid
+ State Drive) or "pd-standard" (Persistent Disk
+ Hard Disk Drive).
+ boot_disk_size_gb (int):
+ Size in GB of the boot disk (default is
+ 100GB).
+ """
+
+ boot_disk_type = proto.Field(proto.STRING, number=1)
+
+ boot_disk_size_gb = proto.Field(proto.INT32, number=2)
+
+
+__all__ = tuple(sorted(__protobuf__.manifest))
diff --git a/google/cloud/aiplatform_v1/types/manual_batch_tuning_parameters.py b/google/cloud/aiplatform_v1/types/manual_batch_tuning_parameters.py
new file mode 100644
index 0000000000..7500d618a0
--- /dev/null
+++ b/google/cloud/aiplatform_v1/types/manual_batch_tuning_parameters.py
@@ -0,0 +1,46 @@
+# -*- coding: utf-8 -*-
+
+# Copyright 2020 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+import proto # type: ignore
+
+
+__protobuf__ = proto.module(
+ package="google.cloud.aiplatform.v1", manifest={"ManualBatchTuningParameters",},
+)
+
+
+class ManualBatchTuningParameters(proto.Message):
+ r"""Manual batch tuning parameters.
+
+ Attributes:
+ batch_size (int):
+ Immutable. The number of records (e.g.
+ instances) of the operation given in each batch
+ to a machine replica. Machine type and the size
+ of a single record should be considered when
+ setting this parameter; a higher value speeds up
+ the batch operation's execution, but a value that
+ is too high will result in a whole batch not
+ fitting in a machine's memory, and the whole
+ operation will fail.
+ The default value is 4.
+ """
+
+ batch_size = proto.Field(proto.INT32, number=1)
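+
+# Illustrative usage sketch: overriding the default batch size of 4. The value
+# is a placeholder and should be tuned to the machine type and record size.
+#
+#     from google.cloud.aiplatform_v1.types import manual_batch_tuning_parameters
+#
+#     params = manual_batch_tuning_parameters.ManualBatchTuningParameters(
+#         batch_size=16,
+#     )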
+
+
+__all__ = tuple(sorted(__protobuf__.manifest))
diff --git a/google/cloud/aiplatform_v1/types/migratable_resource.py b/google/cloud/aiplatform_v1/types/migratable_resource.py
new file mode 100644
index 0000000000..652a835c89
--- /dev/null
+++ b/google/cloud/aiplatform_v1/types/migratable_resource.py
@@ -0,0 +1,179 @@
+# -*- coding: utf-8 -*-
+
+# Copyright 2020 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+import proto # type: ignore
+
+
+from google.protobuf import timestamp_pb2 as timestamp # type: ignore
+
+
+__protobuf__ = proto.module(
+ package="google.cloud.aiplatform.v1", manifest={"MigratableResource",},
+)
+
+
+class MigratableResource(proto.Message):
+ r"""Represents one resource that exists in automl.googleapis.com,
+ datalabeling.googleapis.com or ml.googleapis.com.
+
+ Attributes:
+ ml_engine_model_version (google.cloud.aiplatform_v1.types.MigratableResource.MlEngineModelVersion):
+ Output only. Represents one Version in
+ ml.googleapis.com.
+ automl_model (google.cloud.aiplatform_v1.types.MigratableResource.AutomlModel):
+ Output only. Represents one Model in
+ automl.googleapis.com.
+ automl_dataset (google.cloud.aiplatform_v1.types.MigratableResource.AutomlDataset):
+ Output only. Represents one Dataset in
+ automl.googleapis.com.
+ data_labeling_dataset (google.cloud.aiplatform_v1.types.MigratableResource.DataLabelingDataset):
+ Output only. Represents one Dataset in
+ datalabeling.googleapis.com.
+ last_migrate_time (google.protobuf.timestamp_pb2.Timestamp):
+ Output only. Timestamp when the last
+ migration attempt on this MigratableResource
+ started. Will not be set if there's no migration
+ attempt on this MigratableResource.
+ last_update_time (google.protobuf.timestamp_pb2.Timestamp):
+ Output only. Timestamp when this
+ MigratableResource was last updated.
+ """
+
+ class MlEngineModelVersion(proto.Message):
+ r"""Represents one model Version in ml.googleapis.com.
+
+ Attributes:
+ endpoint (str):
+ The ml.googleapis.com endpoint that this model Version
+ currently lives in. Example values:
+
+ - ml.googleapis.com
+ - us-central1-ml.googleapis.com
+ - europe-west4-ml.googleapis.com
+ - asia-east1-ml.googleapis.com
+ version (str):
+ Full resource name of ml engine model Version. Format:
+ ``projects/{project}/models/{model}/versions/{version}``.
+ """
+
+ endpoint = proto.Field(proto.STRING, number=1)
+
+ version = proto.Field(proto.STRING, number=2)
+
+ class AutomlModel(proto.Message):
+ r"""Represents one Model in automl.googleapis.com.
+
+ Attributes:
+ model (str):
+ Full resource name of automl Model. Format:
+ ``projects/{project}/locations/{location}/models/{model}``.
+ model_display_name (str):
+ The Model's display name in
+ automl.googleapis.com.
+ """
+
+ model = proto.Field(proto.STRING, number=1)
+
+ model_display_name = proto.Field(proto.STRING, number=3)
+
+ class AutomlDataset(proto.Message):
+ r"""Represents one Dataset in automl.googleapis.com.
+
+ Attributes:
+ dataset (str):
+ Full resource name of automl Dataset. Format:
+ ``projects/{project}/locations/{location}/datasets/{dataset}``.
+ dataset_display_name (str):
+ The Dataset's display name in
+ automl.googleapis.com.
+ """
+
+ dataset = proto.Field(proto.STRING, number=1)
+
+ dataset_display_name = proto.Field(proto.STRING, number=4)
+
+ class DataLabelingDataset(proto.Message):
+ r"""Represents one Dataset in datalabeling.googleapis.com.
+
+ Attributes:
+ dataset (str):
+ Full resource name of data labeling Dataset. Format:
+ ``projects/{project}/datasets/{dataset}``.
+ dataset_display_name (str):
+ The Dataset's display name in
+ datalabeling.googleapis.com.
+ data_labeling_annotated_datasets (Sequence[google.cloud.aiplatform_v1.types.MigratableResource.DataLabelingDataset.DataLabelingAnnotatedDataset]):
+ The migratable AnnotatedDataset in
+ datalabeling.googleapis.com belongs to the data
+ labeling Dataset.
+ """
+
+ class DataLabelingAnnotatedDataset(proto.Message):
+ r"""Represents one AnnotatedDataset in
+ datalabeling.googleapis.com.
+
+ Attributes:
+ annotated_dataset (str):
+ Full resource name of data labeling AnnotatedDataset.
+ Format:
+
+ ``projects/{project}/datasets/{dataset}/annotatedDatasets/{annotated_dataset}``.
+ annotated_dataset_display_name (str):
+ The AnnotatedDataset's display name in
+ datalabeling.googleapis.com.
+ """
+
+ annotated_dataset = proto.Field(proto.STRING, number=1)
+
+ annotated_dataset_display_name = proto.Field(proto.STRING, number=3)
+
+ dataset = proto.Field(proto.STRING, number=1)
+
+ dataset_display_name = proto.Field(proto.STRING, number=4)
+
+ data_labeling_annotated_datasets = proto.RepeatedField(
+ proto.MESSAGE,
+ number=3,
+ message="MigratableResource.DataLabelingDataset.DataLabelingAnnotatedDataset",
+ )
+
+ ml_engine_model_version = proto.Field(
+ proto.MESSAGE, number=1, oneof="resource", message=MlEngineModelVersion,
+ )
+
+ automl_model = proto.Field(
+ proto.MESSAGE, number=2, oneof="resource", message=AutomlModel,
+ )
+
+ automl_dataset = proto.Field(
+ proto.MESSAGE, number=3, oneof="resource", message=AutomlDataset,
+ )
+
+ data_labeling_dataset = proto.Field(
+ proto.MESSAGE, number=4, oneof="resource", message=DataLabelingDataset,
+ )
+
+ last_migrate_time = proto.Field(
+ proto.MESSAGE, number=5, message=timestamp.Timestamp,
+ )
+
+ last_update_time = proto.Field(
+ proto.MESSAGE, number=6, message=timestamp.Timestamp,
+ )
+
+
+__all__ = tuple(sorted(__protobuf__.manifest))
diff --git a/google/cloud/aiplatform_v1/types/migration_service.py b/google/cloud/aiplatform_v1/types/migration_service.py
new file mode 100644
index 0000000000..acd69b37b4
--- /dev/null
+++ b/google/cloud/aiplatform_v1/types/migration_service.py
@@ -0,0 +1,376 @@
+# -*- coding: utf-8 -*-
+
+# Copyright 2020 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+import proto # type: ignore
+
+
+from google.cloud.aiplatform_v1.types import (
+ migratable_resource as gca_migratable_resource,
+)
+from google.cloud.aiplatform_v1.types import operation
+from google.rpc import status_pb2 as status # type: ignore
+
+
+__protobuf__ = proto.module(
+ package="google.cloud.aiplatform.v1",
+ manifest={
+ "SearchMigratableResourcesRequest",
+ "SearchMigratableResourcesResponse",
+ "BatchMigrateResourcesRequest",
+ "MigrateResourceRequest",
+ "BatchMigrateResourcesResponse",
+ "MigrateResourceResponse",
+ "BatchMigrateResourcesOperationMetadata",
+ },
+)
+
+
+class SearchMigratableResourcesRequest(proto.Message):
+ r"""Request message for
+ ``MigrationService.SearchMigratableResources``.
+
+ Attributes:
+ parent (str):
+ Required. The location that the migratable resources should
+ be searched from. It's the AI Platform location that the
+ resources can be migrated to, not the resources' original
+ location. Format:
+ ``projects/{project}/locations/{location}``
+ page_size (int):
+ The standard page size.
+ The default and maximum value is 100.
+ page_token (str):
+ The standard page token.
+ filter (str):
+ Supported filters are:
+
+ - Resource type: For a specific type of MigratableResource.
+
+ - ``ml_engine_model_version:*``
+ - ``automl_model:*``,
+ - ``automl_dataset:*``
+ - ``data_labeling_dataset:*``.
+
+ - Migrated or not: Filter migrated resource or not by
+ last_migrate_time.
+
+ - ``last_migrate_time:*`` will filter migrated
+ resources.
+ - ``NOT last_migrate_time:*`` will filter not yet
+ migrated resources.
+ """
+
+ parent = proto.Field(proto.STRING, number=1)
+
+ page_size = proto.Field(proto.INT32, number=2)
+
+ page_token = proto.Field(proto.STRING, number=3)
+
+ filter = proto.Field(proto.STRING, number=4)
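+
+# Illustrative usage sketch: searching for automl Models that can be migrated,
+# using one of the filter forms described above. The parent is a placeholder;
+# "NOT last_migrate_time:*" could be used instead to find not-yet-migrated
+# resources.
+#
+#     from google.cloud.aiplatform_v1.types import migration_service
+#
+#     request = migration_service.SearchMigratableResourcesRequest(
+#         parent="projects/my-project/locations/us-central1",
+#         filter="automl_model:*",
+#         page_size=100,
+#     )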
+
+
+class SearchMigratableResourcesResponse(proto.Message):
+ r"""Response message for
+ ``MigrationService.SearchMigratableResources``.
+
+ Attributes:
+ migratable_resources (Sequence[google.cloud.aiplatform_v1.types.MigratableResource]):
+ All migratable resources that can be migrated
+ to the location specified in the request.
+ next_page_token (str):
+ The standard next-page token. The migratable_resources may
+ not fill page_size in SearchMigratableResourcesRequest even
+ when there are subsequent pages.
+ """
+
+ @property
+ def raw_page(self):
+ return self
+
+ migratable_resources = proto.RepeatedField(
+ proto.MESSAGE, number=1, message=gca_migratable_resource.MigratableResource,
+ )
+
+ next_page_token = proto.Field(proto.STRING, number=2)
+
+
+class BatchMigrateResourcesRequest(proto.Message):
+ r"""Request message for
+ ``MigrationService.BatchMigrateResources``.
+
+ Attributes:
+ parent (str):
+ Required. The location that the migrated resources will live
+ in. Format: ``projects/{project}/locations/{location}``
+ migrate_resource_requests (Sequence[google.cloud.aiplatform_v1.types.MigrateResourceRequest]):
+ Required. The request messages specifying the
+ resources to migrate. They must be in the same
+ location as the destination. Up to 50 resources
+ can be migrated in one batch.
+ """
+
+ parent = proto.Field(proto.STRING, number=1)
+
+ migrate_resource_requests = proto.RepeatedField(
+ proto.MESSAGE, number=2, message="MigrateResourceRequest",
+ )
+
+
+class MigrateResourceRequest(proto.Message):
+ r"""Config of migrating one resource from automl.googleapis.com,
+ datalabeling.googleapis.com and ml.googleapis.com to AI
+ Platform.
+
+ Attributes:
+ migrate_ml_engine_model_version_config (google.cloud.aiplatform_v1.types.MigrateResourceRequest.MigrateMlEngineModelVersionConfig):
+ Config for migrating Version in
+ ml.googleapis.com to AI Platform's Model.
+ migrate_automl_model_config (google.cloud.aiplatform_v1.types.MigrateResourceRequest.MigrateAutomlModelConfig):
+ Config for migrating Model in
+ automl.googleapis.com to AI Platform's Model.
+ migrate_automl_dataset_config (google.cloud.aiplatform_v1.types.MigrateResourceRequest.MigrateAutomlDatasetConfig):
+ Config for migrating Dataset in
+ automl.googleapis.com to AI Platform's Dataset.
+ migrate_data_labeling_dataset_config (google.cloud.aiplatform_v1.types.MigrateResourceRequest.MigrateDataLabelingDatasetConfig):
+ Config for migrating Dataset in
+ datalabeling.googleapis.com to AI Platform's
+ Dataset.
+ """
+
+ class MigrateMlEngineModelVersionConfig(proto.Message):
+ r"""Config for migrating version in ml.googleapis.com to AI
+ Platform's Model.
+
+ Attributes:
+ endpoint (str):
+ Required. The ml.googleapis.com endpoint that this model
+ version should be migrated from. Example values:
+
+ - ml.googleapis.com
+
+ - us-central1-ml.googleapis.com
+
+ - europe-west4-ml.googleapis.com
+
+ - asia-east1-ml.googleapis.com
+ model_version (str):
+ Required. Full resource name of ml engine model version.
+ Format:
+ ``projects/{project}/models/{model}/versions/{version}``.
+ model_display_name (str):
+ Required. Display name of the model in AI
+ Platform. System will pick a display name if
+ unspecified.
+ """
+
+ endpoint = proto.Field(proto.STRING, number=1)
+
+ model_version = proto.Field(proto.STRING, number=2)
+
+ model_display_name = proto.Field(proto.STRING, number=3)
+
+ class MigrateAutomlModelConfig(proto.Message):
+ r"""Config for migrating Model in automl.googleapis.com to AI
+ Platform's Model.
+
+ Attributes:
+ model (str):
+ Required. Full resource name of automl Model. Format:
+ ``projects/{project}/locations/{location}/models/{model}``.
+ model_display_name (str):
+ Optional. Display name of the model in AI
+ Platform. System will pick a display name if
+ unspecified.
+ """
+
+ model = proto.Field(proto.STRING, number=1)
+
+ model_display_name = proto.Field(proto.STRING, number=2)
+
+ class MigrateAutomlDatasetConfig(proto.Message):
+ r"""Config for migrating Dataset in automl.googleapis.com to AI
+ Platform's Dataset.
+
+ Attributes:
+ dataset (str):
+ Required. Full resource name of automl Dataset. Format:
+ ``projects/{project}/locations/{location}/datasets/{dataset}``.
+ dataset_display_name (str):
+ Required. Display name of the Dataset in AI
+ Platform. System will pick a display name if
+ unspecified.
+ """
+
+ dataset = proto.Field(proto.STRING, number=1)
+
+ dataset_display_name = proto.Field(proto.STRING, number=2)
+
+ class MigrateDataLabelingDatasetConfig(proto.Message):
+ r"""Config for migrating Dataset in datalabeling.googleapis.com
+ to AI Platform's Dataset.
+
+ Attributes:
+ dataset (str):
+ Required. Full resource name of data labeling Dataset.
+ Format: ``projects/{project}/datasets/{dataset}``.
+ dataset_display_name (str):
+ Optional. Display name of the Dataset in AI
+ Platform. System will pick a display name if
+ unspecified.
+ migrate_data_labeling_annotated_dataset_configs (Sequence[google.cloud.aiplatform_v1.types.MigrateResourceRequest.MigrateDataLabelingDatasetConfig.MigrateDataLabelingAnnotatedDatasetConfig]):
+ Optional. Configs for migrating
+ AnnotatedDataset in datalabeling.googleapis.com
+ to AI Platform's SavedQuery. The specified
+ AnnotatedDatasets have to belong to the
+ datalabeling Dataset.
+ """
+
+ class MigrateDataLabelingAnnotatedDatasetConfig(proto.Message):
+ r"""Config for migrating AnnotatedDataset in
+ datalabeling.googleapis.com to AI Platform's SavedQuery.
+
+ Attributes:
+ annotated_dataset (str):
+ Required. Full resource name of data labeling
+ AnnotatedDataset. Format:
+
+ ``projects/{project}/datasets/{dataset}/annotatedDatasets/{annotated_dataset}``.
+ """
+
+ annotated_dataset = proto.Field(proto.STRING, number=1)
+
+ dataset = proto.Field(proto.STRING, number=1)
+
+ dataset_display_name = proto.Field(proto.STRING, number=2)
+
+ migrate_data_labeling_annotated_dataset_configs = proto.RepeatedField(
+ proto.MESSAGE,
+ number=3,
+ message="MigrateResourceRequest.MigrateDataLabelingDatasetConfig.MigrateDataLabelingAnnotatedDatasetConfig",
+ )
+
+ migrate_ml_engine_model_version_config = proto.Field(
+ proto.MESSAGE,
+ number=1,
+ oneof="request",
+ message=MigrateMlEngineModelVersionConfig,
+ )
+
+ migrate_automl_model_config = proto.Field(
+ proto.MESSAGE, number=2, oneof="request", message=MigrateAutomlModelConfig,
+ )
+
+ migrate_automl_dataset_config = proto.Field(
+ proto.MESSAGE, number=3, oneof="request", message=MigrateAutomlDatasetConfig,
+ )
+
+ migrate_data_labeling_dataset_config = proto.Field(
+ proto.MESSAGE,
+ number=4,
+ oneof="request",
+ message=MigrateDataLabelingDatasetConfig,
+ )
+
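
A hedged sketch of wiring a single MigrateResourceRequest (exactly one field of the ``request`` oneof set) into a BatchMigrateResourcesRequest; all resource names are placeholders:

.. code:: python

    from google.cloud import aiplatform_v1

    migrate_request = aiplatform_v1.MigrateResourceRequest(
        # Only one of the "request" oneof fields may be set.
        migrate_automl_model_config=aiplatform_v1.MigrateResourceRequest.MigrateAutomlModelConfig(
            model="projects/my-project/locations/us-central1/models/123",  # placeholder
            model_display_name="migrated-automl-model",
        ),
    )

    batch_request = aiplatform_v1.BatchMigrateResourcesRequest(
        parent="projects/my-project/locations/us-central1",  # placeholder
        migrate_resource_requests=[migrate_request],  # up to 50 per batch
    )

    client = aiplatform_v1.MigrationServiceClient()
    operation = client.batch_migrate_resources(request=batch_request)
    response = operation.result()  # BatchMigrateResourcesResponse on success
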
+
+class BatchMigrateResourcesResponse(proto.Message):
+ r"""Response message for
+ ``MigrationService.BatchMigrateResources``.
+
+ Attributes:
+ migrate_resource_responses (Sequence[google.cloud.aiplatform_v1.types.MigrateResourceResponse]):
+ Successfully migrated resources.
+ """
+
+ migrate_resource_responses = proto.RepeatedField(
+ proto.MESSAGE, number=1, message="MigrateResourceResponse",
+ )
+
+
+class MigrateResourceResponse(proto.Message):
+ r"""Describes a successfully migrated resource.
+
+ Attributes:
+ dataset (str):
+ Migrated Dataset's resource name.
+ model (str):
+ Migrated Model's resource name.
+ migratable_resource (google.cloud.aiplatform_v1.types.MigratableResource):
+ Before migration, the identifier in
+ ml.googleapis.com, automl.googleapis.com or
+ datalabeling.googleapis.com.
+ """
+
+ dataset = proto.Field(proto.STRING, number=1, oneof="migrated_resource")
+
+ model = proto.Field(proto.STRING, number=2, oneof="migrated_resource")
+
+ migratable_resource = proto.Field(
+ proto.MESSAGE, number=3, message=gca_migratable_resource.MigratableResource,
+ )
+
+
+class BatchMigrateResourcesOperationMetadata(proto.Message):
+ r"""Runtime operation information for
+ ``MigrationService.BatchMigrateResources``.
+
+ Attributes:
+ generic_metadata (google.cloud.aiplatform_v1.types.GenericOperationMetadata):
+ The common part of the operation metadata.
+ partial_results (Sequence[google.cloud.aiplatform_v1.types.BatchMigrateResourcesOperationMetadata.PartialResult]):
+ Partial results that reflect the latest
+ migration operation progress.
+ """
+
+ class PartialResult(proto.Message):
+ r"""Represents a partial result in a batch migration operation for one
+ ``MigrateResourceRequest``.
+
+ Attributes:
+ error (google.rpc.status_pb2.Status):
+ The error result of the migration request in
+ case of failure.
+ model (str):
+ Migrated model resource name.
+ dataset (str):
+ Migrated dataset resource name.
+ request (google.cloud.aiplatform_v1.types.MigrateResourceRequest):
+ It's the same as the value in
+ [MigrateResourceRequest.migrate_resource_requests][].
+ """
+
+ error = proto.Field(
+ proto.MESSAGE, number=2, oneof="result", message=status.Status,
+ )
+
+ model = proto.Field(proto.STRING, number=3, oneof="result")
+
+ dataset = proto.Field(proto.STRING, number=4, oneof="result")
+
+ request = proto.Field(
+ proto.MESSAGE, number=1, message="MigrateResourceRequest",
+ )
+
+ generic_metadata = proto.Field(
+ proto.MESSAGE, number=1, message=operation.GenericOperationMetadata,
+ )
+
+ partial_results = proto.RepeatedField(
+ proto.MESSAGE, number=2, message=PartialResult,
+ )
+
+
+__all__ = tuple(sorted(__protobuf__.manifest))
diff --git a/google/cloud/aiplatform_v1/types/model.py b/google/cloud/aiplatform_v1/types/model.py
new file mode 100644
index 0000000000..c2db797b98
--- /dev/null
+++ b/google/cloud/aiplatform_v1/types/model.py
@@ -0,0 +1,630 @@
+# -*- coding: utf-8 -*-
+
+# Copyright 2020 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+import proto # type: ignore
+
+
+from google.cloud.aiplatform_v1.types import deployed_model_ref
+from google.cloud.aiplatform_v1.types import encryption_spec as gca_encryption_spec
+from google.cloud.aiplatform_v1.types import env_var
+from google.protobuf import struct_pb2 as struct # type: ignore
+from google.protobuf import timestamp_pb2 as timestamp # type: ignore
+
+
+__protobuf__ = proto.module(
+ package="google.cloud.aiplatform.v1",
+ manifest={"Model", "PredictSchemata", "ModelContainerSpec", "Port",},
+)
+
+
+class Model(proto.Message):
+ r"""A trained machine learning Model.
+
+ Attributes:
+ name (str):
+ The resource name of the Model.
+ display_name (str):
+ Required. The display name of the Model.
+ The name can be up to 128 characters long and
+ can consist of any UTF-8 characters.
+ description (str):
+ The description of the Model.
+ predict_schemata (google.cloud.aiplatform_v1.types.PredictSchemata):
+ The schemata that describe formats of the Model's
+ predictions and explanations as given and returned via
+ ``PredictionService.Predict``
+ and [PredictionService.Explain][].
+ metadata_schema_uri (str):
+ Immutable. Points to a YAML file stored on Google Cloud
+ Storage describing additional information about the Model,
+ that is specific to it. Unset if the Model does not have any
+ additional information. The schema is defined as an OpenAPI
+ 3.0.2 `Schema
+ Object `__.
+ AutoML Models always have this field populated by AI
+ Platform; if no additional metadata is needed, this field is
+ set to an empty string. Note: The URI given on output will
+ be immutable and probably different, including the URI
+ scheme, than the one given on input. The output URI will
+ point to a location where the user only has read access.
+ metadata (google.protobuf.struct_pb2.Value):
+ Immutable. Additional information about the Model; the
+ schema of the metadata can be found in
+ ``metadata_schema``.
+ Unset if the Model does not have any additional information.
+ supported_export_formats (Sequence[google.cloud.aiplatform_v1.types.Model.ExportFormat]):
+ Output only. The formats in which this Model
+ may be exported. If empty, this Model is not
+ available for export.
+ training_pipeline (str):
+ Output only. The resource name of the
+ TrainingPipeline that uploaded this Model, if
+ any.
+ container_spec (google.cloud.aiplatform_v1.types.ModelContainerSpec):
+ Input only. The specification of the container that is to be
+ used when deploying this Model. The specification is
+ ingested upon
+ ``ModelService.UploadModel``,
+ and all binaries it contains are copied and stored
+ internally by AI Platform. Not present for AutoML Models.
+ artifact_uri (str):
+ Immutable. The path to the directory
+ containing the Model artifact and any of its
+ supporting files. Not present for AutoML Models.
+ supported_deployment_resources_types (Sequence[google.cloud.aiplatform_v1.types.Model.DeploymentResourcesType]):
+ Output only. When this Model is deployed, its prediction
+ resources are described by the ``prediction_resources``
+ field of the
+ ``Endpoint.deployed_models``
+ object. Because not all Models support all resource
+ configuration types, the configuration types this Model
+ supports are listed here. If no configuration types are
+ listed, the Model cannot be deployed to an
+ ``Endpoint`` and does not
+ support online predictions
+ (``PredictionService.Predict``
+ or [PredictionService.Explain][]). Such a Model can serve
+ predictions by using a
+ ``BatchPredictionJob``,
+ if it has at least one entry each in
+ ``supported_input_storage_formats``
+ and
+ ``supported_output_storage_formats``.
+ supported_input_storage_formats (Sequence[str]):
+ Output only. The formats this Model supports in
+ ``BatchPredictionJob.input_config``.
+ If
+ ``PredictSchemata.instance_schema_uri``
+ exists, the instances should be given as per that schema.
+
+ The possible formats are:
+
+ - ``jsonl`` The JSON Lines format, where each instance is a
+ single line. Uses
+ ``GcsSource``.
+
+ - ``csv`` The CSV format, where each instance is a single
+ comma-separated line. The first line in the file is the
+ header, containing comma-separated field names. Uses
+ ``GcsSource``.
+
+ - ``tf-record`` The TFRecord format, where each instance is
+ a single record in tfrecord syntax. Uses
+ ``GcsSource``.
+
+ - ``tf-record-gzip`` Similar to ``tf-record``, but the file
+ is gzipped. Uses
+ ``GcsSource``.
+
+ - ``bigquery`` Each instance is a single row in BigQuery.
+ Uses
+ ``BigQuerySource``.
+
+ - ``file-list`` Each line of the file is the location of an
+ instance to process, uses ``gcs_source`` field of the
+ ``InputConfig``
+ object.
+
+ If this Model doesn't support any of these formats it means
+ it cannot be used with a
+ ``BatchPredictionJob``.
+ However, if it has
+ ``supported_deployment_resources_types``,
+ it could serve online predictions by using
+ ``PredictionService.Predict``
+ or [PredictionService.Explain][].
+ supported_output_storage_formats (Sequence[str]):
+ Output only. The formats this Model supports in
+ ``BatchPredictionJob.output_config``.
+ If both
+ ``PredictSchemata.instance_schema_uri``
+ and
+ ``PredictSchemata.prediction_schema_uri``
+ exist, the predictions are returned together with their
+ instances. In other words, the prediction has the original
+ instance data first, followed by the actual prediction
+ content (as per the schema).
+
+ The possible formats are:
+
+ - ``jsonl`` The JSON Lines format, where each prediction is
+ a single line. Uses
+ ``GcsDestination``.
+
+ - ``csv`` The CSV format, where each prediction is a single
+ comma-separated line. The first line in the file is the
+ header, containing comma-separated field names. Uses
+ ``GcsDestination``.
+
+ - ``bigquery`` Each prediction is a single row in a
+ BigQuery table, uses
+ ``BigQueryDestination``.
+
+ If this Model doesn't support any of these formats it means
+ it cannot be used with a
+ ``BatchPredictionJob``.
+ However, if it has
+ ``supported_deployment_resources_types``,
+ it could serve online predictions by using
+ ``PredictionService.Predict``
+ or [PredictionService.Explain][].
+ create_time (google.protobuf.timestamp_pb2.Timestamp):
+ Output only. Timestamp when this Model was
+ uploaded into AI Platform.
+ update_time (google.protobuf.timestamp_pb2.Timestamp):
+ Output only. Timestamp when this Model was
+ most recently updated.
+ deployed_models (Sequence[google.cloud.aiplatform_v1.types.DeployedModelRef]):
+ Output only. The pointers to DeployedModels
+ created from this Model. Note that Model could
+ have been deployed to Endpoints in different
+ Locations.
+ etag (str):
+ Used to perform consistent read-modify-write
+ updates. If not set, a blind "overwrite" update
+ happens.
+ labels (Sequence[google.cloud.aiplatform_v1.types.Model.LabelsEntry]):
+ The labels with user-defined metadata to
+ organize your Models.
+ Label keys and values can be no longer than 64
+ characters (Unicode codepoints), can only
+ contain lowercase letters, numeric characters,
+ underscores and dashes. International characters
+ are allowed.
+ See https://goo.gl/xmQnxf for more information
+ and examples of labels.
+ encryption_spec (google.cloud.aiplatform_v1.types.EncryptionSpec):
+ Customer-managed encryption key spec for a
+ Model. If set, this Model and all sub-resources
+ of this Model will be secured by this key.
+ """
+
+ class DeploymentResourcesType(proto.Enum):
+ r"""Identifies a type of Model's prediction resources."""
+ DEPLOYMENT_RESOURCES_TYPE_UNSPECIFIED = 0
+ DEDICATED_RESOURCES = 1
+ AUTOMATIC_RESOURCES = 2
+
+ class ExportFormat(proto.Message):
+ r"""Represents export format supported by the Model.
+ All formats export to Google Cloud Storage.
+
+ Attributes:
+ id (str):
+ Output only. The ID of the export format. The possible
+ format IDs are:
+
+ - ``tflite`` Used for Android mobile devices.
+
+ - ``edgetpu-tflite`` Used for `Edge
+ TPU `__ devices.
+
+ - ``tf-saved-model`` A tensorflow model in SavedModel
+ format.
+
+ - ``tf-js`` A
+ `TensorFlow.js `__ model
+ that can be used in the browser and in Node.js using
+ JavaScript.
+
+ - ``core-ml`` Used for iOS mobile devices.
+
+ - ``custom-trained`` A Model that was uploaded or trained
+ by custom code.
+ exportable_contents (Sequence[google.cloud.aiplatform_v1.types.Model.ExportFormat.ExportableContent]):
+ Output only. The content of this Model that
+ may be exported.
+ """
+
+ class ExportableContent(proto.Enum):
+ r"""The Model content that can be exported."""
+ EXPORTABLE_CONTENT_UNSPECIFIED = 0
+ ARTIFACT = 1
+ IMAGE = 2
+
+ id = proto.Field(proto.STRING, number=1)
+
+ exportable_contents = proto.RepeatedField(
+ proto.ENUM, number=2, enum="Model.ExportFormat.ExportableContent",
+ )
+
+ name = proto.Field(proto.STRING, number=1)
+
+ display_name = proto.Field(proto.STRING, number=2)
+
+ description = proto.Field(proto.STRING, number=3)
+
+ predict_schemata = proto.Field(proto.MESSAGE, number=4, message="PredictSchemata",)
+
+ metadata_schema_uri = proto.Field(proto.STRING, number=5)
+
+ metadata = proto.Field(proto.MESSAGE, number=6, message=struct.Value,)
+
+ supported_export_formats = proto.RepeatedField(
+ proto.MESSAGE, number=20, message=ExportFormat,
+ )
+
+ training_pipeline = proto.Field(proto.STRING, number=7)
+
+ container_spec = proto.Field(proto.MESSAGE, number=9, message="ModelContainerSpec",)
+
+ artifact_uri = proto.Field(proto.STRING, number=26)
+
+ supported_deployment_resources_types = proto.RepeatedField(
+ proto.ENUM, number=10, enum=DeploymentResourcesType,
+ )
+
+ supported_input_storage_formats = proto.RepeatedField(proto.STRING, number=11)
+
+ supported_output_storage_formats = proto.RepeatedField(proto.STRING, number=12)
+
+ create_time = proto.Field(proto.MESSAGE, number=13, message=timestamp.Timestamp,)
+
+ update_time = proto.Field(proto.MESSAGE, number=14, message=timestamp.Timestamp,)
+
+ deployed_models = proto.RepeatedField(
+ proto.MESSAGE, number=15, message=deployed_model_ref.DeployedModelRef,
+ )
+
+ etag = proto.Field(proto.STRING, number=16)
+
+ labels = proto.MapField(proto.STRING, proto.STRING, number=17)
+
+ encryption_spec = proto.Field(
+ proto.MESSAGE, number=24, message=gca_encryption_spec.EncryptionSpec,
+ )
+
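
As an illustration of the fields above, a minimal Model message for a custom-trained model might be built as follows; the bucket, image and label values are placeholders and are not part of this diff:

.. code:: python

    from google.cloud import aiplatform_v1

    model = aiplatform_v1.Model(
        display_name="my-model",
        description="Example model served from a custom container.",
        artifact_uri="gs://my-bucket/model/",  # placeholder GCS path
        container_spec=aiplatform_v1.ModelContainerSpec(
            image_uri="gcr.io/my-project/my-serving-image:latest",  # placeholder
        ),
        labels={"team": "ml", "env": "dev"},
    )
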
+
+class PredictSchemata(proto.Message):
+ r"""Contains the schemata used in Model's predictions and explanations
+ via
+ ``PredictionService.Predict``,
+ [PredictionService.Explain][] and
+ ``BatchPredictionJob``.
+
+ Attributes:
+ instance_schema_uri (str):
+ Immutable. Points to a YAML file stored on Google Cloud
+ Storage describing the format of a single instance, which
+ are used in
+ ``PredictRequest.instances``,
+ [ExplainRequest.instances][] and
+ ``BatchPredictionJob.input_config``.
+ The schema is defined as an OpenAPI 3.0.2 `Schema
+ Object `__.
+ AutoML Models always have this field populated by AI
+ Platform. Note: The URI given on output will be immutable
+ and probably different, including the URI scheme, than the
+ one given on input. The output URI will point to a location
+ where the user only has read access.
+ parameters_schema_uri (str):
+ Immutable. Points to a YAML file stored on Google Cloud
+ Storage describing the parameters of prediction and
+ explanation via
+ ``PredictRequest.parameters``,
+ [ExplainRequest.parameters][] and
+ ``BatchPredictionJob.model_parameters``.
+ The schema is defined as an OpenAPI 3.0.2 `Schema
+ Object `__.
+ AutoML Models always have this field populated by AI
+ Platform; if no parameters are supported, it is set to
+ an empty string. Note: The URI given on output will be
+ immutable and probably different, including the URI scheme,
+ than the one given on input. The output URI will point to a
+ location where the user only has read access.
+ prediction_schema_uri (str):
+ Immutable. Points to a YAML file stored on Google Cloud
+ Storage describing the format of a single prediction
+ produced by this Model, which are returned via
+ ``PredictResponse.predictions``,
+ [ExplainResponse.explanations][], and
+ ``BatchPredictionJob.output_config``.
+ The schema is defined as an OpenAPI 3.0.2 `Schema
+ Object `__.
+ AutoML Models always have this field populated by AI
+ Platform. Note: The URI given on output will be immutable
+ and probably different, including the URI scheme, than the
+ one given on input. The output URI will point to a location
+ where the user only has read access.
+ """
+
+ instance_schema_uri = proto.Field(proto.STRING, number=1)
+
+ parameters_schema_uri = proto.Field(proto.STRING, number=2)
+
+ prediction_schema_uri = proto.Field(proto.STRING, number=3)
+
+
+class ModelContainerSpec(proto.Message):
+ r"""Specification of a container for serving predictions. This message
+ is a subset of the Kubernetes Container v1 core
+ `specification `__.
+
+ Attributes:
+ image_uri (str):
+ Required. Immutable. URI of the Docker image to be used as
+ the custom container for serving predictions. This URI must
+ identify an image in Artifact Registry or Container
+ Registry. Learn more about the container publishing
+ requirements, including permissions requirements for the AI
+ Platform Service Agent,
+ `here `__.
+
+ The container image is ingested upon
+ ``ModelService.UploadModel``,
+ stored internally, and this original path is afterwards not
+ used.
+
+ To learn about the requirements for the Docker image itself,
+ see `Custom container
+ requirements `__.
+ command (Sequence[str]):
+ Immutable. Specifies the command that runs when the
+ container starts. This overrides the container's
+ `ENTRYPOINT `__.
+ Specify this field as an array of executable and arguments,
+ similar to a Docker ``ENTRYPOINT``'s "exec" form, not its
+ "shell" form.
+
+ If you do not specify this field, then the container's
+ ``ENTRYPOINT`` runs, in conjunction with the
+ ``args``
+ field or the container's
+ ```CMD`` `__,
+ if either exists. If this field is not specified and the
+ container does not have an ``ENTRYPOINT``, then refer to the
+ Docker documentation about how ``CMD`` and ``ENTRYPOINT``
+ `interact `__.
+
+ If you specify this field, then you can also specify the
+ ``args`` field to provide additional arguments for this
+ command. However, if you specify this field, then the
+ container's ``CMD`` is ignored. See the `Kubernetes
+ documentation `__ about how
+ the ``command`` and ``args`` fields interact with a
+ container's ``ENTRYPOINT`` and ``CMD``.
+
+ In this field, you can reference environment variables `set
+ by AI
+ Platform `__
+ and environment variables set in the
+ ``env``
+ field. You cannot reference environment variables set in the
+ Docker image. In order for environment variables to be
+ expanded, reference them by using the following syntax:
+ $(VARIABLE_NAME). Note that this differs from Bash variable
+ expansion, which does not use parentheses. If a variable
+ cannot be resolved, the reference in the input string is
+ used unchanged. To avoid variable expansion, you can escape
+ this syntax with ``$$``; for example: $$(VARIABLE_NAME). This
+ field corresponds to the ``command`` field of the Kubernetes
+ Containers `v1 core
+ API `__.
+ args (Sequence[str]):
+ Immutable. Specifies arguments for the command that runs
+ when the container starts. This overrides the container's
+ ```CMD`` `__.
+ Specify this field as an array of executable and arguments,
+ similar to a Docker ``CMD``'s "default parameters" form.
+
+ If you don't specify this field but do specify the
+ ``command``
+ field, then the command from the ``command`` field runs
+ without any additional arguments. See the `Kubernetes
+ documentation `__ about how
+ the ``command`` and ``args`` fields interact with a
+ container's ``ENTRYPOINT`` and ``CMD``.
+
+ If you don't specify this field and don't specify the
+ ``command`` field, then the container's
+ ```ENTRYPOINT`` `__
+ and ``CMD`` determine what runs based on their default
+ behavior. See the Docker documentation about how ``CMD`` and
+ ``ENTRYPOINT`` `interact `__.
+
+ In this field, you can reference environment variables `set
+ by AI
+ Platform `__
+ and environment variables set in the
+ ``env``
+ field. You cannot reference environment variables set in the
+ Docker image. In order for environment variables to be
+ expanded, reference them by using the following syntax:
+ $(VARIABLE_NAME). Note that this differs from Bash variable
+ expansion, which does not use parentheses. If a variable
+ cannot be resolved, the reference in the input string is
+ used unchanged. To avoid variable expansion, you can escape
+ this syntax with ``$$``; for example: $$(VARIABLE_NAME). This
+ field corresponds to the ``args`` field of the Kubernetes
+ Containers `v1 core
+ API `__.
+ env (Sequence[google.cloud.aiplatform_v1.types.EnvVar]):
+ Immutable. List of environment variables to set in the
+ container. After the container starts running, code running
+ in the container can read these environment variables.
+
+ Additionally, the
+ ``command``
+ and
+ ``args``
+ fields can reference these variables. Later entries in this
+ list can also reference earlier entries. For example, the
+ following example sets the variable ``VAR_2`` to have the
+ value ``foo bar``:
+
+ .. code:: json
+
+ [
+ {
+ "name": "VAR_1",
+ "value": "foo"
+ },
+ {
+ "name": "VAR_2",
+ "value": "$(VAR_1) bar"
+ }
+ ]
+
+ If you switch the order of the variables in the example,
+ then the expansion does not occur.
+
+ This field corresponds to the ``env`` field of the
+ Kubernetes Containers `v1 core
+ API `__.
+ ports (Sequence[google.cloud.aiplatform_v1.types.Port]):
+ Immutable. List of ports to expose from the container. AI
+ Platform sends any prediction requests that it receives to
+ the first port on this list. AI Platform also sends
+ `liveness and health
+ checks `__ to
+ this port.
+
+ If you do not specify this field, it defaults to the
+ following value:
+
+ .. code:: json
+
+ [
+ {
+ "containerPort": 8080
+ }
+ ]
+
+ AI Platform does not use ports other than the first one
+ listed. This field corresponds to the ``ports`` field of the
+ Kubernetes Containers `v1 core
+ API `__.
+ predict_route (str):
+ Immutable. HTTP path on the container to send prediction
+ requests to. AI Platform forwards requests sent using
+ ``projects.locations.endpoints.predict``
+ to this path on the container's IP address and port. AI
+ Platform then returns the container's response in the API
+ response.
+
+ For example, if you set this field to ``/foo``, then when AI
+ Platform receives a prediction request, it forwards the
+ request body in a POST request to the ``/foo`` path on the
+ port of your container specified by the first value of this
+ ``ModelContainerSpec``'s
+ ``ports``
+ field.
+
+ If you don't specify this field, it defaults to the
+ following value when you [deploy this Model to an
+ Endpoint][google.cloud.aiplatform.v1.EndpointService.DeployModel]:
+ /v1/endpoints/ENDPOINT/deployedModels/DEPLOYED_MODEL:predict
+ The placeholders in this value are replaced as follows:
+
+ - ENDPOINT: The last segment (following ``endpoints/``) of
+ the ``Endpoint.name`` field of the Endpoint where this
+ Model has been deployed. (AI Platform makes this value
+ available to your container code as the
+ ```AIP_ENDPOINT_ID`` `__
+ environment variable.)
+
+ - DEPLOYED_MODEL:
+ ``DeployedModel.id``
+ of the ``DeployedModel``. (AI Platform makes this value
+ available to your container code as the
+ ```AIP_DEPLOYED_MODEL_ID`` environment
+ variable `__.)
+ health_route (str):
+ Immutable. HTTP path on the container to send health checks
+ to. AI Platform intermittently sends GET requests to this
+ path on the container's IP address and port to check that
+ the container is healthy. Read more about `health
+ checks `__.
+
+ For example, if you set this field to ``/bar``, then AI
+ Platform intermittently sends a GET request to the ``/bar``
+ path on the port of your container specified by the first
+ value of this ``ModelContainerSpec``'s
+ ``ports``
+ field.
+
+ If you don't specify this field, it defaults to the
+ following value when you [deploy this Model to an
+ Endpoint][google.cloud.aiplatform.v1.EndpointService.DeployModel]:
+ /v1/endpoints/ENDPOINT/deployedModels/DEPLOYED_MODEL:predict
+ The placeholders in this value are replaced as follows:
+
+ - ENDPOINT: The last segment (following ``endpoints/``) of
+ the ``Endpoint.name`` field of the Endpoint where this
+ Model has been deployed. (AI Platform makes this value
+ available to your container code as the
+ ```AIP_ENDPOINT_ID`` `__
+ environment variable.)
+
+ - DEPLOYED_MODEL:
+ ``DeployedModel.id``
+ of the ``DeployedModel``. (AI Platform makes this value
+ available to your container code as the
+ ```AIP_DEPLOYED_MODEL_ID`` `__
+ environment variable.)
+ """
+
+ image_uri = proto.Field(proto.STRING, number=1)
+
+ command = proto.RepeatedField(proto.STRING, number=2)
+
+ args = proto.RepeatedField(proto.STRING, number=3)
+
+ env = proto.RepeatedField(proto.MESSAGE, number=4, message=env_var.EnvVar,)
+
+ ports = proto.RepeatedField(proto.MESSAGE, number=5, message="Port",)
+
+ predict_route = proto.Field(proto.STRING, number=6)
+
+ health_route = proto.Field(proto.STRING, number=7)
+
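
A sketch that mirrors the JSON snippets in the docstring above: VAR_2 references VAR_1, a single port is exposed for predictions and health checks, and custom predict/health routes are set. The image URI and the $(AIP_STORAGE_URI) argument are illustrative placeholders:

.. code:: python

    from google.cloud import aiplatform_v1

    container_spec = aiplatform_v1.ModelContainerSpec(
        image_uri="gcr.io/my-project/my-serving-image:latest",  # placeholder
        command=["python", "server.py"],
        args=["--model-dir", "$(AIP_STORAGE_URI)"],  # expanded by AI Platform
        env=[
            aiplatform_v1.EnvVar(name="VAR_1", value="foo"),
            aiplatform_v1.EnvVar(name="VAR_2", value="$(VAR_1) bar"),  # "foo bar"
        ],
        ports=[aiplatform_v1.Port(container_port=8080)],
        predict_route="/foo",
        health_route="/bar",
    )
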
+
+class Port(proto.Message):
+ r"""Represents a network port in a container.
+
+ Attributes:
+ container_port (int):
+ The number of the port to expose on the pod's
+ IP address. Must be a valid port number, between
+ 1 and 65535 inclusive.
+ """
+
+ container_port = proto.Field(proto.INT32, number=3)
+
+
+__all__ = tuple(sorted(__protobuf__.manifest))
diff --git a/google/cloud/aiplatform_v1/types/model_evaluation.py b/google/cloud/aiplatform_v1/types/model_evaluation.py
new file mode 100644
index 0000000000..f617f3d197
--- /dev/null
+++ b/google/cloud/aiplatform_v1/types/model_evaluation.py
@@ -0,0 +1,73 @@
+# -*- coding: utf-8 -*-
+
+# Copyright 2020 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+import proto # type: ignore
+
+
+from google.protobuf import struct_pb2 as struct # type: ignore
+from google.protobuf import timestamp_pb2 as timestamp # type: ignore
+
+
+__protobuf__ = proto.module(
+ package="google.cloud.aiplatform.v1", manifest={"ModelEvaluation",},
+)
+
+
+class ModelEvaluation(proto.Message):
+ r"""A collection of metrics calculated by comparing Model's
+ predictions on all of the test data against annotations from the
+ test data.
+
+ Attributes:
+ name (str):
+ Output only. The resource name of the
+ ModelEvaluation.
+ metrics_schema_uri (str):
+ Output only. Points to a YAML file stored on Google Cloud
+ Storage describing the
+ ``metrics``
+ of this ModelEvaluation. The schema is defined as an OpenAPI
+ 3.0.2 `Schema
+ Object `__.
+ metrics (google.protobuf.struct_pb2.Value):
+ Output only. Evaluation metrics of the Model. The schema of
+ the metrics is stored in
+ ``metrics_schema_uri``
+ create_time (google.protobuf.timestamp_pb2.Timestamp):
+ Output only. Timestamp when this
+ ModelEvaluation was created.
+ slice_dimensions (Sequence[str]):
+ Output only. All possible
+ ``dimensions`` of
+ ModelEvaluationSlices. The dimensions can be used as the
+ filter of the
+ ``ModelService.ListModelEvaluationSlices``
+ request, in the form of ``slice.dimension = <dimension>``.
+ """
+
+ name = proto.Field(proto.STRING, number=1)
+
+ metrics_schema_uri = proto.Field(proto.STRING, number=2)
+
+ metrics = proto.Field(proto.MESSAGE, number=3, message=struct.Value,)
+
+ create_time = proto.Field(proto.MESSAGE, number=4, message=timestamp.Timestamp,)
+
+ slice_dimensions = proto.RepeatedField(proto.STRING, number=5)
+
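
Assuming a ModelServiceClient (defined elsewhere in this diff) and a placeholder model resource name, evaluations and their metrics could be read roughly like this:

.. code:: python

    from google.cloud import aiplatform_v1

    client = aiplatform_v1.ModelServiceClient()

    model_name = "projects/my-project/locations/us-central1/models/123"  # placeholder

    for evaluation in client.list_model_evaluations(parent=model_name):
        print(evaluation.name)
        print(evaluation.metrics_schema_uri)
        # `metrics` is a google.protobuf.Value laid out per the schema above.
        print(evaluation.metrics)
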
+
+__all__ = tuple(sorted(__protobuf__.manifest))
diff --git a/google/cloud/aiplatform_v1/types/model_evaluation_slice.py b/google/cloud/aiplatform_v1/types/model_evaluation_slice.py
new file mode 100644
index 0000000000..5653c3d2b6
--- /dev/null
+++ b/google/cloud/aiplatform_v1/types/model_evaluation_slice.py
@@ -0,0 +1,91 @@
+# -*- coding: utf-8 -*-
+
+# Copyright 2020 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+import proto # type: ignore
+
+
+from google.protobuf import struct_pb2 as struct # type: ignore
+from google.protobuf import timestamp_pb2 as timestamp # type: ignore
+
+
+__protobuf__ = proto.module(
+ package="google.cloud.aiplatform.v1", manifest={"ModelEvaluationSlice",},
+)
+
+
+class ModelEvaluationSlice(proto.Message):
+ r"""A collection of metrics calculated by comparing Model's
+ predictions on a slice of the test data against ground truth
+ annotations.
+
+ Attributes:
+ name (str):
+ Output only. The resource name of the
+ ModelEvaluationSlice.
+ slice_ (google.cloud.aiplatform_v1.types.ModelEvaluationSlice.Slice):
+ Output only. The slice of the test data that
+ is used to evaluate the Model.
+ metrics_schema_uri (str):
+ Output only. Points to a YAML file stored on Google Cloud
+ Storage describing the
+ ``metrics``
+ of this ModelEvaluationSlice. The schema is defined as an
+ OpenAPI 3.0.2 `Schema
+ Object `__.
+ metrics (google.protobuf.struct_pb2.Value):
+ Output only. Sliced evaluation metrics of the Model. The
+ schema of the metrics is stored in
+ ``metrics_schema_uri``
+ create_time (google.protobuf.timestamp_pb2.Timestamp):
+ Output only. Timestamp when this
+ ModelEvaluationSlice was created.
+ """
+
+ class Slice(proto.Message):
+ r"""Definition of a slice.
+
+ Attributes:
+ dimension (str):
+ Output only. The dimension of the slice. Well-known
+ dimensions are:
+
+ - ``annotationSpec``: This slice is on the test data that
+ has either ground truth or prediction with
+ ``AnnotationSpec.display_name``
+ equal to
+ ``value``.
+ value (str):
+ Output only. The value of the dimension in
+ this slice.
+ """
+
+ dimension = proto.Field(proto.STRING, number=1)
+
+ value = proto.Field(proto.STRING, number=2)
+
+ name = proto.Field(proto.STRING, number=1)
+
+ slice_ = proto.Field(proto.MESSAGE, number=2, message=Slice,)
+
+ metrics_schema_uri = proto.Field(proto.STRING, number=3)
+
+ metrics = proto.Field(proto.MESSAGE, number=4, message=struct.Value,)
+
+ create_time = proto.Field(proto.MESSAGE, number=5, message=timestamp.Timestamp,)
+
+
+__all__ = tuple(sorted(__protobuf__.manifest))
diff --git a/google/cloud/aiplatform_v1/types/model_service.py b/google/cloud/aiplatform_v1/types/model_service.py
new file mode 100644
index 0000000000..454e014fd5
--- /dev/null
+++ b/google/cloud/aiplatform_v1/types/model_service.py
@@ -0,0 +1,487 @@
+# -*- coding: utf-8 -*-
+
+# Copyright 2020 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+import proto # type: ignore
+
+
+from google.cloud.aiplatform_v1.types import io
+from google.cloud.aiplatform_v1.types import model as gca_model
+from google.cloud.aiplatform_v1.types import model_evaluation
+from google.cloud.aiplatform_v1.types import model_evaluation_slice
+from google.cloud.aiplatform_v1.types import operation
+from google.protobuf import field_mask_pb2 as field_mask # type: ignore
+
+
+__protobuf__ = proto.module(
+ package="google.cloud.aiplatform.v1",
+ manifest={
+ "UploadModelRequest",
+ "UploadModelOperationMetadata",
+ "UploadModelResponse",
+ "GetModelRequest",
+ "ListModelsRequest",
+ "ListModelsResponse",
+ "UpdateModelRequest",
+ "DeleteModelRequest",
+ "ExportModelRequest",
+ "ExportModelOperationMetadata",
+ "ExportModelResponse",
+ "GetModelEvaluationRequest",
+ "ListModelEvaluationsRequest",
+ "ListModelEvaluationsResponse",
+ "GetModelEvaluationSliceRequest",
+ "ListModelEvaluationSlicesRequest",
+ "ListModelEvaluationSlicesResponse",
+ },
+)
+
+
+class UploadModelRequest(proto.Message):
+ r"""Request message for
+ ``ModelService.UploadModel``.
+
+ Attributes:
+ parent (str):
+ Required. The resource name of the Location into which to
+ upload the Model. Format:
+ ``projects/{project}/locations/{location}``
+ model (google.cloud.aiplatform_v1.types.Model):
+ Required. The Model to create.
+ """
+
+ parent = proto.Field(proto.STRING, number=1)
+
+ model = proto.Field(proto.MESSAGE, number=2, message=gca_model.Model,)
+
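
A hedged sketch of an UploadModel call as a long-running operation; resource names and URIs are placeholders:

.. code:: python

    from google.cloud import aiplatform_v1

    client = aiplatform_v1.ModelServiceClient()

    request = aiplatform_v1.UploadModelRequest(
        parent="projects/my-project/locations/us-central1",  # placeholder
        model=aiplatform_v1.Model(
            display_name="my-model",
            artifact_uri="gs://my-bucket/model/",  # placeholder
            container_spec=aiplatform_v1.ModelContainerSpec(
                image_uri="gcr.io/my-project/my-serving-image:latest",  # placeholder
            ),
        ),
    )

    operation = client.upload_model(request=request)
    response = operation.result()  # UploadModelResponse
    print(response.model)  # resource name of the uploaded Model
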
+
+class UploadModelOperationMetadata(proto.Message):
+ r"""Details of
+ ``ModelService.UploadModel``
+ operation.
+
+ Attributes:
+ generic_metadata (google.cloud.aiplatform_v1.types.GenericOperationMetadata):
+ The common part of the operation metadata.
+ """
+
+ generic_metadata = proto.Field(
+ proto.MESSAGE, number=1, message=operation.GenericOperationMetadata,
+ )
+
+
+class UploadModelResponse(proto.Message):
+ r"""Response message of
+ ``ModelService.UploadModel``
+ operation.
+
+ Attributes:
+ model (str):
+ The name of the uploaded Model resource. Format:
+ ``projects/{project}/locations/{location}/models/{model}``
+ """
+
+ model = proto.Field(proto.STRING, number=1)
+
+
+class GetModelRequest(proto.Message):
+ r"""Request message for
+ ``ModelService.GetModel``.
+
+ Attributes:
+ name (str):
+ Required. The name of the Model resource. Format:
+ ``projects/{project}/locations/{location}/models/{model}``
+ """
+
+ name = proto.Field(proto.STRING, number=1)
+
+
+class ListModelsRequest(proto.Message):
+ r"""Request message for
+ ``ModelService.ListModels``.
+
+ Attributes:
+ parent (str):
+ Required. The resource name of the Location to list the
+ Models from. Format:
+ ``projects/{project}/locations/{location}``
+ filter (str):
+ An expression for filtering the results of the request. For
+ field names both snake_case and camelCase are supported.
+
+ - ``model`` supports = and !=. ``model`` represents the
+ Model ID, i.e. the last segment of the Model's [resource
+ name][google.cloud.aiplatform.v1.Model.name].
+ - ``display_name`` supports = and !=
+ - ``labels`` supports general map functions, that is:
+
+ - ``labels.key=value`` - key:value equality
+ - ``labels.key:*`` or ``labels:key`` - key existence
+ - A key including a space must be quoted:
+ ``labels."a key"``.
+
+ Some examples:
+
+ - ``model=1234``
+ - ``displayName="myDisplayName"``
+ - ``labels.myKey="myValue"``
+ page_size (int):
+ The standard list page size.
+ page_token (str):
+ The standard list page token. Typically obtained via
+ ``ListModelsResponse.next_page_token``
+ of the previous
+ ``ModelService.ListModels``
+ call.
+ read_mask (google.protobuf.field_mask_pb2.FieldMask):
+ Mask specifying which fields to read.
+ order_by (str):
+ A comma-separated list of fields to order by, sorted in
+ ascending order. Use "desc" after a field name for
+ descending. Supported fields:
+
+ - ``display_name``
+ - ``create_time``
+ - ``update_time``
+
+ Example: ``display_name, create_time desc``.
+ """
+
+ parent = proto.Field(proto.STRING, number=1)
+
+ filter = proto.Field(proto.STRING, number=2)
+
+ page_size = proto.Field(proto.INT32, number=3)
+
+ page_token = proto.Field(proto.STRING, number=4)
+
+ read_mask = proto.Field(proto.MESSAGE, number=5, message=field_mask.FieldMask,)
+
+ order_by = proto.Field(proto.STRING, number=6)
+
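
One way the filter and order_by fields documented above might be exercised, using placeholder values:

.. code:: python

    from google.cloud import aiplatform_v1

    client = aiplatform_v1.ModelServiceClient()

    request = aiplatform_v1.ListModelsRequest(
        parent="projects/my-project/locations/us-central1",  # placeholder
        filter='labels.myKey="myValue"',      # one of the documented forms
        order_by="create_time desc",
        page_size=50,
    )

    # The returned pager follows next_page_token automatically.
    for model in client.list_models(request=request):
        print(model.name, model.display_name)
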
+
+class ListModelsResponse(proto.Message):
+ r"""Response message for
+ ``ModelService.ListModels``
+
+ Attributes:
+ models (Sequence[google.cloud.aiplatform_v1.types.Model]):
+ List of Models in the requested page.
+ next_page_token (str):
+ A token to retrieve next page of results. Pass to
+ ``ListModelsRequest.page_token``
+ to obtain that page.
+ """
+
+ @property
+ def raw_page(self):
+ return self
+
+ models = proto.RepeatedField(proto.MESSAGE, number=1, message=gca_model.Model,)
+
+ next_page_token = proto.Field(proto.STRING, number=2)
+
+
+class UpdateModelRequest(proto.Message):
+ r"""Request message for
+ ``ModelService.UpdateModel``.
+
+ Attributes:
+ model (google.cloud.aiplatform_v1.types.Model):
+ Required. The Model which replaces the
+ resource on the server.
+ update_mask (google.protobuf.field_mask_pb2.FieldMask):
+ Required. The update mask applies to the resource. For the
+ ``FieldMask`` definition, see
+ `FieldMask `__.
+ """
+
+ model = proto.Field(proto.MESSAGE, number=1, message=gca_model.Model,)
+
+ update_mask = proto.Field(proto.MESSAGE, number=2, message=field_mask.FieldMask,)
+
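
A sketch of a partial update using a FieldMask, so only the listed paths are overwritten; the model name is a placeholder:

.. code:: python

    from google.cloud import aiplatform_v1
    from google.protobuf import field_mask_pb2

    request = aiplatform_v1.UpdateModelRequest(
        model=aiplatform_v1.Model(
            name="projects/my-project/locations/us-central1/models/123",  # placeholder
            display_name="my-model-v2",
            description="Updated description.",
        ),
        update_mask=field_mask_pb2.FieldMask(paths=["display_name", "description"]),
    )

    client = aiplatform_v1.ModelServiceClient()
    updated_model = client.update_model(request=request)
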
+
+class DeleteModelRequest(proto.Message):
+ r"""Request message for
+ ``ModelService.DeleteModel``.
+
+ Attributes:
+ name (str):
+ Required. The name of the Model resource to be deleted.
+ Format:
+ ``projects/{project}/locations/{location}/models/{model}``
+ """
+
+ name = proto.Field(proto.STRING, number=1)
+
+
+class ExportModelRequest(proto.Message):
+ r"""Request message for
+ ``ModelService.ExportModel``.
+
+ Attributes:
+ name (str):
+ Required. The resource name of the Model to export. Format:
+ ``projects/{project}/locations/{location}/models/{model}``
+ output_config (google.cloud.aiplatform_v1.types.ExportModelRequest.OutputConfig):
+ Required. The desired output location and
+ configuration.
+ """
+
+ class OutputConfig(proto.Message):
+ r"""Output configuration for the Model export.
+
+ Attributes:
+ export_format_id (str):
+ The ID of the format in which the Model must be exported.
+ Each Model lists the [export formats it
+ supports][google.cloud.aiplatform.v1.Model.supported_export_formats].
+ If no value is provided here, then the first from the list
+ of the Model's supported formats is used by default.
+ artifact_destination (google.cloud.aiplatform_v1.types.GcsDestination):
+ The Cloud Storage location where the Model artifact is to be
+ written to. Under the directory given as the destination a
+ new one with name
+ "``model-export-<model-display-name>-<timestamp>``",
+ where timestamp is in YYYY-MM-DDThh:mm:ss.sssZ ISO-8601
+ format, will be created. Inside, the Model and any of its
+ supporting files will be written. This field should only be
+ set when the ``exportableContent`` field of the
+ [Model.supported_export_formats] object contains
+ ``ARTIFACT``.
+ image_destination (google.cloud.aiplatform_v1.types.ContainerRegistryDestination):
+ The Google Container Registry or Artifact Registry uri where
+ the Model container image will be copied to. This field
+ should only be set when the ``exportableContent`` field of
+ the [Model.supported_export_formats] object contains
+ ``IMAGE``.
+ """
+
+ export_format_id = proto.Field(proto.STRING, number=1)
+
+ artifact_destination = proto.Field(
+ proto.MESSAGE, number=3, message=io.GcsDestination,
+ )
+
+ image_destination = proto.Field(
+ proto.MESSAGE, number=4, message=io.ContainerRegistryDestination,
+ )
+
+ name = proto.Field(proto.STRING, number=1)
+
+ output_config = proto.Field(proto.MESSAGE, number=2, message=OutputConfig,)
+
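
An illustrative ExportModel request that writes a ``tf-saved-model`` artifact to a placeholder Cloud Storage prefix:

.. code:: python

    from google.cloud import aiplatform_v1

    request = aiplatform_v1.ExportModelRequest(
        name="projects/my-project/locations/us-central1/models/123",  # placeholder
        output_config=aiplatform_v1.ExportModelRequest.OutputConfig(
            export_format_id="tf-saved-model",
            artifact_destination=aiplatform_v1.GcsDestination(
                output_uri_prefix="gs://my-bucket/exports/",  # placeholder
            ),
        ),
    )

    client = aiplatform_v1.ModelServiceClient()
    operation = client.export_model(request=request)
    operation.result()  # ExportModelResponse is empty; completion signals success.
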
+
+class ExportModelOperationMetadata(proto.Message):
+ r"""Details of
+ ``ModelService.ExportModel``
+ operation.
+
+ Attributes:
+ generic_metadata (google.cloud.aiplatform_v1.types.GenericOperationMetadata):
+ The common part of the operation metadata.
+ output_info (google.cloud.aiplatform_v1.types.ExportModelOperationMetadata.OutputInfo):
+ Output only. Information further describing
+ the output of this Model export.
+ """
+
+ class OutputInfo(proto.Message):
+ r"""Further describes the output of the ExportModel. Supplements
+ ``ExportModelRequest.OutputConfig``.
+
+ Attributes:
+ artifact_output_uri (str):
+ Output only. If the Model artifact is being
+ exported to Google Cloud Storage this is the
+ full path of the directory created, into which
+ the Model files are being written to.
+ image_output_uri (str):
+ Output only. If the Model image is being
+ exported to Google Container Registry or
+ Artifact Registry this is the full path of the
+ image created.
+ """
+
+ artifact_output_uri = proto.Field(proto.STRING, number=2)
+
+ image_output_uri = proto.Field(proto.STRING, number=3)
+
+ generic_metadata = proto.Field(
+ proto.MESSAGE, number=1, message=operation.GenericOperationMetadata,
+ )
+
+ output_info = proto.Field(proto.MESSAGE, number=2, message=OutputInfo,)
+
+
+class ExportModelResponse(proto.Message):
+ r"""Response message of
+ ``ModelService.ExportModel``
+ operation.
+ """
+
+
+class GetModelEvaluationRequest(proto.Message):
+ r"""Request message for
+ ``ModelService.GetModelEvaluation``.
+
+ Attributes:
+ name (str):
+ Required. The name of the ModelEvaluation resource. Format:
+
+ ``projects/{project}/locations/{location}/models/{model}/evaluations/{evaluation}``
+ """
+
+ name = proto.Field(proto.STRING, number=1)
+
+
+class ListModelEvaluationsRequest(proto.Message):
+ r"""Request message for
+ ``ModelService.ListModelEvaluations``.
+
+ Attributes:
+ parent (str):
+ Required. The resource name of the Model to list the
+ ModelEvaluations from. Format:
+ ``projects/{project}/locations/{location}/models/{model}``
+ filter (str):
+ The standard list filter.
+ page_size (int):
+ The standard list page size.
+ page_token (str):
+ The standard list page token. Typically obtained via
+ ``ListModelEvaluationsResponse.next_page_token``
+ of the previous
+ ``ModelService.ListModelEvaluations``
+ call.
+ read_mask (google.protobuf.field_mask_pb2.FieldMask):
+ Mask specifying which fields to read.
+ """
+
+ parent = proto.Field(proto.STRING, number=1)
+
+ filter = proto.Field(proto.STRING, number=2)
+
+ page_size = proto.Field(proto.INT32, number=3)
+
+ page_token = proto.Field(proto.STRING, number=4)
+
+ read_mask = proto.Field(proto.MESSAGE, number=5, message=field_mask.FieldMask,)
+
+
+class ListModelEvaluationsResponse(proto.Message):
+ r"""Response message for
+ ``ModelService.ListModelEvaluations``.
+
+ Attributes:
+ model_evaluations (Sequence[google.cloud.aiplatform_v1.types.ModelEvaluation]):
+ List of ModelEvaluations in the requested
+ page.
+ next_page_token (str):
+ A token to retrieve next page of results. Pass to
+ ``ListModelEvaluationsRequest.page_token``
+ to obtain that page.
+ """
+
+ @property
+ def raw_page(self):
+ return self
+
+ model_evaluations = proto.RepeatedField(
+ proto.MESSAGE, number=1, message=model_evaluation.ModelEvaluation,
+ )
+
+ next_page_token = proto.Field(proto.STRING, number=2)
+
+
+class GetModelEvaluationSliceRequest(proto.Message):
+ r"""Request message for
+ ``ModelService.GetModelEvaluationSlice``.
+
+ Attributes:
+ name (str):
+ Required. The name of the ModelEvaluationSlice resource.
+ Format:
+
+ ``projects/{project}/locations/{location}/models/{model}/evaluations/{evaluation}/slices/{slice}``
+ """
+
+ name = proto.Field(proto.STRING, number=1)
+
+
+class ListModelEvaluationSlicesRequest(proto.Message):
+ r"""Request message for
+ ``ModelService.ListModelEvaluationSlices``.
+
+ Attributes:
+ parent (str):
+ Required. The resource name of the ModelEvaluation to list
+ the ModelEvaluationSlices from. Format:
+
+ ``projects/{project}/locations/{location}/models/{model}/evaluations/{evaluation}``
+ filter (str):
+ The standard list filter.
+
+ - ``slice.dimension`` - for =.
+ page_size (int):
+ The standard list page size.
+ page_token (str):
+ The standard list page token. Typically obtained via
+ ``ListModelEvaluationSlicesResponse.next_page_token``
+ of the previous
+ ``ModelService.ListModelEvaluationSlices``
+ call.
+ read_mask (google.protobuf.field_mask_pb2.FieldMask):
+ Mask specifying which fields to read.
+ """
+
+ parent = proto.Field(proto.STRING, number=1)
+
+ filter = proto.Field(proto.STRING, number=2)
+
+ page_size = proto.Field(proto.INT32, number=3)
+
+ page_token = proto.Field(proto.STRING, number=4)
+
+ read_mask = proto.Field(proto.MESSAGE, number=5, message=field_mask.FieldMask,)
+
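
A sketch of listing slices restricted to the ``annotationSpec`` dimension; the evaluation name and the exact filter quoting are illustrative:

.. code:: python

    from google.cloud import aiplatform_v1

    client = aiplatform_v1.ModelServiceClient()

    evaluation_name = (
        "projects/my-project/locations/us-central1/models/123/evaluations/456"  # placeholder
    )

    request = aiplatform_v1.ListModelEvaluationSlicesRequest(
        parent=evaluation_name,
        filter='slice.dimension = "annotationSpec"',
    )

    for evaluation_slice in client.list_model_evaluation_slices(request=request):
        print(evaluation_slice.slice_.dimension, evaluation_slice.slice_.value)
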
+
+class ListModelEvaluationSlicesResponse(proto.Message):
+ r"""Response message for
+ ``ModelService.ListModelEvaluationSlices``.
+
+ Attributes:
+ model_evaluation_slices (Sequence[google.cloud.aiplatform_v1.types.ModelEvaluationSlice]):
+ List of ModelEvaluationSlices in the requested
+ page.
+ next_page_token (str):
+ A token to retrieve next page of results. Pass to
+ ``ListModelEvaluationSlicesRequest.page_token``
+ to obtain that page.
+ """
+
+ @property
+ def raw_page(self):
+ return self
+
+ model_evaluation_slices = proto.RepeatedField(
+ proto.MESSAGE, number=1, message=model_evaluation_slice.ModelEvaluationSlice,
+ )
+
+ next_page_token = proto.Field(proto.STRING, number=2)
+
+
+__all__ = tuple(sorted(__protobuf__.manifest))
diff --git a/google/cloud/aiplatform_v1/types/operation.py b/google/cloud/aiplatform_v1/types/operation.py
new file mode 100644
index 0000000000..fe24030e79
--- /dev/null
+++ b/google/cloud/aiplatform_v1/types/operation.py
@@ -0,0 +1,73 @@
+# -*- coding: utf-8 -*-
+
+# Copyright 2020 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+import proto # type: ignore
+
+
+from google.protobuf import timestamp_pb2 as timestamp # type: ignore
+from google.rpc import status_pb2 as status # type: ignore
+
+
+__protobuf__ = proto.module(
+ package="google.cloud.aiplatform.v1",
+ manifest={"GenericOperationMetadata", "DeleteOperationMetadata",},
+)
+
+
+class GenericOperationMetadata(proto.Message):
+ r"""Generic Metadata shared by all operations.
+
+ Attributes:
+ partial_failures (Sequence[google.rpc.status_pb2.Status]):
+ Output only. Partial failures encountered.
+ E.g. single files that couldn't be read.
+ This field should never exceed 20 entries.
+ Status details field will contain standard GCP
+ error details.
+ create_time (google.protobuf.timestamp_pb2.Timestamp):
+ Output only. Time when the operation was
+ created.
+ update_time (google.protobuf.timestamp_pb2.Timestamp):
+ Output only. Time when the operation was
+ updated for the last time. If the operation has
+ finished (successfully or not), this is the
+ finish time.
+ """
+
+ partial_failures = proto.RepeatedField(
+ proto.MESSAGE, number=1, message=status.Status,
+ )
+
+ create_time = proto.Field(proto.MESSAGE, number=2, message=timestamp.Timestamp,)
+
+ update_time = proto.Field(proto.MESSAGE, number=3, message=timestamp.Timestamp,)
+
+
+class DeleteOperationMetadata(proto.Message):
+ r"""Details of operations that perform deletes of any entities.
+
+ Attributes:
+ generic_metadata (google.cloud.aiplatform_v1.types.GenericOperationMetadata):
+ The common part of the operation metadata.
+ """
+
+ generic_metadata = proto.Field(
+ proto.MESSAGE, number=1, message="GenericOperationMetadata",
+ )
+
+
+__all__ = tuple(sorted(__protobuf__.manifest))
diff --git a/google/cloud/aiplatform_v1/types/pipeline_service.py b/google/cloud/aiplatform_v1/types/pipeline_service.py
new file mode 100644
index 0000000000..b2c6d5bbe3
--- /dev/null
+++ b/google/cloud/aiplatform_v1/types/pipeline_service.py
@@ -0,0 +1,175 @@
+# -*- coding: utf-8 -*-
+
+# Copyright 2020 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+import proto # type: ignore
+
+
+from google.cloud.aiplatform_v1.types import training_pipeline as gca_training_pipeline
+from google.protobuf import field_mask_pb2 as field_mask # type: ignore
+
+
+__protobuf__ = proto.module(
+ package="google.cloud.aiplatform.v1",
+ manifest={
+ "CreateTrainingPipelineRequest",
+ "GetTrainingPipelineRequest",
+ "ListTrainingPipelinesRequest",
+ "ListTrainingPipelinesResponse",
+ "DeleteTrainingPipelineRequest",
+ "CancelTrainingPipelineRequest",
+ },
+)
+
+
+class CreateTrainingPipelineRequest(proto.Message):
+ r"""Request message for
+ ``PipelineService.CreateTrainingPipeline``.
+
+ Attributes:
+ parent (str):
+ Required. The resource name of the Location to create the
+ TrainingPipeline in. Format:
+ ``projects/{project}/locations/{location}``
+ training_pipeline (google.cloud.aiplatform_v1.types.TrainingPipeline):
+ Required. The TrainingPipeline to create.
+ """
+
+ parent = proto.Field(proto.STRING, number=1)
+
+ training_pipeline = proto.Field(
+ proto.MESSAGE, number=2, message=gca_training_pipeline.TrainingPipeline,
+ )
+
+
+class GetTrainingPipelineRequest(proto.Message):
+ r"""Request message for
+ ``PipelineService.GetTrainingPipeline``.
+
+ Attributes:
+ name (str):
+ Required. The name of the TrainingPipeline resource. Format:
+
+ ``projects/{project}/locations/{location}/trainingPipelines/{training_pipeline}``
+ """
+
+ name = proto.Field(proto.STRING, number=1)
+
+
+class ListTrainingPipelinesRequest(proto.Message):
+ r"""Request message for
+ ``PipelineService.ListTrainingPipelines``.
+
+ Attributes:
+ parent (str):
+ Required. The resource name of the Location to list the
+ TrainingPipelines from. Format:
+ ``projects/{project}/locations/{location}``
+ filter (str):
+ The standard list filter. Supported fields:
+
+ - ``display_name`` supports = and !=.
+
+ - ``state`` supports = and !=.
+
+ Some examples of using the filter are:
+
+ - ``state="PIPELINE_STATE_SUCCEEDED" AND display_name="my_pipeline"``
+
+ - ``state="PIPELINE_STATE_RUNNING" OR display_name="my_pipeline"``
+
+ - ``NOT display_name="my_pipeline"``
+
+ - ``state="PIPELINE_STATE_FAILED"``
+ page_size (int):
+ The standard list page size.
+ page_token (str):
+ The standard list page token. Typically obtained via
+ ``ListTrainingPipelinesResponse.next_page_token``
+ of the previous
+ ``PipelineService.ListTrainingPipelines``
+ call.
+ read_mask (google.protobuf.field_mask_pb2.FieldMask):
+ Mask specifying which fields to read.
+ """
+
+ parent = proto.Field(proto.STRING, number=1)
+
+ filter = proto.Field(proto.STRING, number=2)
+
+ page_size = proto.Field(proto.INT32, number=3)
+
+ page_token = proto.Field(proto.STRING, number=4)
+
+ read_mask = proto.Field(proto.MESSAGE, number=5, message=field_mask.FieldMask,)
+
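
One of the documented filter forms in use, with placeholder project and pipeline names:

.. code:: python

    from google.cloud import aiplatform_v1

    client = aiplatform_v1.PipelineServiceClient()

    request = aiplatform_v1.ListTrainingPipelinesRequest(
        parent="projects/my-project/locations/us-central1",  # placeholder
        filter='state="PIPELINE_STATE_SUCCEEDED" AND display_name="my_pipeline"',
    )

    for pipeline in client.list_training_pipelines(request=request):
        print(pipeline.name, pipeline.state)
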
+
+class ListTrainingPipelinesResponse(proto.Message):
+ r"""Response message for
+ ``PipelineService.ListTrainingPipelines``
+
+ Attributes:
+ training_pipelines (Sequence[google.cloud.aiplatform_v1.types.TrainingPipeline]):
+ List of TrainingPipelines in the requested
+ page.
+ next_page_token (str):
+ A token to retrieve the next page of results. Pass to
+ ``ListTrainingPipelinesRequest.page_token``
+ to obtain that page.
+ """
+
+ @property
+ def raw_page(self):
+ return self
+
+ training_pipelines = proto.RepeatedField(
+ proto.MESSAGE, number=1, message=gca_training_pipeline.TrainingPipeline,
+ )
+
+ next_page_token = proto.Field(proto.STRING, number=2)
+
+
+class DeleteTrainingPipelineRequest(proto.Message):
+ r"""Request message for
+ ``PipelineService.DeleteTrainingPipeline``.
+
+ Attributes:
+ name (str):
+ Required. The name of the TrainingPipeline resource to be
+ deleted. Format:
+
+ ``projects/{project}/locations/{location}/trainingPipelines/{training_pipeline}``
+ """
+
+ name = proto.Field(proto.STRING, number=1)
+
+
+class CancelTrainingPipelineRequest(proto.Message):
+ r"""Request message for
+ ``PipelineService.CancelTrainingPipeline``.
+
+ Attributes:
+ name (str):
+ Required. The name of the TrainingPipeline to cancel.
+ Format:
+
+ ``projects/{project}/locations/{location}/trainingPipelines/{training_pipeline}``
+ """
+
+ name = proto.Field(proto.STRING, number=1)
+
+
+__all__ = tuple(sorted(__protobuf__.manifest))
diff --git a/google/cloud/aiplatform_v1/types/pipeline_state.py b/google/cloud/aiplatform_v1/types/pipeline_state.py
new file mode 100644
index 0000000000..f6a885ae42
--- /dev/null
+++ b/google/cloud/aiplatform_v1/types/pipeline_state.py
@@ -0,0 +1,39 @@
+# -*- coding: utf-8 -*-
+
+# Copyright 2020 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+import proto # type: ignore
+
+
+__protobuf__ = proto.module(
+ package="google.cloud.aiplatform.v1", manifest={"PipelineState",},
+)
+
+
+class PipelineState(proto.Enum):
+ r"""Describes the state of a pipeline."""
+ PIPELINE_STATE_UNSPECIFIED = 0
+ PIPELINE_STATE_QUEUED = 1
+ PIPELINE_STATE_PENDING = 2
+ PIPELINE_STATE_RUNNING = 3
+ PIPELINE_STATE_SUCCEEDED = 4
+ PIPELINE_STATE_FAILED = 5
+ PIPELINE_STATE_CANCELLING = 6
+ PIPELINE_STATE_CANCELLED = 7
+ PIPELINE_STATE_PAUSED = 8
+
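+# Usage sketch (illustrative only; ``pipeline`` stands for any message with a
+# ``state`` field of this enum): members compare like regular Python enums,
+# for example when checking for a terminal state.
+#
+#   if pipeline.state in (
+#       PipelineState.PIPELINE_STATE_SUCCEEDED,
+#       PipelineState.PIPELINE_STATE_FAILED,
+#       PipelineState.PIPELINE_STATE_CANCELLED,
+#   ):
+#       ...  # the pipeline has reached a terminal state
+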
+
+__all__ = tuple(sorted(__protobuf__.manifest))
diff --git a/google/cloud/aiplatform_v1/types/prediction_service.py b/google/cloud/aiplatform_v1/types/prediction_service.py
new file mode 100644
index 0000000000..21a01372f4
--- /dev/null
+++ b/google/cloud/aiplatform_v1/types/prediction_service.py
@@ -0,0 +1,88 @@
+# -*- coding: utf-8 -*-
+
+# Copyright 2020 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+import proto # type: ignore
+
+
+from google.protobuf import struct_pb2 as struct # type: ignore
+
+
+__protobuf__ = proto.module(
+ package="google.cloud.aiplatform.v1",
+ manifest={"PredictRequest", "PredictResponse",},
+)
+
+
+class PredictRequest(proto.Message):
+ r"""Request message for
+ ``PredictionService.Predict``.
+
+ Attributes:
+ endpoint (str):
+ Required. The name of the Endpoint requested to serve the
+ prediction. Format:
+ ``projects/{project}/locations/{location}/endpoints/{endpoint}``
+ instances (Sequence[google.protobuf.struct_pb2.Value]):
+ Required. The instances that are the input to the prediction
+            call. A DeployedModel may have an upper limit on the number
+            of instances it supports per request; when that limit is
+            exceeded, the prediction call errors for AutoML Models,
+            while for customer-created Models the behaviour is as
+            documented by that Model. The schema of any
+ single instance may be specified via Endpoint's
+ DeployedModels'
+ [Model's][google.cloud.aiplatform.v1.DeployedModel.model]
+ [PredictSchemata's][google.cloud.aiplatform.v1.Model.predict_schemata]
+ ``instance_schema_uri``.
+ parameters (google.protobuf.struct_pb2.Value):
+ The parameters that govern the prediction. The schema of the
+ parameters may be specified via Endpoint's DeployedModels'
+ [Model's ][google.cloud.aiplatform.v1.DeployedModel.model]
+ [PredictSchemata's][google.cloud.aiplatform.v1.Model.predict_schemata]
+ ``parameters_schema_uri``.
+ """
+
+ endpoint = proto.Field(proto.STRING, number=1)
+
+ instances = proto.RepeatedField(proto.MESSAGE, number=2, message=struct.Value,)
+
+ parameters = proto.Field(proto.MESSAGE, number=3, message=struct.Value,)
+
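+# A hedged construction sketch: instances and parameters are plain
+# ``google.protobuf.struct_pb2.Value`` messages, so they can be built from
+# Python dicts with ``json_format.ParseDict``. The endpoint name and the
+# instance payload below are hypothetical placeholders.
+#
+#   from google.protobuf import json_format, struct_pb2
+#
+#   instance = json_format.ParseDict({"feature_a": 1.0}, struct_pb2.Value())
+#   request = PredictRequest(
+#       endpoint="projects/my-project/locations/us-central1/endpoints/123",
+#       instances=[instance],
+#   )
+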
+
+class PredictResponse(proto.Message):
+ r"""Response message for
+ ``PredictionService.Predict``.
+
+ Attributes:
+ predictions (Sequence[google.protobuf.struct_pb2.Value]):
+ The predictions that are the output of the predictions call.
+ The schema of any single prediction may be specified via
+ Endpoint's DeployedModels' [Model's
+ ][google.cloud.aiplatform.v1.DeployedModel.model]
+ [PredictSchemata's][google.cloud.aiplatform.v1.Model.predict_schemata]
+ ``prediction_schema_uri``.
+ deployed_model_id (str):
+ ID of the Endpoint's DeployedModel that
+ served this prediction.
+ """
+
+ predictions = proto.RepeatedField(proto.MESSAGE, number=1, message=struct.Value,)
+
+ deployed_model_id = proto.Field(proto.STRING, number=2)
+
+
+__all__ = tuple(sorted(__protobuf__.manifest))
diff --git a/google/cloud/aiplatform_v1/types/specialist_pool.py b/google/cloud/aiplatform_v1/types/specialist_pool.py
new file mode 100644
index 0000000000..6265316bd5
--- /dev/null
+++ b/google/cloud/aiplatform_v1/types/specialist_pool.py
@@ -0,0 +1,68 @@
+# -*- coding: utf-8 -*-
+
+# Copyright 2020 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+import proto # type: ignore
+
+
+__protobuf__ = proto.module(
+ package="google.cloud.aiplatform.v1", manifest={"SpecialistPool",},
+)
+
+
+class SpecialistPool(proto.Message):
+ r"""SpecialistPool represents customers' own workforce to work on
+ their data labeling jobs. It includes a group of specialist
+ managers who are responsible for managing the labelers in this
+ pool as well as customers' data labeling jobs associated with
+ this pool.
+ Customers create specialist pool as well as start data labeling
+ jobs on Cloud, managers and labelers work with the jobs using
+ CrowdCompute console.
+
+ Attributes:
+ name (str):
+ Required. The resource name of the
+ SpecialistPool.
+ display_name (str):
+ Required. The user-defined name of the
+ SpecialistPool. The name can be up to 128
+            characters long and can consist of any UTF-8
+            characters.
+            This field should be unique at the project level.
+ specialist_managers_count (int):
+ Output only. The number of Specialists in
+ this SpecialistPool.
+ specialist_manager_emails (Sequence[str]):
+ The email addresses of the specialists in the
+ SpecialistPool.
+ pending_data_labeling_jobs (Sequence[str]):
+            Output only. The resource names of the pending
+ data labeling jobs.
+ """
+
+ name = proto.Field(proto.STRING, number=1)
+
+ display_name = proto.Field(proto.STRING, number=2)
+
+ specialist_managers_count = proto.Field(proto.INT32, number=3)
+
+ specialist_manager_emails = proto.RepeatedField(proto.STRING, number=4)
+
+ pending_data_labeling_jobs = proto.RepeatedField(proto.STRING, number=5)
+
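+# Illustrative sketch only: the two Required fields are enough to build the
+# message locally; the resource name shown is a hypothetical placeholder.
+#
+#   pool = SpecialistPool(
+#       name="projects/my-project/locations/us-central1/specialistPools/123",
+#       display_name="my-labeling-pool",
+#   )
+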
+
+__all__ = tuple(sorted(__protobuf__.manifest))
diff --git a/google/cloud/aiplatform_v1/types/specialist_pool_service.py b/google/cloud/aiplatform_v1/types/specialist_pool_service.py
new file mode 100644
index 0000000000..69e49bb355
--- /dev/null
+++ b/google/cloud/aiplatform_v1/types/specialist_pool_service.py
@@ -0,0 +1,205 @@
+# -*- coding: utf-8 -*-
+
+# Copyright 2020 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+import proto # type: ignore
+
+
+from google.cloud.aiplatform_v1.types import operation
+from google.cloud.aiplatform_v1.types import specialist_pool as gca_specialist_pool
+from google.protobuf import field_mask_pb2 as field_mask # type: ignore
+
+
+__protobuf__ = proto.module(
+ package="google.cloud.aiplatform.v1",
+ manifest={
+ "CreateSpecialistPoolRequest",
+ "CreateSpecialistPoolOperationMetadata",
+ "GetSpecialistPoolRequest",
+ "ListSpecialistPoolsRequest",
+ "ListSpecialistPoolsResponse",
+ "DeleteSpecialistPoolRequest",
+ "UpdateSpecialistPoolRequest",
+ "UpdateSpecialistPoolOperationMetadata",
+ },
+)
+
+
+class CreateSpecialistPoolRequest(proto.Message):
+ r"""Request message for
+ ``SpecialistPoolService.CreateSpecialistPool``.
+
+ Attributes:
+ parent (str):
+ Required. The parent Project name for the new
+ SpecialistPool. The form is
+ ``projects/{project}/locations/{location}``.
+ specialist_pool (google.cloud.aiplatform_v1.types.SpecialistPool):
+ Required. The SpecialistPool to create.
+ """
+
+ parent = proto.Field(proto.STRING, number=1)
+
+ specialist_pool = proto.Field(
+ proto.MESSAGE, number=2, message=gca_specialist_pool.SpecialistPool,
+ )
+
+
+class CreateSpecialistPoolOperationMetadata(proto.Message):
+ r"""Runtime operation information for
+ ``SpecialistPoolService.CreateSpecialistPool``.
+
+ Attributes:
+ generic_metadata (google.cloud.aiplatform_v1.types.GenericOperationMetadata):
+ The operation generic information.
+ """
+
+ generic_metadata = proto.Field(
+ proto.MESSAGE, number=1, message=operation.GenericOperationMetadata,
+ )
+
+
+class GetSpecialistPoolRequest(proto.Message):
+ r"""Request message for
+ ``SpecialistPoolService.GetSpecialistPool``.
+
+ Attributes:
+ name (str):
+ Required. The name of the SpecialistPool resource. The form
+ is
+
+ ``projects/{project}/locations/{location}/specialistPools/{specialist_pool}``.
+ """
+
+ name = proto.Field(proto.STRING, number=1)
+
+
+class ListSpecialistPoolsRequest(proto.Message):
+ r"""Request message for
+ ``SpecialistPoolService.ListSpecialistPools``.
+
+ Attributes:
+ parent (str):
+ Required. The name of the SpecialistPool's parent resource.
+ Format: ``projects/{project}/locations/{location}``
+ page_size (int):
+ The standard list page size.
+ page_token (str):
+ The standard list page token. Typically obtained by
+ ``ListSpecialistPoolsResponse.next_page_token``
+ of the previous
+ ``SpecialistPoolService.ListSpecialistPools``
+            call. Returns the first page if empty.
+ read_mask (google.protobuf.field_mask_pb2.FieldMask):
+ Mask specifying which fields to read.
+ FieldMask represents a set of
+ """
+
+ parent = proto.Field(proto.STRING, number=1)
+
+ page_size = proto.Field(proto.INT32, number=2)
+
+ page_token = proto.Field(proto.STRING, number=3)
+
+ read_mask = proto.Field(proto.MESSAGE, number=4, message=field_mask.FieldMask,)
+
+
+class ListSpecialistPoolsResponse(proto.Message):
+ r"""Response message for
+ ``SpecialistPoolService.ListSpecialistPools``.
+
+ Attributes:
+ specialist_pools (Sequence[google.cloud.aiplatform_v1.types.SpecialistPool]):
+ A list of SpecialistPools that matches the
+ specified filter in the request.
+ next_page_token (str):
+ The standard List next-page token.
+ """
+
+ @property
+ def raw_page(self):
+ return self
+
+ specialist_pools = proto.RepeatedField(
+ proto.MESSAGE, number=1, message=gca_specialist_pool.SpecialistPool,
+ )
+
+ next_page_token = proto.Field(proto.STRING, number=2)
+
+
+class DeleteSpecialistPoolRequest(proto.Message):
+ r"""Request message for
+ ``SpecialistPoolService.DeleteSpecialistPool``.
+
+ Attributes:
+ name (str):
+ Required. The resource name of the SpecialistPool to delete.
+ Format:
+ ``projects/{project}/locations/{location}/specialistPools/{specialist_pool}``
+ force (bool):
+ If set to true, any specialist managers in
+ this SpecialistPool will also be deleted.
+ (Otherwise, the request will only work if the
+ SpecialistPool has no specialist managers.)
+ """
+
+ name = proto.Field(proto.STRING, number=1)
+
+ force = proto.Field(proto.BOOL, number=2)
+
+
+class UpdateSpecialistPoolRequest(proto.Message):
+ r"""Request message for
+ ``SpecialistPoolService.UpdateSpecialistPool``.
+
+ Attributes:
+ specialist_pool (google.cloud.aiplatform_v1.types.SpecialistPool):
+ Required. The SpecialistPool which replaces
+ the resource on the server.
+ update_mask (google.protobuf.field_mask_pb2.FieldMask):
+ Required. The update mask applies to the
+ resource.
+ """
+
+ specialist_pool = proto.Field(
+ proto.MESSAGE, number=1, message=gca_specialist_pool.SpecialistPool,
+ )
+
+ update_mask = proto.Field(proto.MESSAGE, number=2, message=field_mask.FieldMask,)
+
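+# A hedged sketch of a partial update: the FieldMask lists the SpecialistPool
+# fields to overwrite. The resource name and field path are assumptions made
+# for illustration.
+#
+#   from google.protobuf import field_mask_pb2
+#
+#   request = UpdateSpecialistPoolRequest(
+#       specialist_pool=gca_specialist_pool.SpecialistPool(
+#           name="projects/my-project/locations/us-central1/specialistPools/123",
+#           display_name="renamed-pool",
+#       ),
+#       update_mask=field_mask_pb2.FieldMask(paths=["display_name"]),
+#   )
+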
+
+class UpdateSpecialistPoolOperationMetadata(proto.Message):
+ r"""Runtime operation metadata for
+ ``SpecialistPoolService.UpdateSpecialistPool``.
+
+ Attributes:
+ specialist_pool (str):
+ Output only. The name of the SpecialistPool to which the
+ specialists are being added. Format:
+
+ ``projects/{project_id}/locations/{location_id}/specialistPools/{specialist_pool}``
+ generic_metadata (google.cloud.aiplatform_v1.types.GenericOperationMetadata):
+ The operation generic information.
+ """
+
+ specialist_pool = proto.Field(proto.STRING, number=1)
+
+ generic_metadata = proto.Field(
+ proto.MESSAGE, number=2, message=operation.GenericOperationMetadata,
+ )
+
+
+__all__ = tuple(sorted(__protobuf__.manifest))
diff --git a/google/cloud/aiplatform_v1/types/study.py b/google/cloud/aiplatform_v1/types/study.py
new file mode 100644
index 0000000000..99a688f045
--- /dev/null
+++ b/google/cloud/aiplatform_v1/types/study.py
@@ -0,0 +1,444 @@
+# -*- coding: utf-8 -*-
+
+# Copyright 2020 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+import proto # type: ignore
+
+
+from google.protobuf import struct_pb2 as struct # type: ignore
+from google.protobuf import timestamp_pb2 as timestamp # type: ignore
+
+
+__protobuf__ = proto.module(
+ package="google.cloud.aiplatform.v1",
+ manifest={"Trial", "StudySpec", "Measurement",},
+)
+
+
+class Trial(proto.Message):
+ r"""A message representing a Trial. A Trial contains a unique set
+ of Parameters that has been or will be evaluated, along with the
+    objective metrics obtained by running the Trial.
+
+ Attributes:
+ id (str):
+ Output only. The identifier of the Trial
+ assigned by the service.
+ state (google.cloud.aiplatform_v1.types.Trial.State):
+ Output only. The detailed state of the Trial.
+ parameters (Sequence[google.cloud.aiplatform_v1.types.Trial.Parameter]):
+ Output only. The parameters of the Trial.
+ final_measurement (google.cloud.aiplatform_v1.types.Measurement):
+ Output only. The final measurement containing
+ the objective value.
+ start_time (google.protobuf.timestamp_pb2.Timestamp):
+ Output only. Time when the Trial was started.
+ end_time (google.protobuf.timestamp_pb2.Timestamp):
+ Output only. Time when the Trial's status changed to
+ ``SUCCEEDED`` or ``INFEASIBLE``.
+ custom_job (str):
+ Output only. The CustomJob name linked to the
+ Trial. It's set for a HyperparameterTuningJob's
+ Trial.
+ """
+
+ class State(proto.Enum):
+ r"""Describes a Trial state."""
+ STATE_UNSPECIFIED = 0
+ REQUESTED = 1
+ ACTIVE = 2
+ STOPPING = 3
+ SUCCEEDED = 4
+ INFEASIBLE = 5
+
+ class Parameter(proto.Message):
+ r"""A message representing a parameter to be tuned.
+
+ Attributes:
+ parameter_id (str):
+ Output only. The ID of the parameter. The parameter should
+ be defined in [StudySpec's
+ Parameters][google.cloud.aiplatform.v1.StudySpec.parameters].
+ value (google.protobuf.struct_pb2.Value):
+ Output only. The value of the parameter. ``number_value``
+                will be set if a parameter defined in StudySpec is of type
+                'INTEGER', 'DOUBLE' or 'DISCRETE'. ``string_value`` will be
+                set if a parameter defined in StudySpec is of type
+ 'CATEGORICAL'.
+ """
+
+ parameter_id = proto.Field(proto.STRING, number=1)
+
+ value = proto.Field(proto.MESSAGE, number=2, message=struct.Value,)
+
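+    # A hedged reading sketch, assuming ``trial`` is a Trial returned by the
+    # service: each Parameter carries a number or a string depending on the
+    # parameter's type in StudySpec (see the Parameter docstring above).
+    #
+    #   for p in trial.parameters:
+    #       print(p.parameter_id, p.value)
+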
+ id = proto.Field(proto.STRING, number=2)
+
+ state = proto.Field(proto.ENUM, number=3, enum=State,)
+
+ parameters = proto.RepeatedField(proto.MESSAGE, number=4, message=Parameter,)
+
+ final_measurement = proto.Field(proto.MESSAGE, number=5, message="Measurement",)
+
+ start_time = proto.Field(proto.MESSAGE, number=7, message=timestamp.Timestamp,)
+
+ end_time = proto.Field(proto.MESSAGE, number=8, message=timestamp.Timestamp,)
+
+ custom_job = proto.Field(proto.STRING, number=11)
+
+
+class StudySpec(proto.Message):
+ r"""Represents specification of a Study.
+
+ Attributes:
+ metrics (Sequence[google.cloud.aiplatform_v1.types.StudySpec.MetricSpec]):
+ Required. Metric specs for the Study.
+ parameters (Sequence[google.cloud.aiplatform_v1.types.StudySpec.ParameterSpec]):
+ Required. The set of parameters to tune.
+ algorithm (google.cloud.aiplatform_v1.types.StudySpec.Algorithm):
+ The search algorithm specified for the Study.
+ observation_noise (google.cloud.aiplatform_v1.types.StudySpec.ObservationNoise):
+ The observation noise level of the study.
+ Currently only supported by the Vizier service.
+            Not supported by HyperparameterTuningJob or
+ TrainingPipeline.
+ measurement_selection_type (google.cloud.aiplatform_v1.types.StudySpec.MeasurementSelectionType):
+            Describes which measurement selection type
+            will be used.
+ """
+
+ class Algorithm(proto.Enum):
+ r"""The available search algorithms for the Study."""
+ ALGORITHM_UNSPECIFIED = 0
+ GRID_SEARCH = 2
+ RANDOM_SEARCH = 3
+
+ class ObservationNoise(proto.Enum):
+ r"""Describes the noise level of the repeated observations.
+ "Noisy" means that the repeated observations with the same Trial
+ parameters may lead to different metric evaluations.
+ """
+ OBSERVATION_NOISE_UNSPECIFIED = 0
+ LOW = 1
+ HIGH = 2
+
+ class MeasurementSelectionType(proto.Enum):
+ r"""This indicates which measurement to use if/when the service
+ automatically selects the final measurement from previously reported
+ intermediate measurements. Choose this based on two considerations:
+ A) Do you expect your measurements to monotonically improve? If so,
+ choose LAST_MEASUREMENT. On the other hand, if you're in a situation
+ where your system can "over-train" and you expect the performance to
+ get better for a while but then start declining, choose
+ BEST_MEASUREMENT. B) Are your measurements significantly noisy
+ and/or irreproducible? If so, BEST_MEASUREMENT will tend to be
+ over-optimistic, and it may be better to choose LAST_MEASUREMENT. If
+ both or neither of (A) and (B) apply, it doesn't matter which
+ selection type is chosen.
+ """
+ MEASUREMENT_SELECTION_TYPE_UNSPECIFIED = 0
+ LAST_MEASUREMENT = 1
+ BEST_MEASUREMENT = 2
+
+ class MetricSpec(proto.Message):
+ r"""Represents a metric to optimize.
+
+ Attributes:
+ metric_id (str):
+ Required. The ID of the metric. Must not
+ contain whitespaces and must be unique amongst
+ all MetricSpecs.
+ goal (google.cloud.aiplatform_v1.types.StudySpec.MetricSpec.GoalType):
+ Required. The optimization goal of the
+ metric.
+ """
+
+ class GoalType(proto.Enum):
+ r"""The available types of optimization goals."""
+ GOAL_TYPE_UNSPECIFIED = 0
+ MAXIMIZE = 1
+ MINIMIZE = 2
+
+ metric_id = proto.Field(proto.STRING, number=1)
+
+ goal = proto.Field(proto.ENUM, number=2, enum="StudySpec.MetricSpec.GoalType",)
+
+ class ParameterSpec(proto.Message):
+ r"""Represents a single parameter to optimize.
+
+ Attributes:
+ double_value_spec (google.cloud.aiplatform_v1.types.StudySpec.ParameterSpec.DoubleValueSpec):
+ The value spec for a 'DOUBLE' parameter.
+ integer_value_spec (google.cloud.aiplatform_v1.types.StudySpec.ParameterSpec.IntegerValueSpec):
+ The value spec for an 'INTEGER' parameter.
+ categorical_value_spec (google.cloud.aiplatform_v1.types.StudySpec.ParameterSpec.CategoricalValueSpec):
+ The value spec for a 'CATEGORICAL' parameter.
+ discrete_value_spec (google.cloud.aiplatform_v1.types.StudySpec.ParameterSpec.DiscreteValueSpec):
+ The value spec for a 'DISCRETE' parameter.
+ parameter_id (str):
+ Required. The ID of the parameter. Must not
+ contain whitespaces and must be unique amongst
+ all ParameterSpecs.
+ scale_type (google.cloud.aiplatform_v1.types.StudySpec.ParameterSpec.ScaleType):
+ How the parameter should be scaled. Leave unset for
+ ``CATEGORICAL`` parameters.
+ conditional_parameter_specs (Sequence[google.cloud.aiplatform_v1.types.StudySpec.ParameterSpec.ConditionalParameterSpec]):
+ A conditional parameter node is active if the parameter's
+ value matches the conditional node's parent_value_condition.
+
+ If two items in conditional_parameter_specs have the same
+ name, they must have disjoint parent_value_condition.
+ """
+
+ class ScaleType(proto.Enum):
+ r"""The type of scaling that should be applied to this parameter."""
+ SCALE_TYPE_UNSPECIFIED = 0
+ UNIT_LINEAR_SCALE = 1
+ UNIT_LOG_SCALE = 2
+ UNIT_REVERSE_LOG_SCALE = 3
+
+ class DoubleValueSpec(proto.Message):
+ r"""Value specification for a parameter in ``DOUBLE`` type.
+
+ Attributes:
+ min_value (float):
+ Required. Inclusive minimum value of the
+ parameter.
+ max_value (float):
+ Required. Inclusive maximum value of the
+ parameter.
+ """
+
+ min_value = proto.Field(proto.DOUBLE, number=1)
+
+ max_value = proto.Field(proto.DOUBLE, number=2)
+
+ class IntegerValueSpec(proto.Message):
+ r"""Value specification for a parameter in ``INTEGER`` type.
+
+ Attributes:
+ min_value (int):
+ Required. Inclusive minimum value of the
+ parameter.
+ max_value (int):
+ Required. Inclusive maximum value of the
+ parameter.
+ """
+
+ min_value = proto.Field(proto.INT64, number=1)
+
+ max_value = proto.Field(proto.INT64, number=2)
+
+ class CategoricalValueSpec(proto.Message):
+ r"""Value specification for a parameter in ``CATEGORICAL`` type.
+
+ Attributes:
+ values (Sequence[str]):
+ Required. The list of possible categories.
+ """
+
+ values = proto.RepeatedField(proto.STRING, number=1)
+
+ class DiscreteValueSpec(proto.Message):
+ r"""Value specification for a parameter in ``DISCRETE`` type.
+
+ Attributes:
+ values (Sequence[float]):
+ Required. A list of possible values.
+ The list should be in increasing order and at
+ least 1e-10 apart. For instance, this parameter
+ might have possible settings of 1.5, 2.5, and
+ 4.0. This list should not contain more than
+ 1,000 values.
+ """
+
+ values = proto.RepeatedField(proto.DOUBLE, number=1)
+
+ class ConditionalParameterSpec(proto.Message):
+ r"""Represents a parameter spec with condition from its parent
+ parameter.
+
+ Attributes:
+ parent_discrete_values (google.cloud.aiplatform_v1.types.StudySpec.ParameterSpec.ConditionalParameterSpec.DiscreteValueCondition):
+ The spec for matching values from a parent parameter of
+ ``DISCRETE`` type.
+ parent_int_values (google.cloud.aiplatform_v1.types.StudySpec.ParameterSpec.ConditionalParameterSpec.IntValueCondition):
+ The spec for matching values from a parent parameter of
+ ``INTEGER`` type.
+ parent_categorical_values (google.cloud.aiplatform_v1.types.StudySpec.ParameterSpec.ConditionalParameterSpec.CategoricalValueCondition):
+ The spec for matching values from a parent parameter of
+ ``CATEGORICAL`` type.
+ parameter_spec (google.cloud.aiplatform_v1.types.StudySpec.ParameterSpec):
+ Required. The spec for a conditional
+ parameter.
+ """
+
+ class DiscreteValueCondition(proto.Message):
+ r"""Represents the spec to match discrete values from parent
+ parameter.
+
+ Attributes:
+ values (Sequence[float]):
+ Required. Matches values of the parent parameter of
+ 'DISCRETE' type. All values must exist in
+ ``discrete_value_spec`` of parent parameter.
+
+ The Epsilon of the value matching is 1e-10.
+ """
+
+ values = proto.RepeatedField(proto.DOUBLE, number=1)
+
+ class IntValueCondition(proto.Message):
+ r"""Represents the spec to match integer values from parent
+ parameter.
+
+ Attributes:
+ values (Sequence[int]):
+ Required. Matches values of the parent parameter of
+ 'INTEGER' type. All values must lie in
+ ``integer_value_spec`` of parent parameter.
+ """
+
+ values = proto.RepeatedField(proto.INT64, number=1)
+
+ class CategoricalValueCondition(proto.Message):
+ r"""Represents the spec to match categorical values from parent
+ parameter.
+
+ Attributes:
+ values (Sequence[str]):
+ Required. Matches values of the parent parameter of
+ 'CATEGORICAL' type. All values must exist in
+ ``categorical_value_spec`` of parent parameter.
+ """
+
+ values = proto.RepeatedField(proto.STRING, number=1)
+
+ parent_discrete_values = proto.Field(
+ proto.MESSAGE,
+ number=2,
+ oneof="parent_value_condition",
+ message="StudySpec.ParameterSpec.ConditionalParameterSpec.DiscreteValueCondition",
+ )
+
+ parent_int_values = proto.Field(
+ proto.MESSAGE,
+ number=3,
+ oneof="parent_value_condition",
+ message="StudySpec.ParameterSpec.ConditionalParameterSpec.IntValueCondition",
+ )
+
+ parent_categorical_values = proto.Field(
+ proto.MESSAGE,
+ number=4,
+ oneof="parent_value_condition",
+ message="StudySpec.ParameterSpec.ConditionalParameterSpec.CategoricalValueCondition",
+ )
+
+ parameter_spec = proto.Field(
+ proto.MESSAGE, number=1, message="StudySpec.ParameterSpec",
+ )
+
+ double_value_spec = proto.Field(
+ proto.MESSAGE,
+ number=2,
+ oneof="parameter_value_spec",
+ message="StudySpec.ParameterSpec.DoubleValueSpec",
+ )
+
+ integer_value_spec = proto.Field(
+ proto.MESSAGE,
+ number=3,
+ oneof="parameter_value_spec",
+ message="StudySpec.ParameterSpec.IntegerValueSpec",
+ )
+
+ categorical_value_spec = proto.Field(
+ proto.MESSAGE,
+ number=4,
+ oneof="parameter_value_spec",
+ message="StudySpec.ParameterSpec.CategoricalValueSpec",
+ )
+
+ discrete_value_spec = proto.Field(
+ proto.MESSAGE,
+ number=5,
+ oneof="parameter_value_spec",
+ message="StudySpec.ParameterSpec.DiscreteValueSpec",
+ )
+
+ parameter_id = proto.Field(proto.STRING, number=1)
+
+ scale_type = proto.Field(
+ proto.ENUM, number=6, enum="StudySpec.ParameterSpec.ScaleType",
+ )
+
+ conditional_parameter_specs = proto.RepeatedField(
+ proto.MESSAGE,
+ number=10,
+ message="StudySpec.ParameterSpec.ConditionalParameterSpec",
+ )
+
+ metrics = proto.RepeatedField(proto.MESSAGE, number=1, message=MetricSpec,)
+
+ parameters = proto.RepeatedField(proto.MESSAGE, number=2, message=ParameterSpec,)
+
+ algorithm = proto.Field(proto.ENUM, number=3, enum=Algorithm,)
+
+ observation_noise = proto.Field(proto.ENUM, number=6, enum=ObservationNoise,)
+
+ measurement_selection_type = proto.Field(
+ proto.ENUM, number=7, enum=MeasurementSelectionType,
+ )
+
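+# A minimal StudySpec sketch (illustrative assumptions only): one metric to
+# maximize and one log-scaled DOUBLE parameter, tuned by random search. The
+# metric and parameter IDs are placeholders.
+#
+#   spec = StudySpec(
+#       metrics=[
+#           StudySpec.MetricSpec(
+#               metric_id="accuracy",
+#               goal=StudySpec.MetricSpec.GoalType.MAXIMIZE,
+#           )
+#       ],
+#       parameters=[
+#           StudySpec.ParameterSpec(
+#               parameter_id="learning_rate",
+#               double_value_spec=StudySpec.ParameterSpec.DoubleValueSpec(
+#                   min_value=1e-4, max_value=1e-1,
+#               ),
+#               scale_type=StudySpec.ParameterSpec.ScaleType.UNIT_LOG_SCALE,
+#           )
+#       ],
+#       algorithm=StudySpec.Algorithm.RANDOM_SEARCH,
+#   )
+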
+
+class Measurement(proto.Message):
+ r"""A message representing a Measurement of a Trial. A
+    Measurement contains the Metrics obtained by executing a Trial
+    using suggested hyperparameter values.
+
+ Attributes:
+ step_count (int):
+ Output only. The number of steps the machine
+ learning model has been trained for. Must be
+ non-negative.
+ metrics (Sequence[google.cloud.aiplatform_v1.types.Measurement.Metric]):
+            Output only. A list of metrics obtained by
+ evaluating the objective functions using
+ suggested Parameter values.
+ """
+
+ class Metric(proto.Message):
+ r"""A message representing a metric in the measurement.
+
+ Attributes:
+ metric_id (str):
+ Output only. The ID of the Metric. The Metric should be
+ defined in [StudySpec's
+ Metrics][google.cloud.aiplatform.v1.StudySpec.metrics].
+ value (float):
+ Output only. The value for this metric.
+ """
+
+ metric_id = proto.Field(proto.STRING, number=1)
+
+ value = proto.Field(proto.DOUBLE, number=2)
+
+ step_count = proto.Field(proto.INT64, number=2)
+
+ metrics = proto.RepeatedField(proto.MESSAGE, number=3, message=Metric,)
+
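+# Illustrative sketch: reporting a single intermediate measurement. The metric
+# ID must match a MetricSpec in the StudySpec; "accuracy" is an assumption.
+#
+#   measurement = Measurement(
+#       step_count=100,
+#       metrics=[Measurement.Metric(metric_id="accuracy", value=0.91)],
+#   )
+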
+
+__all__ = tuple(sorted(__protobuf__.manifest))
diff --git a/google/cloud/aiplatform_v1/types/training_pipeline.py b/google/cloud/aiplatform_v1/types/training_pipeline.py
new file mode 100644
index 0000000000..9a41f231a5
--- /dev/null
+++ b/google/cloud/aiplatform_v1/types/training_pipeline.py
@@ -0,0 +1,467 @@
+# -*- coding: utf-8 -*-
+
+# Copyright 2020 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+import proto # type: ignore
+
+
+from google.cloud.aiplatform_v1.types import encryption_spec as gca_encryption_spec
+from google.cloud.aiplatform_v1.types import io
+from google.cloud.aiplatform_v1.types import model
+from google.cloud.aiplatform_v1.types import pipeline_state
+from google.protobuf import struct_pb2 as struct # type: ignore
+from google.protobuf import timestamp_pb2 as timestamp # type: ignore
+from google.rpc import status_pb2 as status # type: ignore
+
+
+__protobuf__ = proto.module(
+ package="google.cloud.aiplatform.v1",
+ manifest={
+ "TrainingPipeline",
+ "InputDataConfig",
+ "FractionSplit",
+ "FilterSplit",
+ "PredefinedSplit",
+ "TimestampSplit",
+ },
+)
+
+
+class TrainingPipeline(proto.Message):
+ r"""The TrainingPipeline orchestrates tasks associated with training a
+ Model. It always executes the training task, and optionally may also
+ export data from AI Platform's Dataset which becomes the training
+ input, ``upload``
+ the Model to AI Platform, and evaluate the Model.
+
+ Attributes:
+ name (str):
+ Output only. Resource name of the
+ TrainingPipeline.
+ display_name (str):
+ Required. The user-defined name of this
+ TrainingPipeline.
+ input_data_config (google.cloud.aiplatform_v1.types.InputDataConfig):
+ Specifies AI Platform owned input data that may be used for
+ training the Model. The TrainingPipeline's
+ ``training_task_definition``
+ should make clear whether this config is used and if there
+ are any special requirements on how it should be filled. If
+ nothing about this config is mentioned in the
+ ``training_task_definition``,
+ then it should be assumed that the TrainingPipeline does not
+ depend on this configuration.
+ training_task_definition (str):
+ Required. A Google Cloud Storage path to the
+ YAML file that defines the training task which
+ is responsible for producing the model artifact,
+ and may also include additional auxiliary work.
+ The definition files that can be used here are
+            found in
+            gs://google-cloud-aiplatform/schema/trainingjob/definition/.
+            Note: The URI given on output will be immutable
+            and probably different from the one given on
+            input, including the URI scheme. The output URI
+            will point to a location where the user only has
+            read access.
+ training_task_inputs (google.protobuf.struct_pb2.Value):
+ Required. The training task's parameter(s), as specified in
+ the
+ ``training_task_definition``'s
+ ``inputs``.
+ training_task_metadata (google.protobuf.struct_pb2.Value):
+ Output only. The metadata information as specified in the
+ ``training_task_definition``'s
+ ``metadata``. This metadata is an auxiliary runtime and
+ final information about the training task. While the
+            pipeline is running, this information is populated only on a
+            best-effort basis. Only present if the pipeline's
+            ``training_task_definition``
+            contains a ``metadata`` object.
+ model_to_upload (google.cloud.aiplatform_v1.types.Model):
+ Describes the Model that may be uploaded (via
+ ``ModelService.UploadModel``)
+ by this TrainingPipeline. The TrainingPipeline's
+ ``training_task_definition``
+ should make clear whether this Model description should be
+ populated, and if there are any special requirements
+ regarding how it should be filled. If nothing is mentioned
+ in the
+ ``training_task_definition``,
+ then it should be assumed that this field should not be
+ filled and the training task either uploads the Model
+ without a need of this information, or that training task
+ does not support uploading a Model as part of the pipeline.
+ When the Pipeline's state becomes
+            ``PIPELINE_STATE_SUCCEEDED`` and the trained Model has been
+ uploaded into AI Platform, then the model_to_upload's
+ resource ``name`` is
+ populated. The Model is always uploaded into the Project and
+ Location in which this pipeline is.
+ state (google.cloud.aiplatform_v1.types.PipelineState):
+ Output only. The detailed state of the
+ pipeline.
+ error (google.rpc.status_pb2.Status):
+ Output only. Only populated when the pipeline's state is
+ ``PIPELINE_STATE_FAILED`` or ``PIPELINE_STATE_CANCELLED``.
+ create_time (google.protobuf.timestamp_pb2.Timestamp):
+ Output only. Time when the TrainingPipeline
+ was created.
+ start_time (google.protobuf.timestamp_pb2.Timestamp):
+            Output only. Time when the TrainingPipeline first entered
+            the ``PIPELINE_STATE_RUNNING`` state.
+ end_time (google.protobuf.timestamp_pb2.Timestamp):
+ Output only. Time when the TrainingPipeline entered any of
+ the following states: ``PIPELINE_STATE_SUCCEEDED``,
+ ``PIPELINE_STATE_FAILED``, ``PIPELINE_STATE_CANCELLED``.
+ update_time (google.protobuf.timestamp_pb2.Timestamp):
+ Output only. Time when the TrainingPipeline
+ was most recently updated.
+ labels (Sequence[google.cloud.aiplatform_v1.types.TrainingPipeline.LabelsEntry]):
+ The labels with user-defined metadata to
+ organize TrainingPipelines.
+ Label keys and values can be no longer than 64
+            characters (Unicode codepoints), and can only
+ contain lowercase letters, numeric characters,
+ underscores and dashes. International characters
+ are allowed.
+ See https://goo.gl/xmQnxf for more information
+ and examples of labels.
+ encryption_spec (google.cloud.aiplatform_v1.types.EncryptionSpec):
+ Customer-managed encryption key spec for a TrainingPipeline.
+ If set, this TrainingPipeline will be secured by this key.
+
+ Note: Model trained by this TrainingPipeline is also secured
+ by this key if
+ ``model_to_upload``
+ is not set separately.
+ """
+
+ name = proto.Field(proto.STRING, number=1)
+
+ display_name = proto.Field(proto.STRING, number=2)
+
+ input_data_config = proto.Field(proto.MESSAGE, number=3, message="InputDataConfig",)
+
+ training_task_definition = proto.Field(proto.STRING, number=4)
+
+ training_task_inputs = proto.Field(proto.MESSAGE, number=5, message=struct.Value,)
+
+ training_task_metadata = proto.Field(proto.MESSAGE, number=6, message=struct.Value,)
+
+ model_to_upload = proto.Field(proto.MESSAGE, number=7, message=model.Model,)
+
+ state = proto.Field(proto.ENUM, number=9, enum=pipeline_state.PipelineState,)
+
+ error = proto.Field(proto.MESSAGE, number=10, message=status.Status,)
+
+ create_time = proto.Field(proto.MESSAGE, number=11, message=timestamp.Timestamp,)
+
+ start_time = proto.Field(proto.MESSAGE, number=12, message=timestamp.Timestamp,)
+
+ end_time = proto.Field(proto.MESSAGE, number=13, message=timestamp.Timestamp,)
+
+ update_time = proto.Field(proto.MESSAGE, number=14, message=timestamp.Timestamp,)
+
+ labels = proto.MapField(proto.STRING, proto.STRING, number=15)
+
+ encryption_spec = proto.Field(
+ proto.MESSAGE, number=18, message=gca_encryption_spec.EncryptionSpec,
+ )
+
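+# A hedged construction sketch: the schema file name and the task inputs below
+# are hypothetical placeholders; real values depend on the chosen training
+# task definition.
+#
+#   from google.protobuf import json_format, struct_pb2
+#
+#   pipeline = TrainingPipeline(
+#       display_name="my-training-pipeline",
+#       training_task_definition=(
+#           "gs://google-cloud-aiplatform/schema/trainingjob/definition/"
+#           "custom_task_1.0.0.yaml"  # hypothetical schema file
+#       ),
+#       training_task_inputs=json_format.ParseDict(
+#           {"workerPoolSpecs": []}, struct_pb2.Value()  # placeholder inputs
+#       ),
+#   )
+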
+
+class InputDataConfig(proto.Message):
+ r"""Specifies AI Platform owned input data to be used for
+ training, and possibly evaluating, the Model.
+
+ Attributes:
+ fraction_split (google.cloud.aiplatform_v1.types.FractionSplit):
+ Split based on fractions defining the size of
+ each set.
+ filter_split (google.cloud.aiplatform_v1.types.FilterSplit):
+ Split based on the provided filters for each
+ set.
+ predefined_split (google.cloud.aiplatform_v1.types.PredefinedSplit):
+ Supported only for tabular Datasets.
+ Split based on a predefined key.
+ timestamp_split (google.cloud.aiplatform_v1.types.TimestampSplit):
+ Supported only for tabular Datasets.
+ Split based on the timestamp of the input data
+ pieces.
+ gcs_destination (google.cloud.aiplatform_v1.types.GcsDestination):
+ The Cloud Storage location where the training data is to be
+ written to. In the given directory a new directory is
+ created with name:
+ ``dataset---``
+ where timestamp is in YYYY-MM-DDThh:mm:ss.sssZ ISO-8601
+ format. All training input data is written into that
+ directory.
+
+ The AI Platform environment variables representing Cloud
+ Storage data URIs are represented in the Cloud Storage
+ wildcard format to support sharded data. e.g.:
+ "gs://.../training-*.jsonl"
+
+ - AIP_DATA_FORMAT = "jsonl" for non-tabular data, "csv" for
+ tabular data
+ - AIP_TRAINING_DATA_URI =
+
+ "gcs_destination/dataset---/training-*.${AIP_DATA_FORMAT}"
+
+ - AIP_VALIDATION_DATA_URI =
+
+ "gcs_destination/dataset---/validation-*.${AIP_DATA_FORMAT}"
+
+ - AIP_TEST_DATA_URI =
+
+ "gcs_destination/dataset---/test-*.${AIP_DATA_FORMAT}".
+ bigquery_destination (google.cloud.aiplatform_v1.types.BigQueryDestination):
+ Only applicable to custom training with tabular Dataset with
+ BigQuery source.
+
+ The BigQuery project location where the training data is to
+ be written to. In the given project a new dataset is created
+ with name
+ ``dataset_