From 019b6102c2dc98550592cde0adfbb4958faddbef Mon Sep 17 00:00:00 2001 From: Jason Dai Date: Thu, 30 May 2024 16:07:16 -0700 Subject: [PATCH 01/36] fix: deep copy dataset before passing it to evaluation PiperOrigin-RevId: 638817266 --- .release-please-manifest.json | 2 +- CHANGELOG.md | 20 ------------------- google/cloud/aiplatform/gapic_version.py | 2 +- .../schema/predict/instance/gapic_version.py | 2 +- .../predict/instance_v1/gapic_version.py | 2 +- .../v1/schema/predict/params/gapic_version.py | 2 +- .../schema/predict/params_v1/gapic_version.py | 2 +- .../predict/prediction/gapic_version.py | 2 +- .../predict/prediction_v1/gapic_version.py | 2 +- .../trainingjob/definition/gapic_version.py | 2 +- .../definition_v1/gapic_version.py | 2 +- .../schema/predict/instance/gapic_version.py | 2 +- .../predict/instance_v1beta1/gapic_version.py | 2 +- .../schema/predict/params/gapic_version.py | 2 +- .../predict/params_v1beta1/gapic_version.py | 2 +- .../predict/prediction/gapic_version.py | 2 +- .../prediction_v1beta1/gapic_version.py | 2 +- .../trainingjob/definition/gapic_version.py | 2 +- .../definition_v1beta1/gapic_version.py | 2 +- google/cloud/aiplatform/version.py | 2 +- google/cloud/aiplatform_v1/gapic_version.py | 2 +- .../cloud/aiplatform_v1beta1/gapic_version.py | 2 +- pypi/_vertex_ai_placeholder/version.py | 2 +- ...t_metadata_google.cloud.aiplatform.v1.json | 2 +- ...adata_google.cloud.aiplatform.v1beta1.json | 2 +- vertexai/preview/evaluation/_evaluation.py | 2 +- 26 files changed, 25 insertions(+), 45 deletions(-) diff --git a/.release-please-manifest.json b/.release-please-manifest.json index 620ccf0c92..c5435061da 100644 --- a/.release-please-manifest.json +++ b/.release-please-manifest.json @@ -1,3 +1,3 @@ { - ".": "1.53.0" + ".": "1.52.0" } diff --git a/CHANGELOG.md b/CHANGELOG.md index 8259a99f16..7325a6ae20 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,25 +1,5 @@ # Changelog -## [1.53.0](https://github.com/googleapis/python-aiplatform/compare/v1.52.0...v1.53.0) (2024-05-30) - - -### Features - -* Add a `cloneable` protocol for Reasoning Engine. ([8960a80](https://github.com/googleapis/python-aiplatform/commit/8960a8022dc7556413a83786fc14e25e91df8362)) -* Add labels parameter to the supervised tuning train method ([f7c5567](https://github.com/googleapis/python-aiplatform/commit/f7c5567b6e44895033cf52e6f80a1bb55fb5f647)) -* Added reboot command for PersistentResource ([7785f8c](https://github.com/googleapis/python-aiplatform/commit/7785f8c327ee17da4827840396c49063b8e6d18f)) -* Added the new `GenerationConfig.response_schema` field ([#3772](https://github.com/googleapis/python-aiplatform/issues/3772)) ([5436d88](https://github.com/googleapis/python-aiplatform/commit/5436d88bf8d6c9b6a9df5a496afdc25106463d30)) -* Enable Tensorboard profile plugin in all regions by default. 
([8a4a41a](https://github.com/googleapis/python-aiplatform/commit/8a4a41afe47aaff2f69a73e5011b34bcba5cd2e9)) -* GenAI - Added the `response_schema` parameter to the `GenerationConfig` class ([b5e2c02](https://github.com/googleapis/python-aiplatform/commit/b5e2c0204070e5f7fb695d39c7e5d23f937dbffd)) -* LLM - Added the `seed` parameter to the `TextGenerationModel`'s `predict` methods ([cb2f4aa](https://github.com/googleapis/python-aiplatform/commit/cb2f4aa021af05c90e54c5e41c1c91f9d8bf13b8)) - - -### Bug Fixes - -* Create run_name when run_name_prefix is not specified for Tensorboard uploader. ([ac17d87](https://github.com/googleapis/python-aiplatform/commit/ac17d876074f3fb51ab6c04beff0d3985df54633)) -* GenAI - Tuning - Supervised - Fix `adapter_size` parameter handling to match enum values. ([1cc22c3](https://github.com/googleapis/python-aiplatform/commit/1cc22c3c3561f7c6374d32fafd45839256064958)) -* Model Monitor console uri. ([71fbc81](https://github.com/googleapis/python-aiplatform/commit/71fbc81df8fa0d7c863233abc3ed6d40666c1623)) - ## [1.52.0](https://github.com/googleapis/python-aiplatform/compare/v1.51.0...v1.52.0) (2024-05-21) diff --git a/google/cloud/aiplatform/gapic_version.py b/google/cloud/aiplatform/gapic_version.py index b2b84effcb..33b7cbe7a9 100644 --- a/google/cloud/aiplatform/gapic_version.py +++ b/google/cloud/aiplatform/gapic_version.py @@ -13,4 +13,4 @@ # See the License for the specific language governing permissions and # limitations under the License. # -__version__ = "1.53.0" # {x-release-please-version} +__version__ = "1.52.0" # {x-release-please-version} diff --git a/google/cloud/aiplatform/v1/schema/predict/instance/gapic_version.py b/google/cloud/aiplatform/v1/schema/predict/instance/gapic_version.py index b2b84effcb..33b7cbe7a9 100644 --- a/google/cloud/aiplatform/v1/schema/predict/instance/gapic_version.py +++ b/google/cloud/aiplatform/v1/schema/predict/instance/gapic_version.py @@ -13,4 +13,4 @@ # See the License for the specific language governing permissions and # limitations under the License. # -__version__ = "1.53.0" # {x-release-please-version} +__version__ = "1.52.0" # {x-release-please-version} diff --git a/google/cloud/aiplatform/v1/schema/predict/instance_v1/gapic_version.py b/google/cloud/aiplatform/v1/schema/predict/instance_v1/gapic_version.py index b2b84effcb..33b7cbe7a9 100644 --- a/google/cloud/aiplatform/v1/schema/predict/instance_v1/gapic_version.py +++ b/google/cloud/aiplatform/v1/schema/predict/instance_v1/gapic_version.py @@ -13,4 +13,4 @@ # See the License for the specific language governing permissions and # limitations under the License. # -__version__ = "1.53.0" # {x-release-please-version} +__version__ = "1.52.0" # {x-release-please-version} diff --git a/google/cloud/aiplatform/v1/schema/predict/params/gapic_version.py b/google/cloud/aiplatform/v1/schema/predict/params/gapic_version.py index b2b84effcb..33b7cbe7a9 100644 --- a/google/cloud/aiplatform/v1/schema/predict/params/gapic_version.py +++ b/google/cloud/aiplatform/v1/schema/predict/params/gapic_version.py @@ -13,4 +13,4 @@ # See the License for the specific language governing permissions and # limitations under the License. 
# -__version__ = "1.53.0" # {x-release-please-version} +__version__ = "1.52.0" # {x-release-please-version} diff --git a/google/cloud/aiplatform/v1/schema/predict/params_v1/gapic_version.py b/google/cloud/aiplatform/v1/schema/predict/params_v1/gapic_version.py index b2b84effcb..33b7cbe7a9 100644 --- a/google/cloud/aiplatform/v1/schema/predict/params_v1/gapic_version.py +++ b/google/cloud/aiplatform/v1/schema/predict/params_v1/gapic_version.py @@ -13,4 +13,4 @@ # See the License for the specific language governing permissions and # limitations under the License. # -__version__ = "1.53.0" # {x-release-please-version} +__version__ = "1.52.0" # {x-release-please-version} diff --git a/google/cloud/aiplatform/v1/schema/predict/prediction/gapic_version.py b/google/cloud/aiplatform/v1/schema/predict/prediction/gapic_version.py index b2b84effcb..33b7cbe7a9 100644 --- a/google/cloud/aiplatform/v1/schema/predict/prediction/gapic_version.py +++ b/google/cloud/aiplatform/v1/schema/predict/prediction/gapic_version.py @@ -13,4 +13,4 @@ # See the License for the specific language governing permissions and # limitations under the License. # -__version__ = "1.53.0" # {x-release-please-version} +__version__ = "1.52.0" # {x-release-please-version} diff --git a/google/cloud/aiplatform/v1/schema/predict/prediction_v1/gapic_version.py b/google/cloud/aiplatform/v1/schema/predict/prediction_v1/gapic_version.py index b2b84effcb..33b7cbe7a9 100644 --- a/google/cloud/aiplatform/v1/schema/predict/prediction_v1/gapic_version.py +++ b/google/cloud/aiplatform/v1/schema/predict/prediction_v1/gapic_version.py @@ -13,4 +13,4 @@ # See the License for the specific language governing permissions and # limitations under the License. # -__version__ = "1.53.0" # {x-release-please-version} +__version__ = "1.52.0" # {x-release-please-version} diff --git a/google/cloud/aiplatform/v1/schema/trainingjob/definition/gapic_version.py b/google/cloud/aiplatform/v1/schema/trainingjob/definition/gapic_version.py index b2b84effcb..33b7cbe7a9 100644 --- a/google/cloud/aiplatform/v1/schema/trainingjob/definition/gapic_version.py +++ b/google/cloud/aiplatform/v1/schema/trainingjob/definition/gapic_version.py @@ -13,4 +13,4 @@ # See the License for the specific language governing permissions and # limitations under the License. # -__version__ = "1.53.0" # {x-release-please-version} +__version__ = "1.52.0" # {x-release-please-version} diff --git a/google/cloud/aiplatform/v1/schema/trainingjob/definition_v1/gapic_version.py b/google/cloud/aiplatform/v1/schema/trainingjob/definition_v1/gapic_version.py index b2b84effcb..33b7cbe7a9 100644 --- a/google/cloud/aiplatform/v1/schema/trainingjob/definition_v1/gapic_version.py +++ b/google/cloud/aiplatform/v1/schema/trainingjob/definition_v1/gapic_version.py @@ -13,4 +13,4 @@ # See the License for the specific language governing permissions and # limitations under the License. # -__version__ = "1.53.0" # {x-release-please-version} +__version__ = "1.52.0" # {x-release-please-version} diff --git a/google/cloud/aiplatform/v1beta1/schema/predict/instance/gapic_version.py b/google/cloud/aiplatform/v1beta1/schema/predict/instance/gapic_version.py index b2b84effcb..33b7cbe7a9 100644 --- a/google/cloud/aiplatform/v1beta1/schema/predict/instance/gapic_version.py +++ b/google/cloud/aiplatform/v1beta1/schema/predict/instance/gapic_version.py @@ -13,4 +13,4 @@ # See the License for the specific language governing permissions and # limitations under the License. 
# -__version__ = "1.53.0" # {x-release-please-version} +__version__ = "1.52.0" # {x-release-please-version} diff --git a/google/cloud/aiplatform/v1beta1/schema/predict/instance_v1beta1/gapic_version.py b/google/cloud/aiplatform/v1beta1/schema/predict/instance_v1beta1/gapic_version.py index b2b84effcb..33b7cbe7a9 100644 --- a/google/cloud/aiplatform/v1beta1/schema/predict/instance_v1beta1/gapic_version.py +++ b/google/cloud/aiplatform/v1beta1/schema/predict/instance_v1beta1/gapic_version.py @@ -13,4 +13,4 @@ # See the License for the specific language governing permissions and # limitations under the License. # -__version__ = "1.53.0" # {x-release-please-version} +__version__ = "1.52.0" # {x-release-please-version} diff --git a/google/cloud/aiplatform/v1beta1/schema/predict/params/gapic_version.py b/google/cloud/aiplatform/v1beta1/schema/predict/params/gapic_version.py index b2b84effcb..33b7cbe7a9 100644 --- a/google/cloud/aiplatform/v1beta1/schema/predict/params/gapic_version.py +++ b/google/cloud/aiplatform/v1beta1/schema/predict/params/gapic_version.py @@ -13,4 +13,4 @@ # See the License for the specific language governing permissions and # limitations under the License. # -__version__ = "1.53.0" # {x-release-please-version} +__version__ = "1.52.0" # {x-release-please-version} diff --git a/google/cloud/aiplatform/v1beta1/schema/predict/params_v1beta1/gapic_version.py b/google/cloud/aiplatform/v1beta1/schema/predict/params_v1beta1/gapic_version.py index b2b84effcb..33b7cbe7a9 100644 --- a/google/cloud/aiplatform/v1beta1/schema/predict/params_v1beta1/gapic_version.py +++ b/google/cloud/aiplatform/v1beta1/schema/predict/params_v1beta1/gapic_version.py @@ -13,4 +13,4 @@ # See the License for the specific language governing permissions and # limitations under the License. # -__version__ = "1.53.0" # {x-release-please-version} +__version__ = "1.52.0" # {x-release-please-version} diff --git a/google/cloud/aiplatform/v1beta1/schema/predict/prediction/gapic_version.py b/google/cloud/aiplatform/v1beta1/schema/predict/prediction/gapic_version.py index b2b84effcb..33b7cbe7a9 100644 --- a/google/cloud/aiplatform/v1beta1/schema/predict/prediction/gapic_version.py +++ b/google/cloud/aiplatform/v1beta1/schema/predict/prediction/gapic_version.py @@ -13,4 +13,4 @@ # See the License for the specific language governing permissions and # limitations under the License. # -__version__ = "1.53.0" # {x-release-please-version} +__version__ = "1.52.0" # {x-release-please-version} diff --git a/google/cloud/aiplatform/v1beta1/schema/predict/prediction_v1beta1/gapic_version.py b/google/cloud/aiplatform/v1beta1/schema/predict/prediction_v1beta1/gapic_version.py index b2b84effcb..33b7cbe7a9 100644 --- a/google/cloud/aiplatform/v1beta1/schema/predict/prediction_v1beta1/gapic_version.py +++ b/google/cloud/aiplatform/v1beta1/schema/predict/prediction_v1beta1/gapic_version.py @@ -13,4 +13,4 @@ # See the License for the specific language governing permissions and # limitations under the License. 
# -__version__ = "1.53.0" # {x-release-please-version} +__version__ = "1.52.0" # {x-release-please-version} diff --git a/google/cloud/aiplatform/v1beta1/schema/trainingjob/definition/gapic_version.py b/google/cloud/aiplatform/v1beta1/schema/trainingjob/definition/gapic_version.py index b2b84effcb..33b7cbe7a9 100644 --- a/google/cloud/aiplatform/v1beta1/schema/trainingjob/definition/gapic_version.py +++ b/google/cloud/aiplatform/v1beta1/schema/trainingjob/definition/gapic_version.py @@ -13,4 +13,4 @@ # See the License for the specific language governing permissions and # limitations under the License. # -__version__ = "1.53.0" # {x-release-please-version} +__version__ = "1.52.0" # {x-release-please-version} diff --git a/google/cloud/aiplatform/v1beta1/schema/trainingjob/definition_v1beta1/gapic_version.py b/google/cloud/aiplatform/v1beta1/schema/trainingjob/definition_v1beta1/gapic_version.py index b2b84effcb..33b7cbe7a9 100644 --- a/google/cloud/aiplatform/v1beta1/schema/trainingjob/definition_v1beta1/gapic_version.py +++ b/google/cloud/aiplatform/v1beta1/schema/trainingjob/definition_v1beta1/gapic_version.py @@ -13,4 +13,4 @@ # See the License for the specific language governing permissions and # limitations under the License. # -__version__ = "1.53.0" # {x-release-please-version} +__version__ = "1.52.0" # {x-release-please-version} diff --git a/google/cloud/aiplatform/version.py b/google/cloud/aiplatform/version.py index 1fbaf641c4..9879eee46f 100644 --- a/google/cloud/aiplatform/version.py +++ b/google/cloud/aiplatform/version.py @@ -15,4 +15,4 @@ # limitations under the License. # -__version__ = "1.53.0" +__version__ = "1.52.0" diff --git a/google/cloud/aiplatform_v1/gapic_version.py b/google/cloud/aiplatform_v1/gapic_version.py index b2b84effcb..33b7cbe7a9 100644 --- a/google/cloud/aiplatform_v1/gapic_version.py +++ b/google/cloud/aiplatform_v1/gapic_version.py @@ -13,4 +13,4 @@ # See the License for the specific language governing permissions and # limitations under the License. # -__version__ = "1.53.0" # {x-release-please-version} +__version__ = "1.52.0" # {x-release-please-version} diff --git a/google/cloud/aiplatform_v1beta1/gapic_version.py b/google/cloud/aiplatform_v1beta1/gapic_version.py index b2b84effcb..33b7cbe7a9 100644 --- a/google/cloud/aiplatform_v1beta1/gapic_version.py +++ b/google/cloud/aiplatform_v1beta1/gapic_version.py @@ -13,4 +13,4 @@ # See the License for the specific language governing permissions and # limitations under the License. # -__version__ = "1.53.0" # {x-release-please-version} +__version__ = "1.52.0" # {x-release-please-version} diff --git a/pypi/_vertex_ai_placeholder/version.py b/pypi/_vertex_ai_placeholder/version.py index 57b90e71a2..ffe1268267 100644 --- a/pypi/_vertex_ai_placeholder/version.py +++ b/pypi/_vertex_ai_placeholder/version.py @@ -15,4 +15,4 @@ # limitations under the License. 
# -__version__ = "1.53.0" +__version__ = "1.52.0" diff --git a/samples/generated_samples/snippet_metadata_google.cloud.aiplatform.v1.json b/samples/generated_samples/snippet_metadata_google.cloud.aiplatform.v1.json index a939560bbb..d3cffb95f6 100644 --- a/samples/generated_samples/snippet_metadata_google.cloud.aiplatform.v1.json +++ b/samples/generated_samples/snippet_metadata_google.cloud.aiplatform.v1.json @@ -8,7 +8,7 @@ ], "language": "PYTHON", "name": "google-cloud-aiplatform", - "version": "1.53.0" + "version": "0.1.0" }, "snippets": [ { diff --git a/samples/generated_samples/snippet_metadata_google.cloud.aiplatform.v1beta1.json b/samples/generated_samples/snippet_metadata_google.cloud.aiplatform.v1beta1.json index 13067b9265..0343e19c8d 100644 --- a/samples/generated_samples/snippet_metadata_google.cloud.aiplatform.v1beta1.json +++ b/samples/generated_samples/snippet_metadata_google.cloud.aiplatform.v1beta1.json @@ -8,7 +8,7 @@ ], "language": "PYTHON", "name": "google-cloud-aiplatform", - "version": "1.53.0" + "version": "0.1.0" }, "snippets": [ { diff --git a/vertexai/preview/evaluation/_evaluation.py b/vertexai/preview/evaluation/_evaluation.py index fd348b7358..a431bfd409 100644 --- a/vertexai/preview/evaluation/_evaluation.py +++ b/vertexai/preview/evaluation/_evaluation.py @@ -636,7 +636,7 @@ def evaluate( raise ValueError("Metrics cannot be empty.") evaluation_run_config = evaluation_base.EvaluationRunConfig( - dataset=dataset, + dataset=dataset.copy(deep=True), metrics=_replace_metric_bundle_with_metrics(metrics), column_map={ constants.Dataset.CONTENT_COLUMN: content_column_name, From af1ead84d375c995b9d71bbf7ac6cecc05d5f4ba Mon Sep 17 00:00:00 2001 From: "release-please[bot]" <55107282+release-please[bot]@users.noreply.github.com> Date: Thu, 30 May 2024 16:19:34 -0700 Subject: [PATCH 02/36] chore(main): release 1.53.0 Copybara import of the project: -- 88713bd49649baf8ecaf627379ad4eafbe4f2d4f by release-please[bot] <55107282+release-please[bot]@users.noreply.github.com>: chore(main): release 1.53.0 COPYBARA_INTEGRATE_REVIEW=https://github.com/googleapis/python-aiplatform/pull/3825 from googleapis:release-please--branches--main 88713bd49649baf8ecaf627379ad4eafbe4f2d4f PiperOrigin-RevId: 638821013 --- .release-please-manifest.json | 2 +- CHANGELOG.md | 20 +++++++++++++++++++ google/cloud/aiplatform/gapic_version.py | 2 +- .../schema/predict/instance/gapic_version.py | 2 +- .../predict/instance_v1/gapic_version.py | 2 +- .../v1/schema/predict/params/gapic_version.py | 2 +- .../schema/predict/params_v1/gapic_version.py | 2 +- .../predict/prediction/gapic_version.py | 2 +- .../predict/prediction_v1/gapic_version.py | 2 +- .../trainingjob/definition/gapic_version.py | 2 +- .../definition_v1/gapic_version.py | 2 +- .../schema/predict/instance/gapic_version.py | 2 +- .../predict/instance_v1beta1/gapic_version.py | 2 +- .../schema/predict/params/gapic_version.py | 2 +- .../predict/params_v1beta1/gapic_version.py | 2 +- .../predict/prediction/gapic_version.py | 2 +- .../prediction_v1beta1/gapic_version.py | 2 +- .../trainingjob/definition/gapic_version.py | 2 +- .../definition_v1beta1/gapic_version.py | 2 +- google/cloud/aiplatform/version.py | 2 +- google/cloud/aiplatform_v1/gapic_version.py | 2 +- .../cloud/aiplatform_v1beta1/gapic_version.py | 2 +- pypi/_vertex_ai_placeholder/version.py | 2 +- ...t_metadata_google.cloud.aiplatform.v1.json | 2 +- ...adata_google.cloud.aiplatform.v1beta1.json | 2 +- 25 files changed, 44 insertions(+), 24 deletions(-) diff 
--git a/.release-please-manifest.json b/.release-please-manifest.json index c5435061da..620ccf0c92 100644 --- a/.release-please-manifest.json +++ b/.release-please-manifest.json @@ -1,3 +1,3 @@ { - ".": "1.52.0" + ".": "1.53.0" } diff --git a/CHANGELOG.md b/CHANGELOG.md index 7325a6ae20..8259a99f16 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,5 +1,25 @@ # Changelog +## [1.53.0](https://github.com/googleapis/python-aiplatform/compare/v1.52.0...v1.53.0) (2024-05-30) + + +### Features + +* Add a `cloneable` protocol for Reasoning Engine. ([8960a80](https://github.com/googleapis/python-aiplatform/commit/8960a8022dc7556413a83786fc14e25e91df8362)) +* Add labels parameter to the supervised tuning train method ([f7c5567](https://github.com/googleapis/python-aiplatform/commit/f7c5567b6e44895033cf52e6f80a1bb55fb5f647)) +* Added reboot command for PersistentResource ([7785f8c](https://github.com/googleapis/python-aiplatform/commit/7785f8c327ee17da4827840396c49063b8e6d18f)) +* Added the new `GenerationConfig.response_schema` field ([#3772](https://github.com/googleapis/python-aiplatform/issues/3772)) ([5436d88](https://github.com/googleapis/python-aiplatform/commit/5436d88bf8d6c9b6a9df5a496afdc25106463d30)) +* Enable Tensorboard profile plugin in all regions by default. ([8a4a41a](https://github.com/googleapis/python-aiplatform/commit/8a4a41afe47aaff2f69a73e5011b34bcba5cd2e9)) +* GenAI - Added the `response_schema` parameter to the `GenerationConfig` class ([b5e2c02](https://github.com/googleapis/python-aiplatform/commit/b5e2c0204070e5f7fb695d39c7e5d23f937dbffd)) +* LLM - Added the `seed` parameter to the `TextGenerationModel`'s `predict` methods ([cb2f4aa](https://github.com/googleapis/python-aiplatform/commit/cb2f4aa021af05c90e54c5e41c1c91f9d8bf13b8)) + + +### Bug Fixes + +* Create run_name when run_name_prefix is not specified for Tensorboard uploader. ([ac17d87](https://github.com/googleapis/python-aiplatform/commit/ac17d876074f3fb51ab6c04beff0d3985df54633)) +* GenAI - Tuning - Supervised - Fix `adapter_size` parameter handling to match enum values. ([1cc22c3](https://github.com/googleapis/python-aiplatform/commit/1cc22c3c3561f7c6374d32fafd45839256064958)) +* Model Monitor console uri. ([71fbc81](https://github.com/googleapis/python-aiplatform/commit/71fbc81df8fa0d7c863233abc3ed6d40666c1623)) + ## [1.52.0](https://github.com/googleapis/python-aiplatform/compare/v1.51.0...v1.52.0) (2024-05-21) diff --git a/google/cloud/aiplatform/gapic_version.py b/google/cloud/aiplatform/gapic_version.py index 33b7cbe7a9..b2b84effcb 100644 --- a/google/cloud/aiplatform/gapic_version.py +++ b/google/cloud/aiplatform/gapic_version.py @@ -13,4 +13,4 @@ # See the License for the specific language governing permissions and # limitations under the License. 
# -__version__ = "1.52.0" # {x-release-please-version} +__version__ = "1.53.0" # {x-release-please-version} diff --git a/google/cloud/aiplatform/v1/schema/predict/instance/gapic_version.py b/google/cloud/aiplatform/v1/schema/predict/instance/gapic_version.py index 33b7cbe7a9..b2b84effcb 100644 --- a/google/cloud/aiplatform/v1/schema/predict/instance/gapic_version.py +++ b/google/cloud/aiplatform/v1/schema/predict/instance/gapic_version.py @@ -13,4 +13,4 @@ # See the License for the specific language governing permissions and # limitations under the License. # -__version__ = "1.52.0" # {x-release-please-version} +__version__ = "1.53.0" # {x-release-please-version} diff --git a/google/cloud/aiplatform/v1/schema/predict/instance_v1/gapic_version.py b/google/cloud/aiplatform/v1/schema/predict/instance_v1/gapic_version.py index 33b7cbe7a9..b2b84effcb 100644 --- a/google/cloud/aiplatform/v1/schema/predict/instance_v1/gapic_version.py +++ b/google/cloud/aiplatform/v1/schema/predict/instance_v1/gapic_version.py @@ -13,4 +13,4 @@ # See the License for the specific language governing permissions and # limitations under the License. # -__version__ = "1.52.0" # {x-release-please-version} +__version__ = "1.53.0" # {x-release-please-version} diff --git a/google/cloud/aiplatform/v1/schema/predict/params/gapic_version.py b/google/cloud/aiplatform/v1/schema/predict/params/gapic_version.py index 33b7cbe7a9..b2b84effcb 100644 --- a/google/cloud/aiplatform/v1/schema/predict/params/gapic_version.py +++ b/google/cloud/aiplatform/v1/schema/predict/params/gapic_version.py @@ -13,4 +13,4 @@ # See the License for the specific language governing permissions and # limitations under the License. # -__version__ = "1.52.0" # {x-release-please-version} +__version__ = "1.53.0" # {x-release-please-version} diff --git a/google/cloud/aiplatform/v1/schema/predict/params_v1/gapic_version.py b/google/cloud/aiplatform/v1/schema/predict/params_v1/gapic_version.py index 33b7cbe7a9..b2b84effcb 100644 --- a/google/cloud/aiplatform/v1/schema/predict/params_v1/gapic_version.py +++ b/google/cloud/aiplatform/v1/schema/predict/params_v1/gapic_version.py @@ -13,4 +13,4 @@ # See the License for the specific language governing permissions and # limitations under the License. # -__version__ = "1.52.0" # {x-release-please-version} +__version__ = "1.53.0" # {x-release-please-version} diff --git a/google/cloud/aiplatform/v1/schema/predict/prediction/gapic_version.py b/google/cloud/aiplatform/v1/schema/predict/prediction/gapic_version.py index 33b7cbe7a9..b2b84effcb 100644 --- a/google/cloud/aiplatform/v1/schema/predict/prediction/gapic_version.py +++ b/google/cloud/aiplatform/v1/schema/predict/prediction/gapic_version.py @@ -13,4 +13,4 @@ # See the License for the specific language governing permissions and # limitations under the License. # -__version__ = "1.52.0" # {x-release-please-version} +__version__ = "1.53.0" # {x-release-please-version} diff --git a/google/cloud/aiplatform/v1/schema/predict/prediction_v1/gapic_version.py b/google/cloud/aiplatform/v1/schema/predict/prediction_v1/gapic_version.py index 33b7cbe7a9..b2b84effcb 100644 --- a/google/cloud/aiplatform/v1/schema/predict/prediction_v1/gapic_version.py +++ b/google/cloud/aiplatform/v1/schema/predict/prediction_v1/gapic_version.py @@ -13,4 +13,4 @@ # See the License for the specific language governing permissions and # limitations under the License. 
# -__version__ = "1.52.0" # {x-release-please-version} +__version__ = "1.53.0" # {x-release-please-version} diff --git a/google/cloud/aiplatform/v1/schema/trainingjob/definition/gapic_version.py b/google/cloud/aiplatform/v1/schema/trainingjob/definition/gapic_version.py index 33b7cbe7a9..b2b84effcb 100644 --- a/google/cloud/aiplatform/v1/schema/trainingjob/definition/gapic_version.py +++ b/google/cloud/aiplatform/v1/schema/trainingjob/definition/gapic_version.py @@ -13,4 +13,4 @@ # See the License for the specific language governing permissions and # limitations under the License. # -__version__ = "1.52.0" # {x-release-please-version} +__version__ = "1.53.0" # {x-release-please-version} diff --git a/google/cloud/aiplatform/v1/schema/trainingjob/definition_v1/gapic_version.py b/google/cloud/aiplatform/v1/schema/trainingjob/definition_v1/gapic_version.py index 33b7cbe7a9..b2b84effcb 100644 --- a/google/cloud/aiplatform/v1/schema/trainingjob/definition_v1/gapic_version.py +++ b/google/cloud/aiplatform/v1/schema/trainingjob/definition_v1/gapic_version.py @@ -13,4 +13,4 @@ # See the License for the specific language governing permissions and # limitations under the License. # -__version__ = "1.52.0" # {x-release-please-version} +__version__ = "1.53.0" # {x-release-please-version} diff --git a/google/cloud/aiplatform/v1beta1/schema/predict/instance/gapic_version.py b/google/cloud/aiplatform/v1beta1/schema/predict/instance/gapic_version.py index 33b7cbe7a9..b2b84effcb 100644 --- a/google/cloud/aiplatform/v1beta1/schema/predict/instance/gapic_version.py +++ b/google/cloud/aiplatform/v1beta1/schema/predict/instance/gapic_version.py @@ -13,4 +13,4 @@ # See the License for the specific language governing permissions and # limitations under the License. # -__version__ = "1.52.0" # {x-release-please-version} +__version__ = "1.53.0" # {x-release-please-version} diff --git a/google/cloud/aiplatform/v1beta1/schema/predict/instance_v1beta1/gapic_version.py b/google/cloud/aiplatform/v1beta1/schema/predict/instance_v1beta1/gapic_version.py index 33b7cbe7a9..b2b84effcb 100644 --- a/google/cloud/aiplatform/v1beta1/schema/predict/instance_v1beta1/gapic_version.py +++ b/google/cloud/aiplatform/v1beta1/schema/predict/instance_v1beta1/gapic_version.py @@ -13,4 +13,4 @@ # See the License for the specific language governing permissions and # limitations under the License. # -__version__ = "1.52.0" # {x-release-please-version} +__version__ = "1.53.0" # {x-release-please-version} diff --git a/google/cloud/aiplatform/v1beta1/schema/predict/params/gapic_version.py b/google/cloud/aiplatform/v1beta1/schema/predict/params/gapic_version.py index 33b7cbe7a9..b2b84effcb 100644 --- a/google/cloud/aiplatform/v1beta1/schema/predict/params/gapic_version.py +++ b/google/cloud/aiplatform/v1beta1/schema/predict/params/gapic_version.py @@ -13,4 +13,4 @@ # See the License for the specific language governing permissions and # limitations under the License. 
# -__version__ = "1.52.0" # {x-release-please-version} +__version__ = "1.53.0" # {x-release-please-version} diff --git a/google/cloud/aiplatform/v1beta1/schema/predict/params_v1beta1/gapic_version.py b/google/cloud/aiplatform/v1beta1/schema/predict/params_v1beta1/gapic_version.py index 33b7cbe7a9..b2b84effcb 100644 --- a/google/cloud/aiplatform/v1beta1/schema/predict/params_v1beta1/gapic_version.py +++ b/google/cloud/aiplatform/v1beta1/schema/predict/params_v1beta1/gapic_version.py @@ -13,4 +13,4 @@ # See the License for the specific language governing permissions and # limitations under the License. # -__version__ = "1.52.0" # {x-release-please-version} +__version__ = "1.53.0" # {x-release-please-version} diff --git a/google/cloud/aiplatform/v1beta1/schema/predict/prediction/gapic_version.py b/google/cloud/aiplatform/v1beta1/schema/predict/prediction/gapic_version.py index 33b7cbe7a9..b2b84effcb 100644 --- a/google/cloud/aiplatform/v1beta1/schema/predict/prediction/gapic_version.py +++ b/google/cloud/aiplatform/v1beta1/schema/predict/prediction/gapic_version.py @@ -13,4 +13,4 @@ # See the License for the specific language governing permissions and # limitations under the License. # -__version__ = "1.52.0" # {x-release-please-version} +__version__ = "1.53.0" # {x-release-please-version} diff --git a/google/cloud/aiplatform/v1beta1/schema/predict/prediction_v1beta1/gapic_version.py b/google/cloud/aiplatform/v1beta1/schema/predict/prediction_v1beta1/gapic_version.py index 33b7cbe7a9..b2b84effcb 100644 --- a/google/cloud/aiplatform/v1beta1/schema/predict/prediction_v1beta1/gapic_version.py +++ b/google/cloud/aiplatform/v1beta1/schema/predict/prediction_v1beta1/gapic_version.py @@ -13,4 +13,4 @@ # See the License for the specific language governing permissions and # limitations under the License. # -__version__ = "1.52.0" # {x-release-please-version} +__version__ = "1.53.0" # {x-release-please-version} diff --git a/google/cloud/aiplatform/v1beta1/schema/trainingjob/definition/gapic_version.py b/google/cloud/aiplatform/v1beta1/schema/trainingjob/definition/gapic_version.py index 33b7cbe7a9..b2b84effcb 100644 --- a/google/cloud/aiplatform/v1beta1/schema/trainingjob/definition/gapic_version.py +++ b/google/cloud/aiplatform/v1beta1/schema/trainingjob/definition/gapic_version.py @@ -13,4 +13,4 @@ # See the License for the specific language governing permissions and # limitations under the License. # -__version__ = "1.52.0" # {x-release-please-version} +__version__ = "1.53.0" # {x-release-please-version} diff --git a/google/cloud/aiplatform/v1beta1/schema/trainingjob/definition_v1beta1/gapic_version.py b/google/cloud/aiplatform/v1beta1/schema/trainingjob/definition_v1beta1/gapic_version.py index 33b7cbe7a9..b2b84effcb 100644 --- a/google/cloud/aiplatform/v1beta1/schema/trainingjob/definition_v1beta1/gapic_version.py +++ b/google/cloud/aiplatform/v1beta1/schema/trainingjob/definition_v1beta1/gapic_version.py @@ -13,4 +13,4 @@ # See the License for the specific language governing permissions and # limitations under the License. # -__version__ = "1.52.0" # {x-release-please-version} +__version__ = "1.53.0" # {x-release-please-version} diff --git a/google/cloud/aiplatform/version.py b/google/cloud/aiplatform/version.py index 9879eee46f..1fbaf641c4 100644 --- a/google/cloud/aiplatform/version.py +++ b/google/cloud/aiplatform/version.py @@ -15,4 +15,4 @@ # limitations under the License. 
# -__version__ = "1.52.0" +__version__ = "1.53.0" diff --git a/google/cloud/aiplatform_v1/gapic_version.py b/google/cloud/aiplatform_v1/gapic_version.py index 33b7cbe7a9..b2b84effcb 100644 --- a/google/cloud/aiplatform_v1/gapic_version.py +++ b/google/cloud/aiplatform_v1/gapic_version.py @@ -13,4 +13,4 @@ # See the License for the specific language governing permissions and # limitations under the License. # -__version__ = "1.52.0" # {x-release-please-version} +__version__ = "1.53.0" # {x-release-please-version} diff --git a/google/cloud/aiplatform_v1beta1/gapic_version.py b/google/cloud/aiplatform_v1beta1/gapic_version.py index 33b7cbe7a9..b2b84effcb 100644 --- a/google/cloud/aiplatform_v1beta1/gapic_version.py +++ b/google/cloud/aiplatform_v1beta1/gapic_version.py @@ -13,4 +13,4 @@ # See the License for the specific language governing permissions and # limitations under the License. # -__version__ = "1.52.0" # {x-release-please-version} +__version__ = "1.53.0" # {x-release-please-version} diff --git a/pypi/_vertex_ai_placeholder/version.py b/pypi/_vertex_ai_placeholder/version.py index ffe1268267..57b90e71a2 100644 --- a/pypi/_vertex_ai_placeholder/version.py +++ b/pypi/_vertex_ai_placeholder/version.py @@ -15,4 +15,4 @@ # limitations under the License. # -__version__ = "1.52.0" +__version__ = "1.53.0" diff --git a/samples/generated_samples/snippet_metadata_google.cloud.aiplatform.v1.json b/samples/generated_samples/snippet_metadata_google.cloud.aiplatform.v1.json index d3cffb95f6..a939560bbb 100644 --- a/samples/generated_samples/snippet_metadata_google.cloud.aiplatform.v1.json +++ b/samples/generated_samples/snippet_metadata_google.cloud.aiplatform.v1.json @@ -8,7 +8,7 @@ ], "language": "PYTHON", "name": "google-cloud-aiplatform", - "version": "0.1.0" + "version": "1.53.0" }, "snippets": [ { diff --git a/samples/generated_samples/snippet_metadata_google.cloud.aiplatform.v1beta1.json b/samples/generated_samples/snippet_metadata_google.cloud.aiplatform.v1beta1.json index 0343e19c8d..13067b9265 100644 --- a/samples/generated_samples/snippet_metadata_google.cloud.aiplatform.v1beta1.json +++ b/samples/generated_samples/snippet_metadata_google.cloud.aiplatform.v1beta1.json @@ -8,7 +8,7 @@ ], "language": "PYTHON", "name": "google-cloud-aiplatform", - "version": "0.1.0" + "version": "1.53.0" }, "snippets": [ { From 0c874a407193e9c6e1774a5a41c8a6834bcd689d Mon Sep 17 00:00:00 2001 From: Alexey Volkov Date: Thu, 30 May 2024 23:30:38 -0700 Subject: [PATCH 03/36] chore: Fixed all linting issues and enabled linting in vertexai PiperOrigin-RevId: 638919820 --- noxfile.py | 6 +++--- tests/unit/vertexai/test_extensions.py | 6 +----- vertexai/_model_garden/_model_garden_models.py | 3 --- vertexai/extensions/_extensions.py | 4 ++-- vertexai/language_models/_language_models.py | 2 +- vertexai/preview/language_models.py | 3 --- vertexai/reasoning_engines/_reasoning_engines.py | 7 ++----- vertexai/tuning/_tuning.py | 3 +-- 8 files changed, 10 insertions(+), 24 deletions(-) diff --git a/noxfile.py b/noxfile.py index ccfc642ddc..83293f34f8 100644 --- a/noxfile.py +++ b/noxfile.py @@ -95,7 +95,7 @@ def lint(session): "--diff", *LINT_PATHS, ) - session.run("flake8", "google", "tests") + session.run("flake8", *LINT_PATHS) @nox.session(python=DEFAULT_PYTHON_VERSION) @@ -225,7 +225,7 @@ def unit_ray(session, ray): def unit_langchain(session): # Install all test dependencies, then install this package in-place. 
- constraints_path = str(CURRENT_DIRECTORY / "testing" / f"constraints-langchain.txt") + constraints_path = str(CURRENT_DIRECTORY / "testing" / "constraints-langchain.txt") standard_deps = UNIT_TEST_STANDARD_DEPENDENCIES + UNIT_TEST_DEPENDENCIES session.install(*standard_deps, "-c", constraints_path) @@ -236,7 +236,7 @@ def unit_langchain(session): session.run( "py.test", "--quiet", - f"--junitxml=unit_langchain_sponge_log.xml", + "--junitxml=unit_langchain_sponge_log.xml", "--cov=google", "--cov-append", "--cov-config=.coveragerc", diff --git a/tests/unit/vertexai/test_extensions.py b/tests/unit/vertexai/test_extensions.py index 3bda374c3e..ecb3afafd0 100644 --- a/tests/unit/vertexai/test_extensions.py +++ b/tests/unit/vertexai/test_extensions.py @@ -176,11 +176,7 @@ def execute_extension_mock(): with mock.patch.object( extension_execution_service.ExtensionExecutionServiceClient, "execute_extension" ) as execute_extension_mock: - response_mock = mock.MagicMock() - response_mock.content.return_value = _TEST_RESPONSE_CONTENT - api_client_mock = mock.MagicMock() - api_client_mock.execute_extension.return_value = response_mock - execute_extension_mock.return_value = api_client_mock + execute_extension_mock.return_value.content = _TEST_RESPONSE_CONTENT yield execute_extension_mock diff --git a/vertexai/_model_garden/_model_garden_models.py b/vertexai/_model_garden/_model_garden_models.py index febde9d25d..77e69e735f 100644 --- a/vertexai/_model_garden/_model_garden_models.py +++ b/vertexai/_model_garden/_model_garden_models.py @@ -25,9 +25,6 @@ from google.cloud.aiplatform import models as aiplatform_models from google.cloud.aiplatform import _publisher_models -# this is needed for class registration to _SUBCLASSES -import vertexai # pylint:disable=unused-import - _SUPPORTED_PUBLISHERS = ["google"] _SHORT_MODEL_ID_TO_TUNING_PIPELINE_MAP = { diff --git a/vertexai/extensions/_extensions.py b/vertexai/extensions/_extensions.py index 0f0ef61351..a046cbd535 100644 --- a/vertexai/extensions/_extensions.py +++ b/vertexai/extensions/_extensions.py @@ -302,8 +302,8 @@ def _try_parse_execution_response( ) -> Union[_utils.JsonDict, str]: content: str = response.content try: - content = json.loads(response.content) - except: + content = json.loads(content) + except json.JSONDecodeError: pass return content diff --git a/vertexai/language_models/_language_models.py b/vertexai/language_models/_language_models.py index 5a60d595c9..07fee386d3 100644 --- a/vertexai/language_models/_language_models.py +++ b/vertexai/language_models/_language_models.py @@ -4163,7 +4163,7 @@ def _uri_join(uri: str, path_fragment: str) -> str: # Importing here to prevent issues caused by circular references # pylint: disable=g-import-not-at-top,g-bad-import-order -from vertexai.language_models import _distillation +from vertexai.language_models import _distillation # noqa: E402 class _PreviewTextGenerationModel( diff --git a/vertexai/preview/language_models.py b/vertexai/preview/language_models.py index 7fd673b924..c2c070677d 100644 --- a/vertexai/preview/language_models.py +++ b/vertexai/preview/language_models.py @@ -23,9 +23,6 @@ _PreviewTextEmbeddingModel, _PreviewTextGenerationModel, ChatMessage, - ChatModel, - ChatSession, - CodeChatSession, CountTokensResponse, InputOutputTextPair, TextEmbedding, diff --git a/vertexai/reasoning_engines/_reasoning_engines.py b/vertexai/reasoning_engines/_reasoning_engines.py index 3f4bc65f9c..9b0587a3d7 100644 --- a/vertexai/reasoning_engines/_reasoning_engines.py +++ 
b/vertexai/reasoning_engines/_reasoning_engines.py @@ -22,6 +22,7 @@ import typing from typing import Optional, Protocol, Sequence, Union +from google.api_core import exceptions from google.cloud.aiplatform import base from google.cloud.aiplatform import initializer from google.cloud.aiplatform import utils as aip_utils @@ -378,10 +379,6 @@ def _prepare( use for staging the artifacts needed. extra_packages (Sequence[str]): The set of extra user-provided packages. """ - try: - from google.cloud.exceptions import NotFound - except: - NotFound = Exception storage = _utils._import_cloud_storage_or_raise() cloudpickle = _utils._import_cloudpickle_or_raise() storage_client = storage.Client(project=project) @@ -389,7 +386,7 @@ def _prepare( try: gcs_bucket = storage_client.get_bucket(staging_bucket) _LOGGER.info(f"Using bucket {staging_bucket}") - except NotFound: + except exceptions.NotFound: new_bucket = storage_client.bucket(staging_bucket) gcs_bucket = storage_client.create_bucket(new_bucket, location=location) _LOGGER.info(f"Creating bucket {staging_bucket} in {location=}") diff --git a/vertexai/tuning/_tuning.py b/vertexai/tuning/_tuning.py index 528fb715ae..22a627c234 100644 --- a/vertexai/tuning/_tuning.py +++ b/vertexai/tuning/_tuning.py @@ -15,8 +15,7 @@ # pylint: disable=protected-access """Classes to support Tuning.""" -import typing -from typing import Dict, List, Optional, Union, Sequence +from typing import Dict, List, Optional, Union from google.auth import credentials as auth_credentials From 28a3c56fdcfa4fab819e8f79d235f6576febdfce Mon Sep 17 00:00:00 2001 From: Yeesian Ng Date: Fri, 31 May 2024 07:23:39 -0700 Subject: [PATCH 04/36] feat: Support VertexTool in langchain template. PiperOrigin-RevId: 639027200 --- ...st_reasoning_engine_templates_langchain.py | 18 ++++++++-- .../reasoning_engines/templates/langchain.py | 35 ++++++++++++++----- 2 files changed, 42 insertions(+), 11 deletions(-) diff --git a/tests/unit/vertex_langchain/test_reasoning_engine_templates_langchain.py b/tests/unit/vertex_langchain/test_reasoning_engine_templates_langchain.py index 2ba30fc834..1dc3902823 100644 --- a/tests/unit/vertex_langchain/test_reasoning_engine_templates_langchain.py +++ b/tests/unit/vertex_langchain/test_reasoning_engine_templates_langchain.py @@ -21,6 +21,8 @@ import vertexai from google.cloud.aiplatform import initializer from vertexai.preview import reasoning_engines +from vertexai.preview.generative_models import grounding +from vertexai.generative_models import Tool import pytest @@ -81,6 +83,12 @@ def langchain_dump_mock(): yield langchain_dump_mock +@pytest.fixture +def mock_chatvertexai(): + with mock.patch("langchain_google_vertexai.ChatVertexAI") as model_mock: + yield model_mock + + @pytest.mark.usefixtures("google_auth_mock") class TestLangchainAgent: def setup_method(self): @@ -113,10 +121,11 @@ def test_initialization(self): assert agent._location == _TEST_LOCATION assert agent._runnable is None - def test_initialization_with_tools(self): + def test_initialization_with_tools(self, mock_chatvertexai): tools = [ place_tool_query, StructuredTool.from_function(place_photo_query), + Tool.from_google_search_retrieval(grounding.GoogleSearchRetrieval()), ] agent = reasoning_engines.LangchainAgent( model=_TEST_MODEL, @@ -124,8 +133,11 @@ def test_initialization_with_tools(self): ) for tool, agent_tool in zip(tools, agent._tools): assert isinstance(agent_tool, type(tool)) + assert agent._runnable is None + agent.set_up() + assert agent._runnable is not None - def 
test_set_up(self, vertexai_init_mock): + def test_set_up(self): agent = reasoning_engines.LangchainAgent( model=_TEST_MODEL, prompt=self.prompt, @@ -135,7 +147,7 @@ def test_set_up(self, vertexai_init_mock): agent.set_up() assert agent._runnable is not None - def test_clone(self, vertexai_init_mock): + def test_clone(self): agent = reasoning_engines.LangchainAgent( model=_TEST_MODEL, prompt=self.prompt, diff --git a/vertexai/preview/reasoning_engines/templates/langchain.py b/vertexai/preview/reasoning_engines/templates/langchain.py index 10df70afa6..10f8969008 100644 --- a/vertexai/preview/reasoning_engines/templates/langchain.py +++ b/vertexai/preview/reasoning_engines/templates/langchain.py @@ -42,6 +42,13 @@ RunnableConfig = Any RunnableSerializable = Any + try: + from langchain_google_vertexai.functions_utils import _ToolsType + + _ToolLike = _ToolsType + except ImportError: + _ToolLike = Any + def _default_runnable_kwargs(has_history: bool) -> Mapping[str, Any]: # https://github.com/langchain-ai/langchain/blob/5784dfed001730530637793bea1795d9d5a7c244/libs/core/langchain_core/runnables/history.py#L237-L241 @@ -62,7 +69,13 @@ def _default_runnable_kwargs(has_history: bool) -> Mapping[str, Any]: def _default_output_parser(): - from langchain.agents.output_parsers.tools import ToolsAgentOutputParser + try: + from langchain.agents.output_parsers.tools import ToolsAgentOutputParser + except (ModuleNotFoundError, ImportError): + # Fallback to an older version if needed. + from langchain.agents.output_parsers.openai_tools import ( + OpenAIToolsAgentOutputParser as ToolsAgentOutputParser, + ) return ToolsAgentOutputParser() @@ -90,7 +103,7 @@ def _default_model_builder( def _default_runnable_builder( model: "BaseLanguageModel", *, - tools: Optional[Sequence[Union[Callable, "BaseTool"]]] = None, + tools: Optional[Sequence["_ToolLike"]] = None, prompt: Optional["RunnableSerializable"] = None, output_parser: Optional["RunnableSerializable"] = None, chat_history: Optional["GetSessionHistoryCallable"] = None, @@ -123,6 +136,7 @@ def _default_runnable_builder( if isinstance(tool, lc_tools.BaseTool) else StructuredTool.from_function(tool) for tool in tools + if isinstance(tool, (Callable, lc_tools.BaseTool)) ], **agent_executor_kwargs, ) @@ -139,7 +153,14 @@ def _default_runnable_builder( def _default_prompt(has_history: bool) -> "RunnableSerializable": from langchain_core import prompts - from langchain.agents.format_scratchpad.tools import format_to_tool_messages + + try: + from langchain.agents.format_scratchpad.tools import format_to_tool_messages + except (ModuleNotFoundError, ImportError): + # Fallback to an older version if needed. 
+ from langchain.agents.format_scratchpad.openai_tools import ( + format_to_openai_tool_messages as format_to_tool_messages, + ) if has_history: return { @@ -186,12 +207,10 @@ def _validate_callable_parameters_are_annotated(callable: Callable): ) -def _validate_tools(tools: Sequence[Union[Callable, "BaseTool"]]): +def _validate_tools(tools: Sequence["_ToolLike"]): """Validates that the tools are usable for tool calling.""" - from langchain_core import tools as lc_tools - for tool in tools: - if not isinstance(tool, lc_tools.BaseTool): + if isinstance(tool, Callable): _validate_callable_parameters_are_annotated(tool) @@ -208,7 +227,7 @@ def __init__( model: str, *, prompt: Optional["RunnableSerializable"] = None, - tools: Optional[Sequence[Union[Callable, "BaseTool"]]] = None, + tools: Optional[Sequence["_ToolLike"]] = None, output_parser: Optional["RunnableSerializable"] = None, chat_history: Optional["GetSessionHistoryCallable"] = None, model_kwargs: Optional[Mapping[str, Any]] = None, From ba6582856b1d7f9a6ac8f90a3fa5ea6723ac64ab Mon Sep 17 00:00:00 2001 From: A Vertex SDK engineer Date: Fri, 31 May 2024 14:06:59 -0700 Subject: [PATCH 05/36] feat: Add display experiment run button for Ipython environments PiperOrigin-RevId: 639147169 --- google/cloud/aiplatform/metadata/metadata.py | 2 ++ .../cloud/aiplatform/utils/_ipython_utils.py | 31 +++++++++++++++++++ 2 files changed, 33 insertions(+) diff --git a/google/cloud/aiplatform/metadata/metadata.py b/google/cloud/aiplatform/metadata/metadata.py index 08f1235820..2226d96e20 100644 --- a/google/cloud/aiplatform/metadata/metadata.py +++ b/google/cloud/aiplatform/metadata/metadata.py @@ -492,6 +492,8 @@ def start_run( run_name=run, experiment=self.experiment, tensorboard=tensorboard ) + _ipython_utils.display_experiment_run_button(self._experiment_run) + return self._experiment_run def end_run( diff --git a/google/cloud/aiplatform/utils/_ipython_utils.py b/google/cloud/aiplatform/utils/_ipython_utils.py index fa5705f31c..584c47c368 100644 --- a/google/cloud/aiplatform/utils/_ipython_utils.py +++ b/google/cloud/aiplatform/utils/_ipython_utils.py @@ -24,6 +24,7 @@ if typing.TYPE_CHECKING: from google.cloud.aiplatform.metadata import experiment_resources + from google.cloud.aiplatform.metadata import experiment_run_resource from google.cloud.aiplatform import model_evaluation _LOGGER = base.Logger(__name__) @@ -168,6 +169,36 @@ def display_experiment_button(experiment: "experiment_resources.Experiment") -> display_link("View Experiment", uri, "science") +def display_experiment_run_button( + experiment_run: "experiment_run_resource.ExperimentRun", +) -> None: + """Function to generate a link bound to the Vertex experiment run""" + if not is_ipython_available(): + return + try: + project = experiment_run.project + location = experiment_run.location + experiment_name = experiment_run._experiment._metadata_context.name + run_name = experiment_run.name + if ( + run_name is None + or experiment_name is None + or project is None + or location is None + ): + return + except AttributeError: + _LOGGER.warning("Unable to fetch experiment run metadata") + return + + uri = ( + "https://console.cloud.google.com/vertex-ai/experiments/locations/" + + f"{location}/experiments/{experiment_name}/" + + f"runs/{experiment_name}-{run_name}?project={project}" + ) + display_link("View Experiment Run", uri, "science") + + def display_model_evaluation_button( evaluation: "model_evaluation.ModelEvaluation", ) -> None: From 
88c6a6a4f11285d429c3777f59101e53e4672185 Mon Sep 17 00:00:00 2001 From: Amy Wu Date: Fri, 31 May 2024 16:16:17 -0700 Subject: [PATCH 06/36] fix: Generalize RAG files import from Google Drive PiperOrigin-RevId: 639184120 --- tests/unit/vertex_rag/test_rag_constants.py | 3 +++ tests/unit/vertex_rag/test_rag_data.py | 6 +++--- vertexai/preview/rag/utils/_gapic_utils.py | 6 ++++-- 3 files changed, 10 insertions(+), 5 deletions(-) diff --git a/tests/unit/vertex_rag/test_rag_constants.py b/tests/unit/vertex_rag/test_rag_constants.py index 847cefa31e..7aa8449657 100644 --- a/tests/unit/vertex_rag/test_rag_constants.py +++ b/tests/unit/vertex_rag/test_rag_constants.py @@ -91,6 +91,9 @@ TEST_DRIVE_FOLDER = ( f"https://drive.google.com/corp/drive/folders/{TEST_DRIVE_FOLDER_ID}" ) +TEST_DRIVE_FOLDER_2 = ( + f"https://drive.google.com/drive/folders/{TEST_DRIVE_FOLDER_ID}?resourcekey=0-eiOT3" +) TEST_IMPORT_FILES_CONFIG_DRIVE_FOLDER = ImportRagFilesConfig() TEST_IMPORT_FILES_CONFIG_DRIVE_FOLDER.google_drive_source.resource_ids = [ GoogleDriveSource.ResourceId( diff --git a/tests/unit/vertex_rag/test_rag_data.py b/tests/unit/vertex_rag/test_rag_data.py index 6b2f3e98f7..06a4645d64 100644 --- a/tests/unit/vertex_rag/test_rag_data.py +++ b/tests/unit/vertex_rag/test_rag_data.py @@ -374,11 +374,11 @@ def test_prepare_import_files_request_list_gcs_uris(self): ) import_files_request_eq(request, tc.TEST_IMPORT_REQUEST_GCS) - def test_prepare_import_files_request_drive_folders(self): - paths = [tc.TEST_DRIVE_FOLDER] + @pytest.mark.parametrize("path", [tc.TEST_DRIVE_FOLDER, tc.TEST_DRIVE_FOLDER_2]) + def test_prepare_import_files_request_drive_folders(self, path): request = prepare_import_files_request( corpus_name=tc.TEST_RAG_CORPUS_RESOURCE_NAME, - paths=paths, + paths=[path], chunk_size=tc.TEST_CHUNK_SIZE, chunk_overlap=tc.TEST_CHUNK_OVERLAP, ) diff --git a/vertexai/preview/rag/utils/_gapic_utils.py b/vertexai/preview/rag/utils/_gapic_utils.py index 6a8510ff23..c656c13c11 100644 --- a/vertexai/preview/rag/utils/_gapic_utils.py +++ b/vertexai/preview/rag/utils/_gapic_utils.py @@ -97,10 +97,12 @@ def convert_path_to_resource_id( # Google Drive source path_list = path.split("/") if "file" in path_list: - resource_id = path_list[5] + index = path_list.index("file") + 2 + resource_id = path_list[index].split("?")[0] resource_type = GoogleDriveSource.ResourceId.ResourceType.RESOURCE_TYPE_FILE elif "folders" in path_list: - resource_id = path_list[6] + index = path_list.index("folders") + 1 + resource_id = path_list[index].split("?")[0] resource_type = ( GoogleDriveSource.ResourceId.ResourceType.RESOURCE_TYPE_FOLDER ) From 51431d7d07d93ee094888d1e11ba378d8ea7c41f Mon Sep 17 00:00:00 2001 From: Amy Wu Date: Mon, 3 Jun 2024 14:06:02 -0700 Subject: [PATCH 07/36] chore: remove Ray 2.4 unit tests in presubmit check for future deprecation PiperOrigin-RevId: 639905468 --- .github/sync-repo-settings.yaml | 1 - 1 file changed, 1 deletion(-) diff --git a/.github/sync-repo-settings.yaml b/.github/sync-repo-settings.yaml index 9549b6901c..0f0ae6cfb0 100644 --- a/.github/sync-repo-settings.yaml +++ b/.github/sync-repo-settings.yaml @@ -20,7 +20,6 @@ branchProtectionRules: - 'Presubmit - Unit Tests Python 3.10' - 'Presubmit - Unit Tests Python 3.11' - 'Presubmit - Unit Tests Python 3.12' - - 'Presubmit - Unit Tests Ray 2.4.0' - 'Presubmit - Unit Tests Ray 2.9.3' - 'Presubmit - Unit Tests LangChain (Python 3.8)' - 'Presubmit - Unit Tests LangChain (Python 3.9)' From 
bc8b14a7c9c632721db9166dc9b63eec17d31afd Mon Sep 17 00:00:00 2001 From: Matthew Tang Date: Mon, 3 Jun 2024 20:54:42 -0700 Subject: [PATCH 08/36] feat: GenAI - Release ToolConfig to GA PiperOrigin-RevId: 640001644 --- tests/unit/vertexai/test_generative_models.py | 2 +- vertexai/generative_models/__init__.py | 2 ++ 2 files changed, 3 insertions(+), 1 deletion(-) diff --git a/tests/unit/vertexai/test_generative_models.py b/tests/unit/vertexai/test_generative_models.py index 391e30e554..36a2c1dc72 100644 --- a/tests/unit/vertexai/test_generative_models.py +++ b/tests/unit/vertexai/test_generative_models.py @@ -749,7 +749,7 @@ def test_chat_function_calling(self, generative_models: generative_models): ) @pytest.mark.parametrize( "generative_models", - [preview_generative_models], + [generative_models, preview_generative_models], ) def test_chat_forced_function_calling(self, generative_models: generative_models): get_current_weather_func = generative_models.FunctionDeclaration( diff --git a/vertexai/generative_models/__init__.py b/vertexai/generative_models/__init__.py index 6c3eb34ae9..71f0d07cab 100644 --- a/vertexai/generative_models/__init__.py +++ b/vertexai/generative_models/__init__.py @@ -32,6 +32,7 @@ ResponseValidationError, SafetySetting, Tool, + ToolConfig, grounding, ) @@ -51,5 +52,6 @@ "ResponseValidationError", "SafetySetting", "Tool", + "ToolConfig", "grounding", ] From 6c14e8b31bd950ac4f4a862b4e62ead42fe30463 Mon Sep 17 00:00:00 2001 From: A Vertex SDK engineer Date: Tue, 4 Jun 2024 11:29:10 -0700 Subject: [PATCH 09/36] feat: sample code for Vertex AI Feature Store PiperOrigin-RevId: 640224428 --- samples/model-builder/conftest.py | 25 +++++++++++++ ...te_bigtable_feature_online_store_sample.py | 33 +++++++++++++++++ ...gtable_feature_online_store_sample_test.py | 35 +++++++++++++++++++ ...ized_public_feature_online_store_sample.py | 33 +++++++++++++++++ ...public_feature_online_store_sample_test.py | 35 +++++++++++++++++++ samples/model-builder/test_constants.py | 7 ++-- vertexai/__init__.py | 2 ++ vertexai/resources/__init__.py | 2 ++ 8 files changed, 170 insertions(+), 2 deletions(-) create mode 100644 samples/model-builder/feature_store/create_bigtable_feature_online_store_sample.py create mode 100644 samples/model-builder/feature_store/create_bigtable_feature_online_store_sample_test.py create mode 100644 samples/model-builder/feature_store/create_optimized_public_feature_online_store_sample.py create mode 100644 samples/model-builder/feature_store/create_optimized_public_feature_online_store_sample_test.py diff --git a/samples/model-builder/conftest.py b/samples/model-builder/conftest.py index 398f3ac7bc..949b620d22 100644 --- a/samples/model-builder/conftest.py +++ b/samples/model-builder/conftest.py @@ -15,6 +15,7 @@ from unittest.mock import MagicMock, patch from google.cloud import aiplatform +import vertexai import pytest @@ -691,6 +692,30 @@ def mock_write_feature_values(mock_entity_type): yield mock_write_feature_values +@pytest.fixture +def mock_feature_online_store(): + mock = MagicMock(vertexai.resources.preview.FeatureOnlineStore) + yield mock + + +@pytest.fixture +def mock_create_feature_online_store(mock_feature_online_store): + with patch.object( + vertexai.resources.preview.FeatureOnlineStore, "create_bigtable_store" + ) as mock_create_feature_online_store: + mock_create_feature_online_store.return_value = mock_feature_online_store + yield mock_create_feature_online_store + + +@pytest.fixture +def 
mock_create_optimized_public_online_store(mock_feature_online_store): + with patch.object( + vertexai.resources.preview.FeatureOnlineStore, "create_optimized_store" + ) as mock_create_optimized_store: + mock_create_optimized_store.return_value = mock_feature_online_store + yield mock_create_optimized_store + + """ ---------------------------------------------------------------------------- Experiment Tracking Fixtures diff --git a/samples/model-builder/feature_store/create_bigtable_feature_online_store_sample.py b/samples/model-builder/feature_store/create_bigtable_feature_online_store_sample.py new file mode 100644 index 0000000000..69e883ee70 --- /dev/null +++ b/samples/model-builder/feature_store/create_bigtable_feature_online_store_sample.py @@ -0,0 +1,33 @@ +# Copyright 2024 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# [START aiplatform_sdk_create_bigtable_feature_online_store_sample] + +from google.cloud import aiplatform +import vertexai + + +def create_bigtable_feature_online_store_sample( + project: str, + location: str, + feature_online_store_id: str, +): + aiplatform.init(project=project, location=location) + fos = vertexai.resources.preview.FeatureOnlineStore.create_bigtable_store( + feature_online_store_id + ) + return fos + + +# [END aiplatform_sdk_create_bigtable_feature_online_store_sample] diff --git a/samples/model-builder/feature_store/create_bigtable_feature_online_store_sample_test.py b/samples/model-builder/feature_store/create_bigtable_feature_online_store_sample_test.py new file mode 100644 index 0000000000..075b7e836c --- /dev/null +++ b/samples/model-builder/feature_store/create_bigtable_feature_online_store_sample_test.py @@ -0,0 +1,35 @@ +# Copyright 2024 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +from feature_store import create_bigtable_feature_online_store_sample + +import test_constants as constants + + +def test_create_bigtable_feature_online_store_sample( + mock_sdk_init, mock_create_feature_online_store +): + create_bigtable_feature_online_store_sample.create_bigtable_feature_online_store_sample( + project=constants.PROJECT, + location=constants.LOCATION, + feature_online_store_id=constants.FEATURE_ONLINE_STORE_ID, + ) + + mock_sdk_init.assert_called_once_with( + project=constants.PROJECT, location=constants.LOCATION + ) + + mock_create_feature_online_store.assert_called_once_with( + constants.FEATURE_ONLINE_STORE_ID + ) diff --git a/samples/model-builder/feature_store/create_optimized_public_feature_online_store_sample.py b/samples/model-builder/feature_store/create_optimized_public_feature_online_store_sample.py new file mode 100644 index 0000000000..45a3f177dd --- /dev/null +++ b/samples/model-builder/feature_store/create_optimized_public_feature_online_store_sample.py @@ -0,0 +1,33 @@ +# Copyright 2024 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# [START aiplatform_sdk_create_optimized_public_feature_online_store_sample] + +from google.cloud import aiplatform +import vertexai + + +def create_optimized_public_feature_online_store_sample( + project: str, + location: str, + feature_online_store_id: str, +): + aiplatform.init(project=project, location=location) + fos = vertexai.resources.preview.FeatureOnlineStore.create_optimized_store( + feature_online_store_id + ) + return fos + + +# [END aiplatform_sdk_create_optimized_public_feature_online_store_sample] diff --git a/samples/model-builder/feature_store/create_optimized_public_feature_online_store_sample_test.py b/samples/model-builder/feature_store/create_optimized_public_feature_online_store_sample_test.py new file mode 100644 index 0000000000..568527523e --- /dev/null +++ b/samples/model-builder/feature_store/create_optimized_public_feature_online_store_sample_test.py @@ -0,0 +1,35 @@ +# Copyright 2024 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +from feature_store import create_optimized_public_feature_online_store_sample +import test_constants as constants + + +def test_create_optimized_feature_online_store_sample( + mock_sdk_init, mock_create_optimized_public_online_store +): + + create_optimized_public_feature_online_store_sample.create_optimized_public_feature_online_store_sample( + project=constants.PROJECT, + location=constants.LOCATION, + feature_online_store_id=constants.FEATURE_ONLINE_STORE_ID, + ) + + mock_sdk_init.assert_called_once_with( + project=constants.PROJECT, location=constants.LOCATION + ) + + mock_create_optimized_public_online_store.assert_called_once_with( + constants.FEATURE_ONLINE_STORE_ID + ) diff --git a/samples/model-builder/test_constants.py b/samples/model-builder/test_constants.py index 210256913d..cc62f17144 100644 --- a/samples/model-builder/test_constants.py +++ b/samples/model-builder/test_constants.py @@ -15,9 +15,9 @@ from random import randint from uuid import uuid4 +from google.protobuf import timestamp_pb2 from google.auth import credentials from google.cloud import aiplatform -from google.protobuf import timestamp_pb2 PROJECT = "abc" LOCATION = "us-central1" @@ -208,7 +208,7 @@ PYTHON_MODULE_NAME = "trainer.task" MODEL_TYPE = "CLOUD" -# Feature store constants +# Feature store constants (legacy) FEATURESTORE_ID = "movie_prediction" FEATURESTORE_NAME = ( f"projects/{PROJECT}/locations/{LOCATION}/featurestores/{FEATURESTORE_ID}" @@ -252,6 +252,9 @@ GCS_SOURCE_TYPE = "avro" WORKER_COUNT = 1 +# Feature online store constants +FEATURE_ONLINE_STORE_ID = "sample_feature_online_store" + TABULAR_TARGET_COLUMN = "target_column" FORECASTNG_TIME_COLUMN = "date" FORECASTNG_TIME_SERIES_IDENTIFIER_COLUMN = "time_series_id" diff --git a/vertexai/__init__.py b/vertexai/__init__.py index 8f73185c54..c2558bad09 100644 --- a/vertexai/__init__.py +++ b/vertexai/__init__.py @@ -20,8 +20,10 @@ from google.cloud.aiplatform import init from vertexai import preview +from vertexai import resources __all__ = [ "init", "preview", + "resources", ] diff --git a/vertexai/resources/__init__.py b/vertexai/resources/__init__.py index f3b85e0c45..17b546129d 100644 --- a/vertexai/resources/__init__.py +++ b/vertexai/resources/__init__.py @@ -17,6 +17,7 @@ """The vertexai resources module.""" from google.cloud.aiplatform import initializer +from vertexai.resources import preview from google.cloud.aiplatform.datasets import ( ImageDataset, @@ -177,4 +178,5 @@ "TimeSeriesDataset", "TimeSeriesDenseEncoderForecastingTrainingJob", "VideoDataset", + "preview", ) From e12dfe5241dce8fb65648e364df5e0c915d33097 Mon Sep 17 00:00:00 2001 From: Amy Wu Date: Tue, 4 Jun 2024 11:38:03 -0700 Subject: [PATCH 10/36] BREAKING_CHANGE: deprecate Ray 2.4 PiperOrigin-RevId: 640228257 --- .kokoro/presubmit/unit_ray_2-4.cfg | 13 --------- .../aiplatform/vertex_ray/cluster_init.py | 19 +++++++------ google/cloud/aiplatform/vertex_ray/data.py | 13 ++------- .../vertex_ray/predict/sklearn/register.py | 17 ++--------- .../vertex_ray/predict/tensorflow/register.py | 17 ++--------- .../vertex_ray/predict/torch/register.py | 17 ++--------- .../vertex_ray/predict/xgboost/register.py | 17 ++--------- .../vertex_ray/util/_gapic_utils.py | 8 +++--- .../vertex_ray/util/_validation_utils.py | 2 +- .../aiplatform/vertex_ray/util/resources.py | 10 +++---- noxfile.py | 2 +- setup.py | 8 ++---- .../vertex_ray/test_cluster_management.py | 4 +-- .../test_job_submission_dashboard.py | 4 +-- tests/system/vertex_ray/test_ray_data.py | 28 ++----------------- 
tests/unit/vertex_ray/conftest.py | 8 +++--- tests/unit/vertex_ray/test_cluster_init.py | 17 ++++++++--- tests/unit/vertex_ray/test_constants.py | 18 ++++++------ tests/unit/vertex_ray/test_ray_prediction.py | 5 ---- 19 files changed, 69 insertions(+), 158 deletions(-) delete mode 100644 .kokoro/presubmit/unit_ray_2-4.cfg diff --git a/.kokoro/presubmit/unit_ray_2-4.cfg b/.kokoro/presubmit/unit_ray_2-4.cfg deleted file mode 100644 index 54a4f83463..0000000000 --- a/.kokoro/presubmit/unit_ray_2-4.cfg +++ /dev/null @@ -1,13 +0,0 @@ -# Format: //devtools/kokoro/config/proto/build.proto - -# Run unit tests for Ray 2.4.0 on Python 3.10 -env_vars: { - key: "NOX_SESSION" - value: "unit_ray(ray='2.4.0')" -} - -# Run unit tests in parallel, splitting up by file -env_vars: { - key: "PYTEST_ADDOPTS" - value: "-n=auto --dist=loadscope" -} diff --git a/google/cloud/aiplatform/vertex_ray/cluster_init.py b/google/cloud/aiplatform/vertex_ray/cluster_init.py index c510c377fd..1894847edc 100644 --- a/google/cloud/aiplatform/vertex_ray/cluster_init.py +++ b/google/cloud/aiplatform/vertex_ray/cluster_init.py @@ -19,14 +19,13 @@ import logging import time from typing import Dict, List, Optional -import warnings from google.cloud.aiplatform import initializer from google.cloud.aiplatform import utils from google.cloud.aiplatform.utils import resource_manager_utils -from google.cloud.aiplatform_v1beta1.types import persistent_resource_service +from google.cloud.aiplatform_v1.types import persistent_resource_service -from google.cloud.aiplatform_v1beta1.types.persistent_resource import ( +from google.cloud.aiplatform_v1.types.persistent_resource import ( PersistentResource, RaySpec, RayMetricSpec, @@ -42,6 +41,9 @@ ) from google.protobuf import field_mask_pb2 # type: ignore +from google.cloud.aiplatform.vertex_ray.util._validation_utils import ( + _V2_4_WARNING_MESSAGE, +) def create_ray_cluster( @@ -51,7 +53,7 @@ def create_ray_cluster( network: Optional[str] = None, service_account: Optional[str] = None, cluster_name: Optional[str] = None, - worker_node_types: Optional[List[resources.Resources]] = None, + worker_node_types: Optional[List[resources.Resources]] = [resources.Resources()], custom_images: Optional[resources.NodeImages] = None, enable_metrics_collection: Optional[bool] = True, labels: Optional[Dict[str, str]] = None, @@ -128,6 +130,9 @@ def create_ray_cluster( Returns: The cluster_resource_name of the initiated Ray cluster on Vertex. + Raise: + ValueError: If the cluster is not created successfully. + RuntimeError: If the ray_version is 2.4. """ if network is None: @@ -135,13 +140,11 @@ def create_ray_cluster( "[Ray on Vertex]: No VPC network configured. It is required for client connection." ) if ray_version == "2.4": - warnings.warn( - _validation_utils._V2_4_WARNING_MESSAGE, DeprecationWarning, stacklevel=2 - ) + raise RuntimeError(_V2_4_WARNING_MESSAGE) local_ray_verion = _validation_utils.get_local_ray_version() if ray_version != local_ray_verion: if custom_images is None and head_node_type.custom_image is None: - install_ray_version = "2.9.3" if ray_version == "2.9" else "2.4.0" + install_ray_version = "2.9.3" logging.info( "[Ray on Vertex]: Local runtime has Ray version %s" ", but the requested cluster runtime has %s. 
Please " diff --git a/google/cloud/aiplatform/vertex_ray/data.py b/google/cloud/aiplatform/vertex_ray/data.py index da01814b75..f4fbe98238 100644 --- a/google/cloud/aiplatform/vertex_ray/data.py +++ b/google/cloud/aiplatform/vertex_ray/data.py @@ -18,7 +18,6 @@ import ray.data from ray.data.dataset import Dataset from typing import Any, Dict, Optional -import warnings from google.cloud.aiplatform.vertex_ray.bigquery_datasource import ( BigQueryDatasource, @@ -43,7 +42,6 @@ def read_bigquery( *, parallelism: int = -1, ) -> Dataset: - # The read is identical in Ray 2.4 and 2.9 return ray.data.read_datasource( BigQueryDatasource(), project_id=project_id, @@ -61,13 +59,8 @@ def write_bigquery( ray_remote_args: Dict[str, Any] = None, ) -> Any: if ray.__version__ == "2.4.0": - warnings.warn(_V2_4_WARNING_MESSAGE, DeprecationWarning, stacklevel=2) - return ds.write_datasource( - BigQueryDatasource(), - project_id=project_id, - dataset=dataset, - max_retry_cnt=max_retry_cnt, - ) + raise RuntimeError(_V2_4_WARNING_MESSAGE) + elif ray.__version__ == "2.9.3": if ray_remote_args is None: ray_remote_args = {} @@ -89,5 +82,5 @@ def write_bigquery( else: raise ImportError( f"[Ray on Vertex AI]: Unsupported version {ray.__version__}." - + "Only 2.4.0 and 2.9.3 are supported." + + "Only 2.9.3 is supported." ) diff --git a/google/cloud/aiplatform/vertex_ray/predict/sklearn/register.py b/google/cloud/aiplatform/vertex_ray/predict/sklearn/register.py index 489e8f8d8e..4058e6265d 100644 --- a/google/cloud/aiplatform/vertex_ray/predict/sklearn/register.py +++ b/google/cloud/aiplatform/vertex_ray/predict/sklearn/register.py @@ -17,14 +17,12 @@ # limitations under the License. # -import logging import os import pickle import ray import ray.cloudpickle as cpickle import tempfile from typing import Optional, TYPE_CHECKING -import warnings from google.cloud import aiplatform from google.cloud.aiplatform import initializer @@ -123,23 +121,12 @@ def _get_estimator_from( Raises: ValueError: Invalid Argument. RuntimeError: Model not found. + RuntimeError: Ray version 2.4 is not supported. """ ray_version = ray.__version__ if ray_version == "2.4.0": - warnings.warn(_V2_4_WARNING_MESSAGE, DeprecationWarning, stacklevel=2) - if not isinstance(checkpoint, ray_sklearn.SklearnCheckpoint): - raise ValueError( - "[Ray on Vertex AI]: arg checkpoint should be a" - " ray.train.sklearn.SklearnCheckpoint instance" - ) - if checkpoint.get_preprocessor() is not None: - logging.warning( - "Checkpoint contains preprocessor. However, converting from a Ray" - " Checkpoint to framework specific model does NOT support" - " preprocessing. The model will be exported without preprocessors." - ) - return checkpoint.get_estimator() + raise RuntimeError(_V2_4_WARNING_MESSAGE) try: return checkpoint.get_model() diff --git a/google/cloud/aiplatform/vertex_ray/predict/tensorflow/register.py b/google/cloud/aiplatform/vertex_ray/predict/tensorflow/register.py index 9fc502ecc7..6d25bf7b00 100644 --- a/google/cloud/aiplatform/vertex_ray/predict/tensorflow/register.py +++ b/google/cloud/aiplatform/vertex_ray/predict/tensorflow/register.py @@ -20,7 +20,6 @@ import logging import ray from typing import Callable, Optional, Union, TYPE_CHECKING -import warnings from google.cloud import aiplatform from google.cloud.aiplatform import initializer @@ -142,23 +141,11 @@ def _get_tensorflow_model_from( Raises: ValueError: Invalid Argument. + RuntimeError: Ray version 2.4.0 is not supported. 
""" ray_version = ray.__version__ if ray_version == "2.4.0": - warnings.warn(_V2_4_WARNING_MESSAGE, DeprecationWarning, stacklevel=2) - if not isinstance(checkpoint, ray_tensorflow.TensorflowCheckpoint): - raise ValueError( - "[Ray on Vertex AI]: arg checkpoint should be a" - " ray.train.tensorflow.TensorflowCheckpoint instance" - ) - if checkpoint.get_preprocessor() is not None: - logging.warning( - "Checkpoint contains preprocessor. However, converting from a Ray" - " Checkpoint to framework specific model does NOT support" - " preprocessing. The model will be exported without preprocessors." - ) - - return checkpoint.get_model(model) + raise RuntimeError(_V2_4_WARNING_MESSAGE) try: import tensorflow as tf diff --git a/google/cloud/aiplatform/vertex_ray/predict/torch/register.py b/google/cloud/aiplatform/vertex_ray/predict/torch/register.py index 06c83ba4a9..a91da66084 100644 --- a/google/cloud/aiplatform/vertex_ray/predict/torch/register.py +++ b/google/cloud/aiplatform/vertex_ray/predict/torch/register.py @@ -15,7 +15,6 @@ # See the License for the specific language governing permissions and # limitations under the License. -import logging import os import ray from ray.air._internal.torch_utils import load_torch_model @@ -25,7 +24,6 @@ ) from google.cloud.aiplatform.utils import gcs_utils from typing import Optional -import warnings try: @@ -62,22 +60,11 @@ def get_pytorch_model_from( ValueError: Invalid Argument. ModuleNotFoundError: PyTorch isn't installed. RuntimeError: Model not found. + RuntimeError: Ray version 2.4 is not supported. """ ray_version = ray.__version__ if ray_version == "2.4.0": - warnings.warn(_V2_4_WARNING_MESSAGE, DeprecationWarning, stacklevel=2) - if not isinstance(checkpoint, ray_torch.TorchCheckpoint): - raise ValueError( - "[Ray on Vertex AI]: arg checkpoint should be a" - " ray.train.torch.TorchCheckpoint instance" - ) - if checkpoint.get_preprocessor() is not None: - logging.warning( - "Checkpoint contains preprocessor. However, converting from a Ray" - " Checkpoint to framework specific model does NOT support" - " preprocessing. The model will be exported without preprocessors." - ) - return checkpoint.get_model(model=model) + raise RuntimeError(_V2_4_WARNING_MESSAGE) try: return checkpoint.get_model() diff --git a/google/cloud/aiplatform/vertex_ray/predict/xgboost/register.py b/google/cloud/aiplatform/vertex_ray/predict/xgboost/register.py index 669b0cbde4..c93c6ce3e9 100644 --- a/google/cloud/aiplatform/vertex_ray/predict/xgboost/register.py +++ b/google/cloud/aiplatform/vertex_ray/predict/xgboost/register.py @@ -17,13 +17,11 @@ # limitations under the License. # -import logging import os import pickle import ray import tempfile from typing import Optional, TYPE_CHECKING -import warnings from google.cloud import aiplatform from google.cloud.aiplatform import initializer @@ -134,22 +132,11 @@ def _get_xgboost_model_from( ValueError: Invalid Argument. ModuleNotFoundError: XGBoost isn't installed. RuntimeError: Model not found. + RuntimeError: Ray version 2.4 is not supported. """ ray_version = ray.__version__ if ray_version == "2.4.0": - warnings.warn(_V2_4_WARNING_MESSAGE, DeprecationWarning, stacklevel=2) - if not isinstance(checkpoint, ray_xgboost.XGBoostCheckpoint): - raise ValueError( - "[Ray on Vertex AI]: arg checkpoint should be a" - " ray.train.xgboost.XGBoostCheckpoint instance" - ) - if checkpoint.get_preprocessor() is not None: - logging.warning( - "Checkpoint contains preprocessor. 
However, converting from a Ray" - " Checkpoint to framework specific model does NOT support" - " preprocessing. The model will be exported without preprocessors." - ) - return checkpoint.get_model() + raise RuntimeError(_V2_4_WARNING_MESSAGE) try: # This works for Ray v2.5 diff --git a/google/cloud/aiplatform/vertex_ray/util/_gapic_utils.py b/google/cloud/aiplatform/vertex_ray/util/_gapic_utils.py index 0badbe7ca8..bfedef2db3 100644 --- a/google/cloud/aiplatform/vertex_ray/util/_gapic_utils.py +++ b/google/cloud/aiplatform/vertex_ray/util/_gapic_utils.py @@ -30,10 +30,10 @@ Cluster, Resources, ) -from google.cloud.aiplatform_v1beta1.types.persistent_resource import ( +from google.cloud.aiplatform_v1.types.persistent_resource import ( PersistentResource, ) -from google.cloud.aiplatform_v1beta1.types.persistent_resource_service import ( +from google.cloud.aiplatform_v1.types.persistent_resource_service import ( GetPersistentResourceRequest, ) @@ -47,7 +47,7 @@ def create_persistent_resource_client(): return initializer.global_config.create_client( client_class=PersistentResourceClientWithOverride, appended_gapic_version="vertex_ray", - ).select_version("v1beta1") + ) def polling_delay(num_attempts: int, time_scale: float) -> datetime.timedelta: @@ -84,7 +84,7 @@ def get_persistent_resource( tolerance: number of attemps to get persistent resource. Returns: - aiplatform_v1beta1.PersistentResource if state is RUNNING. + aiplatform_v1.PersistentResource if state is RUNNING. Raises: ValueError: Invalid cluster resource name. diff --git a/google/cloud/aiplatform/vertex_ray/util/_validation_utils.py b/google/cloud/aiplatform/vertex_ray/util/_validation_utils.py index 0db53e5352..4cbed98d52 100644 --- a/google/cloud/aiplatform/vertex_ray/util/_validation_utils.py +++ b/google/cloud/aiplatform/vertex_ray/util/_validation_utils.py @@ -28,7 +28,7 @@ SUPPORTED_RAY_VERSIONS = immutabledict({"2.4": "2.4.0", "2.9": "2.9.3"}) SUPPORTED_PY_VERSION = ["3.10"] _V2_4_WARNING_MESSAGE = ( - "After May 30, 2024, using Ray version = 2.4 will result in an error. " + "After google-cloud-aiplatform>1.53.0, using Ray version = 2.4 will result in an error. " "Please use Ray version = 2.9.3 (default) instead." ) diff --git a/google/cloud/aiplatform/vertex_ray/util/resources.py b/google/cloud/aiplatform/vertex_ray/util/resources.py index 5575edbaf7..28f28f68fd 100644 --- a/google/cloud/aiplatform/vertex_ray/util/resources.py +++ b/google/cloud/aiplatform/vertex_ray/util/resources.py @@ -16,7 +16,7 @@ # import dataclasses from typing import Dict, List, Optional -from google.cloud.aiplatform_v1beta1.types import PersistentResource +from google.cloud.aiplatform_v1.types import PersistentResource @dataclasses.dataclass @@ -53,10 +53,10 @@ class Resources: @dataclasses.dataclass class NodeImages: """ - Custom images for a ray cluster. We currently support Ray v2.4 and python v3.10. + Custom images for a ray cluster. We currently support Ray v2.9 and python v3.10. The custom images must be extended from the following base images: - "{region}-docker.pkg.dev/vertex-ai/training/ray-cpu.2-4.py310:latest" or - "{region}-docker.pkg.dev/vertex-ai/training/ray-gpu.2-4.py310:latest". In + "{region}-docker.pkg.dev/vertex-ai/training/ray-cpu.2-9.py310:latest" or + "{region}-docker.pkg.dev/vertex-ai/training/ray-gpu.2-9.py310:latest". In order to use custom images, need to specify both head and worker images. Attributes: @@ -85,7 +85,7 @@ class Cluster: the cluster. state: Describes the cluster state (defined in PersistentResource.State). 
python_version: Python version for the ray cluster (e.g. "3.10"). - ray_version: Ray version for the ray cluster (e.g. "2.4"). + ray_version: Ray version for the ray cluster (e.g. "2.9"). head_node_type: The head node resource. Resources.node_count must be 1. If not set, by default it is a CPU node with machine_type of n1-standard-8. worker_node_types: The list of Resources of the worker nodes. Should not diff --git a/noxfile.py b/noxfile.py index 83293f34f8..448ee0f6b5 100644 --- a/noxfile.py +++ b/noxfile.py @@ -195,7 +195,7 @@ def unit(session): @nox.session(python="3.10") -@nox.parametrize("ray", ["2.4.0", "2.9.3"]) +@nox.parametrize("ray", ["2.9.3"]) def unit_ray(session, ray): # Install all test dependencies, then install this package in-place. diff --git a/setup.py b/setup.py index e11afd0cde..c48ac95247 100644 --- a/setup.py +++ b/setup.py @@ -101,7 +101,8 @@ ] ray_extra_require = [ - # Cluster only supports 2.4.0 and 2.9.3 + # Cluster only supports 2.9.3. Keep 2.4.0 for our testing environment. + # Note that testing is submiting a job in a cluster with Ray 2.9.3 remotely. ( "ray[default] >= 2.4, <= 2.9.3,!= 2.5.*,!= 2.6.*,!= 2.7.*,!=" " 2.8.*,!=2.9.0,!=2.9.1,!=2.9.2; python_version<'3.11'" @@ -126,10 +127,7 @@ ray_testing_extra_require = ray_extra_require + [ "pytest-xdist", # ray train extras required for prediction tests - ( - "ray[train] >= 2.4, <= 2.9.3,!= 2.5.*,!= 2.6.*,!= 2.7.*,!=" - " 2.8.*,!=2.9.0,!=2.9.1,!=2.9.2" - ), + "ray[train] == 2.9.3", # Framework version constraints copied from testing_extra_require "scikit-learn", "tensorflow", diff --git a/tests/system/vertex_ray/test_cluster_management.py b/tests/system/vertex_ray/test_cluster_management.py index 89d46446cc..8dc8b3fe33 100644 --- a/tests/system/vertex_ray/test_cluster_management.py +++ b/tests/system/vertex_ray/test_cluster_management.py @@ -23,7 +23,7 @@ import ray # Local ray version will always be 2.4 regardless of cluster version due to -# depenency conflicts +# depenency conflicts. Remote job execution's Ray version is 2.9. RAY_VERSION = "2.4.0" PROJECT_ID = "ucaip-sample-tests" @@ -31,7 +31,7 @@ class TestClusterManagement(e2e_base.TestEndToEnd): _temp_prefix = "temp-rov-cluster-management" - @pytest.mark.parametrize("cluster_ray_version", ["2.4", "2.9"]) + @pytest.mark.parametrize("cluster_ray_version", ["2.9"]) def test_cluster_management(self, cluster_ray_version): assert ray.__version__ == RAY_VERSION aiplatform.init(project=PROJECT_ID, location="us-central1") diff --git a/tests/system/vertex_ray/test_job_submission_dashboard.py b/tests/system/vertex_ray/test_job_submission_dashboard.py index b7c4256851..0056e3ef9e 100644 --- a/tests/system/vertex_ray/test_job_submission_dashboard.py +++ b/tests/system/vertex_ray/test_job_submission_dashboard.py @@ -27,7 +27,7 @@ import tempfile # Local ray version will always be 2.4 regardless of cluster version due to -# depenency conflicts +# depenency conflicts. Remote job execution's Ray version is 2.9. 
RAY_VERSION = "2.4.0" PROJECT_ID = "ucaip-sample-tests" @@ -35,7 +35,7 @@ class TestJobSubmissionDashboard(e2e_base.TestEndToEnd): _temp_prefix = "temp-job-submission-dashboard" - @pytest.mark.parametrize("cluster_ray_version", ["2.4", "2.9"]) + @pytest.mark.parametrize("cluster_ray_version", ["2.9"]) def test_job_submission_dashboard(self, cluster_ray_version): assert ray.__version__ == RAY_VERSION aiplatform.init(project=PROJECT_ID, location="us-central1") diff --git a/tests/system/vertex_ray/test_ray_data.py b/tests/system/vertex_ray/test_ray_data.py index 22651caa52..9b19acfc1a 100644 --- a/tests/system/vertex_ray/test_ray_data.py +++ b/tests/system/vertex_ray/test_ray_data.py @@ -27,33 +27,11 @@ import tempfile # Local ray version will always be 2.4 regardless of cluster version due to -# depenency conflicts +# depenency conflicts. Remote job execution's Ray version is 2.9. RAY_VERSION = "2.4.0" SDK_VERSION = aiplatform.__version__ PROJECT_ID = "ucaip-sample-tests" -my_script_ray24 = """ -import ray -from vertex_ray import BigQueryDatasource - -parallelism = 10 -query = "SELECT * FROM `bigquery-public-data.ml_datasets.ulb_fraud_detection` LIMIT 10000000" - -ds = ray.data.read_datasource( - BigQueryDatasource(), - parallelism=parallelism, - query=query -) -# The reads are lazy, so the end time cannot be captured until ds.fully_executed() is called -ds.fully_executed() - -# Write -ds.write_datasource( - BigQueryDatasource(), - dataset='bugbashbq1.system_test_write', -) -""" - my_script_ray29 = """ import ray import vertex_ray @@ -76,13 +54,13 @@ ) """ -my_script = {"2.4": my_script_ray24, "2.9": my_script_ray29} +my_script = {"2.9": my_script_ray29} class TestRayData(e2e_base.TestEndToEnd): _temp_prefix = "temp-ray-data" - @pytest.mark.parametrize("cluster_ray_version", ["2.4", "2.9"]) + @pytest.mark.parametrize("cluster_ray_version", ["2.9"]) def test_ray_data(self, cluster_ray_version): head_node_type = vertex_ray.Resources() worker_node_types = [ diff --git a/tests/unit/vertex_ray/conftest.py b/tests/unit/vertex_ray/conftest.py index 9bebe10e1f..de20c135e0 100644 --- a/tests/unit/vertex_ray/conftest.py +++ b/tests/unit/vertex_ray/conftest.py @@ -19,16 +19,16 @@ from google.auth import credentials as auth_credentials from google.cloud import resourcemanager from google.cloud.aiplatform import vertex_ray -from google.cloud.aiplatform_v1beta1.services.persistent_resource_service import ( +from google.cloud.aiplatform_v1.services.persistent_resource_service import ( PersistentResourceServiceClient, ) -from google.cloud.aiplatform_v1beta1.types.persistent_resource import ( +from google.cloud.aiplatform_v1.types.persistent_resource import ( PersistentResource, ) -from google.cloud.aiplatform_v1beta1.types.persistent_resource import ( +from google.cloud.aiplatform_v1.types.persistent_resource import ( ResourceRuntime, ) -from google.cloud.aiplatform_v1beta1.types.persistent_resource_service import ( +from google.cloud.aiplatform_v1.types.persistent_resource_service import ( DeletePersistentResourceRequest, ) import test_constants as tc diff --git a/tests/unit/vertex_ray/test_cluster_init.py b/tests/unit/vertex_ray/test_cluster_init.py index c0992e5e03..fcb4fa5b6e 100644 --- a/tests/unit/vertex_ray/test_cluster_init.py +++ b/tests/unit/vertex_ray/test_cluster_init.py @@ -22,10 +22,10 @@ Resources, NodeImages, ) -from google.cloud.aiplatform_v1beta1.services.persistent_resource_service import ( +from google.cloud.aiplatform_v1.services.persistent_resource_service import ( 
PersistentResourceServiceClient, ) -from google.cloud.aiplatform_v1beta1.types import persistent_resource_service +from google.cloud.aiplatform_v1.types import persistent_resource_service import test_constants as tc import mock import pytest @@ -478,6 +478,15 @@ def test_create_ray_cluster_byosa_success( request, ) + def test_create_ray_cluster_2_4_deprecated_error(self): + with pytest.raises(RuntimeError) as e: + vertex_ray.create_ray_cluster( + head_node_type=Resources(node_count=3), + network=tc.ProjectConstants.TEST_VPC_NETWORK, + ray_version="2.4", + ) + e.match(regexp=r"Please use Ray version = 2.9.3") + def test_create_ray_cluster_head_multinode_error(self): with pytest.raises(ValueError) as e: vertex_ray.create_ray_cluster( @@ -490,7 +499,7 @@ def test_create_ray_cluster_python_version_error(self): with pytest.raises(ValueError) as e: vertex_ray.create_ray_cluster( network=tc.ProjectConstants.TEST_VPC_NETWORK, - python_version="3_8", + python_version="3.8", ) e.match(regexp=r"The supported Python version is 3") @@ -498,7 +507,7 @@ def test_create_ray_cluster_ray_version_error(self): with pytest.raises(ValueError) as e: vertex_ray.create_ray_cluster( network=tc.ProjectConstants.TEST_VPC_NETWORK, - ray_version="2_1", + ray_version="2.1", ) e.match(regexp=r"The supported Ray versions are ") diff --git a/tests/unit/vertex_ray/test_constants.py b/tests/unit/vertex_ray/test_constants.py index 866d142d3d..8326b81595 100644 --- a/tests/unit/vertex_ray/test_constants.py +++ b/tests/unit/vertex_ray/test_constants.py @@ -22,27 +22,27 @@ from google.cloud.aiplatform.vertex_ray.util.resources import ( Resources, ) -from google.cloud.aiplatform_v1beta1.types.machine_resources import DiskSpec -from google.cloud.aiplatform_v1beta1.types.machine_resources import ( +from google.cloud.aiplatform_v1.types.machine_resources import DiskSpec +from google.cloud.aiplatform_v1.types.machine_resources import ( MachineSpec, ) -from google.cloud.aiplatform_v1beta1.types.persistent_resource import ( +from google.cloud.aiplatform_v1.types.persistent_resource import ( PersistentResource, ) -from google.cloud.aiplatform_v1beta1.types.persistent_resource import ( +from google.cloud.aiplatform_v1.types.persistent_resource import ( RayMetricSpec, ) -from google.cloud.aiplatform_v1beta1.types.persistent_resource import RaySpec -from google.cloud.aiplatform_v1beta1.types.persistent_resource import ( +from google.cloud.aiplatform_v1.types.persistent_resource import RaySpec +from google.cloud.aiplatform_v1.types.persistent_resource import ( ResourcePool, ) -from google.cloud.aiplatform_v1beta1.types.persistent_resource import ( +from google.cloud.aiplatform_v1.types.persistent_resource import ( ResourceRuntime, ) -from google.cloud.aiplatform_v1beta1.types.persistent_resource import ( +from google.cloud.aiplatform_v1.types.persistent_resource import ( ResourceRuntimeSpec, ) -from google.cloud.aiplatform_v1beta1.types.persistent_resource import ( +from google.cloud.aiplatform_v1.types.persistent_resource import ( ServiceAccountSpec, ) import pytest diff --git a/tests/unit/vertex_ray/test_ray_prediction.py b/tests/unit/vertex_ray/test_ray_prediction.py index 020d93e465..fdab42c425 100644 --- a/tests/unit/vertex_ray/test_ray_prediction.py +++ b/tests/unit/vertex_ray/test_ray_prediction.py @@ -65,11 +65,6 @@ def ray_tensorflow_checkpoint(): @pytest.fixture() def ray_checkpoint_from_dict(): - if ray.__version__ == "2.4.0": - checkpoint_data = {"data": 123} - return ray.air.checkpoint.Checkpoint.from_dict(checkpoint_data) - - 
# Checkpoint.from_dict() removed in future versions try: return ray.train.Checkpoint.from_directory("/tmp/checkpoint") except AttributeError: From f9d4b515c38a24746c99df2603382a94b74f1e42 Mon Sep 17 00:00:00 2001 From: Jaycee Li Date: Tue, 4 Jun 2024 13:48:37 -0700 Subject: [PATCH 11/36] chore: GenAI - Fixed typo in the GenAI README PiperOrigin-RevId: 640270936 --- vertexai/generative_models/README.md | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/vertexai/generative_models/README.md b/vertexai/generative_models/README.md index 941a287e80..d30f40448d 100644 --- a/vertexai/generative_models/README.md +++ b/vertexai/generative_models/README.md @@ -133,9 +133,9 @@ print(chat.send_message( #### Automatic Function calling ``` -from vertexai..preview generative_models import GenerativeModel, Tool, FunctionDeclaration, AutomaticFunctionCallingResponder +from vertexai.preview.generative_models import GenerativeModel, Tool, FunctionDeclaration, AutomaticFunctionCallingResponder -# First, create functions that the model is can use to answer your questions. +# First, create functions that the model can use to answer your questions. def get_current_weather(location: str, unit: str = "centigrade"): """Gets weather in the specified location. From 945b9e4835149111cd33beaee4301f3d8f05f59d Mon Sep 17 00:00:00 2001 From: A Vertex SDK engineer Date: Tue, 4 Jun 2024 16:03:05 -0700 Subject: [PATCH 12/36] fix: Fix failed unit tests due to google-cloud-storage upgrade. PiperOrigin-RevId: 640313157 --- tests/unit/aiplatform/test_metadata.py | 5 ++++- tests/unit/aiplatform/test_metadata_models.py | 5 ++++- 2 files changed, 8 insertions(+), 2 deletions(-) diff --git a/tests/unit/aiplatform/test_metadata.py b/tests/unit/aiplatform/test_metadata.py index 775a4430a1..22d2446c9b 100644 --- a/tests/unit/aiplatform/test_metadata.py +++ b/tests/unit/aiplatform/test_metadata.py @@ -91,7 +91,10 @@ _TEST_RUN = "run-1" _TEST_OTHER_RUN = "run-2" _TEST_DISPLAY_NAME = "test-display-name" -_TEST_CREDENTIALS = mock.Mock(spec=credentials.AnonymousCredentials()) +_TEST_CREDENTIALS = mock.Mock( + spec=credentials.AnonymousCredentials(), + universe_domain="googleapis.com", +) _TEST_BUCKET_NAME = "gs://test-bucket" # resource attributes diff --git a/tests/unit/aiplatform/test_metadata_models.py b/tests/unit/aiplatform/test_metadata_models.py index bf3bb1dcbe..df970949c0 100644 --- a/tests/unit/aiplatform/test_metadata_models.py +++ b/tests/unit/aiplatform/test_metadata_models.py @@ -50,7 +50,10 @@ _TEST_PARENT = ( f"projects/{_TEST_PROJECT}/locations/{_TEST_LOCATION}/metadataStores/default" ) -_TEST_CREDENTIALS = mock.Mock(spec=auth_credentials.AnonymousCredentials()) +_TEST_CREDENTIALS = mock.Mock( + spec=auth_credentials.AnonymousCredentials(), + universe_domain="googleapis.com", +) # artifact From 768af6772ade2b67b90a05ae3db95039a3f2786d Mon Sep 17 00:00:00 2001 From: Jaycee Li Date: Wed, 5 Jun 2024 10:41:58 -0700 Subject: [PATCH 13/36] feat: GenAI - Allowed callable functions to return values directly in Automatic Function Calling PiperOrigin-RevId: 640574734 --- tests/unit/vertexai/test_generative_models.py | 47 ++++++++++++++++++- .../generative_models/_generative_models.py | 5 ++ 2 files changed, 51 insertions(+), 1 deletion(-) diff --git a/tests/unit/vertexai/test_generative_models.py b/tests/unit/vertexai/test_generative_models.py index 36a2c1dc72..948d50f12f 100644 --- a/tests/unit/vertexai/test_generative_models.py +++ b/tests/unit/vertexai/test_generative_models.py @@ -950,7 +950,7 @@ def 
test_generate_content_vertex_rag_retriever(self): attribute="generate_content", new=mock_generate_content, ) - def test_chat_automatic_function_calling(self): + def test_chat_automatic_function_calling_with_function_returning_dict(self): generative_models = preview_generative_models get_current_weather_func = generative_models.FunctionDeclaration.from_func( get_current_weather @@ -984,6 +984,51 @@ def test_chat_automatic_function_calling(self): chat2.send_message("What is the weather like in Boston?") assert err.match("Exceeded the maximum") + @mock.patch.object( + target=prediction_service.PredictionServiceClient, + attribute="generate_content", + new=mock_generate_content, + ) + def test_chat_automatic_function_calling_with_function_returning_value(self): + # Define a new function that returns a value instead of a dict. + def get_current_weather(location: str): + """Gets weather in the specified location. + + Args: + location: The location for which to get the weather. + + Returns: + The weather information as a str. + """ + if location == "Boston": + return "Super nice, but maybe a bit hot." + return "Unavailable" + + generative_models = preview_generative_models + get_current_weather_func = generative_models.FunctionDeclaration.from_func( + get_current_weather + ) + weather_tool = generative_models.Tool( + function_declarations=[get_current_weather_func], + ) + + model = generative_models.GenerativeModel( + "gemini-pro", + # Specifying the tools once to avoid specifying them in every request + tools=[weather_tool], + ) + afc_responder = generative_models.AutomaticFunctionCallingResponder( + max_automatic_function_calls=5, + ) + chat = model.start_chat(responder=afc_responder) + + response1 = chat.send_message("What is the weather like in Boston?") + assert response1.text.startswith("The weather in Boston is") + assert "nice" in response1.text + assert len(chat.history) == 4 + assert chat.history[-3].parts[0].function_call + assert chat.history[-2].parts[0].function_response + EXPECTED_SCHEMA_FOR_GET_CURRENT_WEATHER = { "title": "get_current_weather", diff --git a/vertexai/generative_models/_generative_models.py b/vertexai/generative_models/_generative_models.py index 192928e935..85afc688a5 100644 --- a/vertexai/generative_models/_generative_models.py +++ b/vertexai/generative_models/_generative_models.py @@ -15,6 +15,7 @@ """Classes for working with generative models.""" # pylint: disable=bad-continuation, line-too-long, protected-access +from collections.abc import Mapping import copy import io import json @@ -2422,6 +2423,10 @@ def respond_to_model_response( # due to: AttributeError: type object 'MapComposite' has no attribute 'to_dict' function_args = type(function_call).to_dict(function_call)["args"] function_call_result = callable_function._function(**function_args) + if not isinstance(function_call_result, Mapping): + # If the function returns a single value, wrap it in the + # format that Part.from_function_response can accept. + function_call_result = {"result": function_call_result} except Exception as ex: raise RuntimeError( f"""Error raised when calling function "{function_call.name}" as requested by the model.""" From ebb8f6233bfaf7bbfbc8a5fbfb22cbc560f23557 Mon Sep 17 00:00:00 2001 From: Matthew Tang Date: Wed, 5 Jun 2024 12:36:57 -0700 Subject: [PATCH 14/36] chore: Fix typo in featurestore validate_id error message. 
PiperOrigin-RevId: 640614155 --- google/cloud/aiplatform/utils/featurestore_utils.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/google/cloud/aiplatform/utils/featurestore_utils.py b/google/cloud/aiplatform/utils/featurestore_utils.py index b57824e15f..f40cff1f26 100644 --- a/google/cloud/aiplatform/utils/featurestore_utils.py +++ b/google/cloud/aiplatform/utils/featurestore_utils.py @@ -57,7 +57,7 @@ def validate_id(resource_id: str) -> None: ValueError if resource_id is invalid. """ if not re.compile(r"^" + RESOURCE_ID_PATTERN_REGEX + r"$").match(resource_id): - raise ValueError("Resource ID {resource_id} is not a valied resource id.") + raise ValueError("Resource ID {resource_id} is not a valid resource id.") def validate_feature_id(feature_id: str) -> None: From ec4ec8f1214b3da12728c30a002b7f4632f4a90e Mon Sep 17 00:00:00 2001 From: Amy Wu Date: Wed, 5 Jun 2024 12:37:01 -0700 Subject: [PATCH 15/36] BREAKING_CHANGE: deprecate Vertex SDK data science package PiperOrigin-RevId: 640614179 --- tests/unit/aiplatform/test_language_models.py | 88 - tests/unit/aiplatform/test_vision_models.py | 37 +- tests/unit/vertexai/test_any_serializer.py | 1482 ----------- .../unit/vertexai/test_data_serializer_dev.py | 149 -- tests/unit/vertexai/test_developer_mark.py | 295 --- tests/unit/vertexai/test_model_utils.py | 648 ----- .../vertexai/test_persistent_resource_util.py | 231 -- .../test_remote_container_training.py | 586 ----- tests/unit/vertexai/test_remote_prediction.py | 139 -- tests/unit/vertexai/test_remote_specs.py | 712 ------ tests/unit/vertexai/test_remote_training.py | 2167 ----------------- tests/unit/vertexai/test_serializers.py | 1414 ----------- tests/unit/vertexai/test_serializers_base.py | 64 - tests/unit/vertexai/test_tabnet_trainer.py | 812 ------ .../test_vizier_hyperparameter_tuner.py | 1850 -------------- vertexai/preview/__init__.py | 26 - vertexai/preview/_workflow/__init__.py | 15 - vertexai/preview/_workflow/driver/__init__.py | 276 --- vertexai/preview/_workflow/driver/remote.py | 115 - .../preview/_workflow/executor/__init__.py | 54 - .../executor/persistent_resource_util.py | 265 -- .../preview/_workflow/executor/prediction.py | 38 - .../executor/remote_container_training.py | 220 -- .../preview/_workflow/executor/training.py | 839 ------- .../_workflow/executor/training_script.py | 234 -- .../preview/_workflow/launcher/__init__.py | 56 - .../serialization_engine/__init__.py | 16 - .../serialization_engine/any_serializer.py | 578 ----- .../serialization_engine/serializers.py | 1422 ----------- .../serialization_engine/serializers_base.py | 278 --- vertexai/preview/_workflow/shared/__init__.py | 43 - vertexai/preview/_workflow/shared/configs.py | 373 --- .../preview/_workflow/shared/constants.py | 39 - .../_workflow/shared/data_serializer_utils.py | 186 -- .../_workflow/shared/data_structures.py | 68 - .../preview/_workflow/shared/model_utils.py | 489 ---- .../_workflow/shared/supported_frameworks.py | 367 --- vertexai/preview/developer/__init__.py | 56 - vertexai/preview/developer/base_classes.py | 28 - vertexai/preview/developer/mark.py | 197 -- vertexai/preview/developer/remote_specs.py | 892 ------- .../preview/hyperparameter_tuning/__init__.py | 30 - .../vizier_hyperparameter_tuner.py | 984 -------- vertexai/preview/initializer.py | 125 - vertexai/preview/tabular_models/__init__.py | 29 - .../preview/tabular_models/tabnet_trainer.py | 412 ---- vertexai/resources/preview/__init__.py | 5 +- 47 files changed, 5 insertions(+), 19424 deletions(-) 
delete mode 100644 tests/unit/vertexai/test_any_serializer.py delete mode 100644 tests/unit/vertexai/test_data_serializer_dev.py delete mode 100644 tests/unit/vertexai/test_developer_mark.py delete mode 100644 tests/unit/vertexai/test_model_utils.py delete mode 100644 tests/unit/vertexai/test_persistent_resource_util.py delete mode 100644 tests/unit/vertexai/test_remote_container_training.py delete mode 100644 tests/unit/vertexai/test_remote_prediction.py delete mode 100644 tests/unit/vertexai/test_remote_specs.py delete mode 100644 tests/unit/vertexai/test_remote_training.py delete mode 100644 tests/unit/vertexai/test_serializers.py delete mode 100644 tests/unit/vertexai/test_serializers_base.py delete mode 100644 tests/unit/vertexai/test_tabnet_trainer.py delete mode 100644 tests/unit/vertexai/test_vizier_hyperparameter_tuner.py delete mode 100644 vertexai/preview/_workflow/__init__.py delete mode 100644 vertexai/preview/_workflow/driver/__init__.py delete mode 100644 vertexai/preview/_workflow/driver/remote.py delete mode 100644 vertexai/preview/_workflow/executor/__init__.py delete mode 100644 vertexai/preview/_workflow/executor/persistent_resource_util.py delete mode 100644 vertexai/preview/_workflow/executor/prediction.py delete mode 100644 vertexai/preview/_workflow/executor/remote_container_training.py delete mode 100644 vertexai/preview/_workflow/executor/training.py delete mode 100644 vertexai/preview/_workflow/executor/training_script.py delete mode 100644 vertexai/preview/_workflow/launcher/__init__.py delete mode 100644 vertexai/preview/_workflow/serialization_engine/__init__.py delete mode 100644 vertexai/preview/_workflow/serialization_engine/any_serializer.py delete mode 100644 vertexai/preview/_workflow/serialization_engine/serializers.py delete mode 100644 vertexai/preview/_workflow/serialization_engine/serializers_base.py delete mode 100644 vertexai/preview/_workflow/shared/__init__.py delete mode 100644 vertexai/preview/_workflow/shared/configs.py delete mode 100644 vertexai/preview/_workflow/shared/constants.py delete mode 100644 vertexai/preview/_workflow/shared/data_serializer_utils.py delete mode 100644 vertexai/preview/_workflow/shared/data_structures.py delete mode 100644 vertexai/preview/_workflow/shared/model_utils.py delete mode 100644 vertexai/preview/_workflow/shared/supported_frameworks.py delete mode 100644 vertexai/preview/developer/__init__.py delete mode 100644 vertexai/preview/developer/base_classes.py delete mode 100644 vertexai/preview/developer/mark.py delete mode 100644 vertexai/preview/developer/remote_specs.py delete mode 100644 vertexai/preview/hyperparameter_tuning/__init__.py delete mode 100644 vertexai/preview/hyperparameter_tuning/vizier_hyperparameter_tuner.py delete mode 100644 vertexai/preview/initializer.py delete mode 100644 vertexai/preview/tabular_models/__init__.py delete mode 100644 vertexai/preview/tabular_models/tabnet_trainer.py diff --git a/tests/unit/aiplatform/test_language_models.py b/tests/unit/aiplatform/test_language_models.py index 443de6aa95..38242ae001 100644 --- a/tests/unit/aiplatform/test_language_models.py +++ b/tests/unit/aiplatform/test_language_models.py @@ -65,7 +65,6 @@ prediction_service as gca_prediction_service_v1beta1, ) -import vertexai from vertexai.preview import ( language_models as preview_language_models, ) @@ -4736,93 +4735,6 @@ def test_batch_prediction_for_text_embedding(self): model_parameters={}, ) - def test_text_generation_top_level_from_pretrained_preview(self): - """Tests the text generation 
model.""" - aiplatform.init( - project=_TEST_PROJECT, - location=_TEST_LOCATION, - ) - with mock.patch.object( - target=model_garden_service_client.ModelGardenServiceClient, - attribute="get_publisher_model", - return_value=gca_publisher_model.PublisherModel( - _TEXT_BISON_PUBLISHER_MODEL_DICT - ), - ) as mock_get_publisher_model: - model = vertexai.preview.from_pretrained( - foundation_model_name="text-bison@001" - ) - - assert isinstance(model, preview_language_models.TextGenerationModel) - - mock_get_publisher_model.assert_called_with( - name="publishers/google/models/text-bison@001", retry=base._DEFAULT_RETRY - ) - assert mock_get_publisher_model.call_count == 1 - - assert ( - model._model_resource_name - == f"projects/{_TEST_PROJECT}/locations/{_TEST_LOCATION}/publishers/google/models/text-bison@001" - ) - - # Test that methods on TextGenerationModel still work as expected - gca_predict_response = gca_prediction_service.PredictResponse() - gca_predict_response.predictions.append(_TEST_TEXT_GENERATION_PREDICTION) - - with mock.patch.object( - target=prediction_service_client.PredictionServiceClient, - attribute="predict", - return_value=gca_predict_response, - ): - response = model.predict( - "What is the best recipe for banana bread? Recipe:", - max_output_tokens=128, - temperature=0.0, - top_p=1.0, - top_k=5, - ) - - assert response.text == _TEST_TEXT_GENERATION_PREDICTION["content"] - assert ( - response.raw_prediction_response.predictions[0] - == _TEST_TEXT_GENERATION_PREDICTION - ) - assert ( - response.safety_attributes["Violent"] - == _TEST_TEXT_GENERATION_PREDICTION["safetyAttributes"]["scores"][0] - ) - - def test_text_embedding_top_level_from_pretrained_preview(self): - """Tests the text embedding model.""" - aiplatform.init( - project=_TEST_PROJECT, - location=_TEST_LOCATION, - ) - with mock.patch.object( - target=model_garden_service_client.ModelGardenServiceClient, - attribute="get_publisher_model", - return_value=gca_publisher_model.PublisherModel( - _TEXT_EMBEDDING_GECKO_PUBLISHER_MODEL_DICT - ), - ) as mock_get_publisher_model: - model = vertexai.preview.from_pretrained( - foundation_model_name="textembedding-gecko@001" - ) - - assert isinstance(model, preview_language_models.TextEmbeddingModel) - - assert ( - model._endpoint_name - == f"projects/{_TEST_PROJECT}/locations/{_TEST_LOCATION}/publishers/google/models/textembedding-gecko@001" - ) - - mock_get_publisher_model.assert_called_with( - name="publishers/google/models/textembedding-gecko@001", - retry=base._DEFAULT_RETRY, - ) - - assert mock_get_publisher_model.call_count == 1 - # TODO (b/285946649): add more test coverage before public preview release @pytest.mark.usefixtures("google_auth_mock") diff --git a/tests/unit/aiplatform/test_vision_models.py b/tests/unit/aiplatform/test_vision_models.py index 6c19870401..14bc293582 100644 --- a/tests/unit/aiplatform/test_vision_models.py +++ b/tests/unit/aiplatform/test_vision_models.py @@ -39,7 +39,7 @@ from google.cloud.aiplatform.compat.types import ( publisher_model as gca_publisher_model, ) -import vertexai + from vertexai import vision_models as ga_vision_models from vertexai.preview import ( vision_models as preview_vision_models, @@ -221,34 +221,6 @@ def _get_image_generation_model( return model - def _get_preview_image_generation_model_top_level_from_pretrained( - self, - ) -> preview_vision_models.ImageGenerationModel: - """Gets the image generation model from the top-level vertexai.preview.from_pretrained method.""" - aiplatform.init( - project=_TEST_PROJECT, - 
location=_TEST_LOCATION, - ) - with mock.patch.object( - target=model_garden_service_client.ModelGardenServiceClient, - attribute="get_publisher_model", - return_value=gca_publisher_model.PublisherModel( - _IMAGE_GENERATION_PUBLISHER_MODEL_DICT - ), - ) as mock_get_publisher_model: - model = vertexai.preview.from_pretrained( - foundation_model_name="imagegeneration@002" - ) - - mock_get_publisher_model.assert_called_with( - name="publishers/google/models/imagegeneration@002", - retry=base._DEFAULT_RETRY, - ) - - assert mock_get_publisher_model.call_count == 1 - - return model - def test_from_pretrained(self): model = self._get_image_generation_model() assert ( @@ -256,13 +228,6 @@ def test_from_pretrained(self): == f"projects/{_TEST_PROJECT}/locations/{_TEST_LOCATION}/publishers/google/models/imagegeneration@002" ) - def test_top_level_from_pretrained_preview(self): - model = self._get_preview_image_generation_model_top_level_from_pretrained() - assert ( - model._endpoint_name - == f"projects/{_TEST_PROJECT}/locations/{_TEST_LOCATION}/publishers/google/models/imagegeneration@002" - ) - def test_generate_images(self): """Tests the image generation model.""" model = self._get_image_generation_model() diff --git a/tests/unit/vertexai/test_any_serializer.py b/tests/unit/vertexai/test_any_serializer.py deleted file mode 100644 index 4d8a9227eb..0000000000 --- a/tests/unit/vertexai/test_any_serializer.py +++ /dev/null @@ -1,1482 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2023 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# - -import mock -import pytest - -import cloudpickle -import logging -import json -import os -from typing import Any - -import vertexai -from vertexai.preview import developer -from vertexai.preview._workflow.serialization_engine import ( - any_serializer, - serializers, - serializers_base, -) -from vertexai.preview._workflow.shared import constants - -import pandas as pd -import sklearn -from sklearn.linear_model import LogisticRegression -import tensorflow as tf -from tensorflow import keras -import torch - -try: - # pylint: disable=g-import-not-at-top - import lightning.pytorch as pl -except ImportError: - pl = None - -try: - import bigframes as bf -except ImportError: - bf = None - -# lightning trainer and bigframes dataframe are not in this scheme since -# the test environment may not have these packages. 
-_TEST_SERIALIZATION_SCHEME = { - object: serializers.CloudPickleSerializer, - sklearn.base.BaseEstimator: serializers.SklearnEstimatorSerializer, - keras.models.Model: serializers.KerasModelSerializer, - keras.callbacks.History: serializers.KerasHistoryCallbackSerializer, - tf.data.Dataset: serializers.TFDatasetSerializer, - torch.nn.Module: serializers.TorchModelSerializer, - torch.utils.data.DataLoader: serializers.TorchDataLoaderSerializer, - pd.DataFrame: serializers.PandasDataSerializer, -} - - -@pytest.fixture -def any_serializer_instance(): - return any_serializer.AnySerializer() - - -@pytest.fixture -def torch_dataloader_serializer(): - return serializers.TorchDataLoaderSerializer() - - -@pytest.fixture -def bigframe_serializer(): - return serializers.BigframeSerializer() - - -@pytest.fixture -def tf_dataset_serializer(): - return serializers.TFDatasetSerializer() - - -@pytest.fixture -def mock_keras_model_serialize(): - def stateful_serialize(self, to_serialize, gcs_path): - del self, to_serialize - serializers.KerasModelSerializer._metadata.dependencies = ["keras==1.0.0"] - return gcs_path - - with mock.patch.object( - serializers.KerasModelSerializer, "serialize", new=stateful_serialize - ) as keras_model_serialize: - yield keras_model_serialize - serializers.KerasModelSerializer._metadata.dependencies = [] - - -@pytest.fixture -def mock_keras_model_deserialize(): - with mock.patch.object( - serializers.KerasModelSerializer, "deserialize", autospec=True - ) as keras_model_deserialize: - yield keras_model_deserialize - - -@pytest.fixture -def mock_sklearn_estimator_serialize(): - def stateful_serialize(self, to_serialize, gcs_path): - del self, to_serialize - serializers.SklearnEstimatorSerializer._metadata.dependencies = [ - "sklearn_dependency1==1.0.0" - ] - return gcs_path - - with mock.patch.object( - serializers.SklearnEstimatorSerializer, - "serialize", - new=stateful_serialize, - ) as sklearn_estimator_serialize: - yield sklearn_estimator_serialize - serializers.SklearnEstimatorSerializer._metadata.dependencies = [] - - -@pytest.fixture -def mock_sklearn_estimator_deserialize(): - with mock.patch.object( - serializers.SklearnEstimatorSerializer, "deserialize", autospec=True - ) as sklearn_estimator_deserialize: - yield sklearn_estimator_deserialize - - -@pytest.fixture -def mock_torch_model_serialize(): - def stateful_serialize(self, to_serialize, gcs_path): - del self, to_serialize - serializers.TorchModelSerializer._metadata.dependencies = ["torch==1.0.0"] - return gcs_path - - with mock.patch.object( - serializers.TorchModelSerializer, "serialize", new=stateful_serialize - ) as torch_model_serialize: - yield torch_model_serialize - serializers.TorchModelSerializer._metadata.dependencies = [] - - -@pytest.fixture -def mock_torch_model_deserialize(): - with mock.patch.object( - serializers.TorchModelSerializer, "deserialize", autospec=True - ) as torch_model_deserialize: - yield torch_model_deserialize - - -@pytest.fixture -def mock_torch_dataloader_serialize(tmp_path): - def stateful_serialize(self, to_serialize, gcs_path): - del self, to_serialize - serializers.TorchDataLoaderSerializer._metadata.dependencies = ["torch==1.0.0"] - return gcs_path - - with mock.patch.object( - serializers.TorchDataLoaderSerializer, "serialize", new=stateful_serialize - ) as torch_dataloader_serialize: - yield torch_dataloader_serialize - serializers.TorchDataLoaderSerializer._metadata.dependencies = [] - - -@pytest.fixture -def mock_torch_dataloader_deserialize(): - with 
mock.patch.object( - serializers.TorchDataLoaderSerializer, "deserialize", autospec=True - ) as torch_dataloader_serializer: - yield torch_dataloader_serializer - - -@pytest.fixture -def mock_tf_dataset_serialize(tmp_path): - def stateful_serialize(self, to_serialize, gcs_path): - serializers.TFDatasetSerializer._metadata.dependencies = ["tensorflow==1.0.0"] - try: - to_serialize.save(str(tmp_path / "tf_dataset")) - except AttributeError: - tf.data.experimental.save(to_serialize, str(tmp_path / "tf_dataset")) - return gcs_path - - with mock.patch.object( - serializers.TFDatasetSerializer, "serialize", new=stateful_serialize - ) as tf_dataset_serialize: - yield tf_dataset_serialize - serializers.TFDatasetSerializer._metadata.dependencies = [] - - -@pytest.fixture -def mock_tf_dataset_deserialize(): - with mock.patch.object( - serializers.TFDatasetSerializer, "deserialize", autospec=True - ) as tf_dataset_serializer: - yield tf_dataset_serializer - - -@pytest.fixture -def mock_pandas_data_serialize(): - def stateful_serialize(self, to_serialize, gcs_path): - del self, to_serialize - serializers.PandasDataSerializer._metadata.dependencies = ["pandas==1.0.0"] - return gcs_path - - with mock.patch.object( - serializers.PandasDataSerializer, "serialize", new=stateful_serialize - ) as data_serialize: - yield data_serialize - serializers.PandasDataSerializer._metadata.dependencies = [] - - -@pytest.fixture -def mock_pandas_data_deserialize(): - with mock.patch.object( - serializers.PandasDataSerializer, "deserialize", autospec=True - ) as pandas_data_deserialize: - yield pandas_data_deserialize - - -# TODO(b/295338623): Test correctness of Bigframes serialize/deserialize -@pytest.fixture -def mock_bigframe_deserialize_sklearn(): - with mock.patch.object( - serializers.BigframeSerializer, "_deserialize_sklearn", autospec=True - ) as bigframe_deserialize_sklearn: - yield bigframe_deserialize_sklearn - - -# TODO(b/295338623): Test correctness of Bigframes serialize/deserialize -@pytest.fixture -def mock_bigframe_deserialize_torch(): - with mock.patch.object( - serializers.BigframeSerializer, "_deserialize_torch", autospec=True - ) as bigframe_deserialize_torch: - yield bigframe_deserialize_torch - - -# TODO(b/295338623): Test correctness of Bigframes serialize/deserialize -@pytest.fixture -def mock_bigframe_deserialize_tensorflow(): - with mock.patch.object( - serializers.BigframeSerializer, "_deserialize_tensorflow", autospec=True - ) as bigframe_deserialize_tensorflow: - yield bigframe_deserialize_tensorflow - - -@pytest.fixture -def mock_cloudpickle_serialize(): - def stateful_serialize(self, to_serialize, gcs_path, **kwargs): - del self, to_serialize, kwargs - serializers.CloudPickleSerializer._metadata.dependencies = [ - "cloudpickle==1.0.0" - ] - return gcs_path - - with mock.patch.object( - serializers.CloudPickleSerializer, "serialize", new=stateful_serialize - ) as cloudpickle_serialize: - yield cloudpickle_serialize - serializers.CloudPickleSerializer._metadata.dependencies = [] - - -@pytest.fixture -def mock_cloudpickle_deserialize(): - with mock.patch.object( - serializers.CloudPickleSerializer, "deserialize", autospec=True - ) as cloudpickle_deserialize: - yield cloudpickle_deserialize - - -class TestTorchClass(torch.nn.Module): - def __init__(self, input_size=4): - super().__init__() - self.linear_relu_stack = torch.nn.Sequential( - torch.nn.Linear(input_size, 3), torch.nn.ReLU(), torch.nn.Linear(3, 2) - ) - - def forward(self, x): - logits = self.linear_relu_stack(x) - return logits - 
- -class TestAnySerializer: - """Tests that AnySerializer is acting as 'controller' and router.""" - - def test_any_serializer_register_predefined_serializers(self, caplog): - with caplog.at_level( - level=logging.DEBUG, logger="vertexai.serialization_engine" - ): - serializers_base.Serializer._instances = {} - serializer_instance = any_serializer.AnySerializer() - - if pl: - _TEST_SERIALIZATION_SCHEME[ - pl.Trainer - ] = serializers.LightningTrainerSerializer - else: - # Lightning trainer is not registered. - # Check the logs to make sure we tried to register them. - assert ( - f"Failed to register {serializers.LightningTrainerSerializer} due to" - in caplog.text - ) - - if bf: - from bigframes.dataframe import DataFrame - - _TEST_SERIALIZATION_SCHEME[DataFrame] = serializers.BigframeSerializer - else: - # Bigframes dataframe is not registered. - # Check the logs to make sure we tried to register them. - assert ( - f"Failed to register {serializers.BigframeSerializer} due to" - in caplog.text - ) - - assert ( - serializer_instance._serialization_scheme == _TEST_SERIALIZATION_SCHEME - ) - - def test_any_serializer_with_wrapped_class(self): - # Reset the serializer instances - serializers_base.Serializer._instances = {} - - # Wrap a ML class that we have predefined serializer - unwrapped_keras_class = keras.models.Model - keras.models.Model = vertexai.preview.remote(keras.models.Model) - - try: - # Assert that AnySerializer still registered the original class - serializer_instance = any_serializer.AnySerializer() - assert keras.models.Model not in serializer_instance._serialization_scheme - assert unwrapped_keras_class in serializer_instance._serialization_scheme - assert ( - serializer_instance._serialization_scheme[unwrapped_keras_class] - == serializers.KerasModelSerializer - ) - except Exception as e: - raise e - finally: - # Revert the class after testing - keras.models.Model = unwrapped_keras_class - - def test_any_serializer_global_metadata_created( - self, mock_cloudpickle_serialize, any_serializer_instance, tmp_path - ): - # Arrange - class RandomClass: - pass - - class Nested: - pass - - obj = RandomClass() - os.makedirs(tmp_path / "job_id/input") - fake_gcs_path = os.fspath(tmp_path / "job_id/input/random_obj") - - param_to_be_serialized = Nested() - # param_to_be_serialized will be serialized to this path - expected_extra_obj_param_path = os.fspath( - tmp_path / "job_id/input/serialization_args/extra_obj_param" - ) - expected_serialized = { - str(fake_gcs_path): any_serializer.SerializedEntryMetadata( - serialization_id=id(obj), - obj=obj, - serializer_args={ - "extra_int_param": any_serializer.SerializerArg(value=1), - "extra_float_param": any_serializer.SerializerArg(value=1.0), - "extra_dict_param": any_serializer.SerializerArg( - value={"key1": 10} - ), - "extra_list_param": any_serializer.SerializerArg(value=[0, 1, 2]), - "extra_obj_param": any_serializer.SerializerArg( - gcs_path=expected_extra_obj_param_path - ), - }, - ), - expected_extra_obj_param_path: any_serializer.SerializedEntryMetadata( - serialization_id=id(param_to_be_serialized), - obj=param_to_be_serialized, - serializer_args={}, - ), - } - expected_on_disk_global_metadata = { - "serializer": "AnySerializer", - "dependencies": [], - "custom_commands": [], - "serialized": { - str(fake_gcs_path): { - "serialization_id": id(obj), - "serializer_args": { - "extra_int_param": {"value": 1, "gcs_path": None}, - "extra_float_param": {"value": 1.0, "gcs_path": None}, - "extra_dict_param": {"value": {"key1": 10}, 
"gcs_path": None}, - "extra_list_param": {"value": [0, 1, 2], "gcs_path": None}, - "extra_obj_param": { - "value": None, - "gcs_path": expected_extra_obj_param_path, - }, - }, - }, - expected_extra_obj_param_path: { - "serialization_id": id(param_to_be_serialized), - "serializer_args": {}, - }, - }, - } - - # Act - any_serializer_instance.serialize( - obj, - fake_gcs_path, - extra_int_param=1, - extra_float_param=1.0, - extra_dict_param={"key1": 10}, - extra_list_param=[0, 1, 2], - extra_obj_param=param_to_be_serialized, - ) - - # Assert - # first, assert the content of the in-memory global metadata - assert expected_serialized == any_serializer_instance._metadata.serialized - - # now, assert the content of the global metadata saved to the disk after - # saving to the disk - # Act again - global_metadata_path = os.fspath( - tmp_path / "job_id/input" / "serialization_global_metadata.json" - ) - - any_serializer_instance.save_global_metadata(global_metadata_path) - with open(global_metadata_path, "rb") as f: - metadata = json.load(f) - assert metadata == expected_on_disk_global_metadata - print("the read metadata is ", metadata) - - @mock.patch.object(serializers.CloudPickleSerializer, "serialize", autospec=True) - def test_any_serializer_serialize_custom_model_with_custom_serializer( - self, mock_cloudpickle_serializer_serialize, any_serializer_instance, tmp_path - ): - # Arrange - class CustomModel: - def __init__(self, weight: int = 0): - self.weight = weight - - @developer.mark.train() - def fit(self, X_train, y_train) -> "CustomModel": - self.weight += 1 - return self - - class CustomSerializer(developer.Serializer): - _metadata = developer.SerializationMetadata() - - def serialize( - self, to_serialize: CustomModel, gcs_path: str, extra_para: Any - ) -> str: - del extra_para - return gcs_path - - def deserialize(self, serialized_gcs_path: str) -> CustomModel: - # Pretend that the model is trained - return CustomModel(weight=1) - - CustomSerializer.register_requirements(["custom_dependency==1.0.0"]) - developer.register_serializer(CustomModel, CustomSerializer) - - fake_gcs_path = os.fspath(tmp_path / "job_id/input/input_estimator") - os.makedirs(tmp_path / "job_id/input") - custom_model = CustomModel() - - # Act - any_serializer_instance.serialize(custom_model, fake_gcs_path, extra_para=1) - - # Assert - metadata_path = ( - tmp_path - / f"job_id/input/{serializers_base.SERIALIZATION_METADATA_FILENAME}_input_estimator.json" - ) - - # Metadata should have the correct serializer information - with open(metadata_path, "rb") as f: - metadata = json.load(f) - # Metadata should record the dependency specifiers - assert ( - metadata[serializers_base.SERIALIZATION_METADATA_SERIALIZER_KEY] - == "CustomSerializer" - ) - assert metadata[serializers_base.SERIALIZATION_METADATA_DEPENDENCIES_KEY] == [ - "custom_dependency==1.0.0" - ] - - # During the serialization of the CustomModel object, we also serialize - # the serializer with CloudPicleSerializer. 
- custom_serializer_path = tmp_path / "job_id/input/CustomSerializer" - mock_cloudpickle_serializer_serialize.assert_called_once_with( - any_serializer_instance._instances[serializers.CloudPickleSerializer], - any_serializer_instance._instances[CustomSerializer], - str(custom_serializer_path), - ) - - @pytest.mark.usefixtures("mock_gcs_upload") - def test_any_serializer_serialize_sklearn_estimator( - self, any_serializer_instance, tmp_path, mock_sklearn_estimator_serialize - ): - # Arrange - fake_gcs_path = os.fspath(tmp_path / "job_id/input/input_estimator") - os.makedirs(tmp_path / "job_id/input") - sklearn_estimator = LogisticRegression() - - # Act - any_serializer_instance.serialize(sklearn_estimator, fake_gcs_path) - - # Assert - metadata_path = ( - tmp_path - / f"job_id/input/{serializers_base.SERIALIZATION_METADATA_FILENAME}_input_estimator.json" - ) - - # Metadata should have the correct serializer information - with open(metadata_path, "rb") as f: - metadata = json.load(f) - assert ( - metadata[serializers_base.SERIALIZATION_METADATA_SERIALIZER_KEY] - == "SklearnEstimatorSerializer" - ) - assert metadata[serializers_base.SERIALIZATION_METADATA_DEPENDENCIES_KEY] == [ - "sklearn_dependency1==1.0.0" - ] - - @pytest.mark.usefixtures("mock_gcs_upload") - def test_any_serializer_serialize_keras_model( - self, any_serializer_instance, tmp_path, mock_keras_model_serialize - ): - # Arrange - fake_gcs_path = os.fspath(tmp_path / "job_id/input/input_estimator") - os.makedirs(tmp_path / "job_id/input") - keras_model = keras.Sequential( - [keras.layers.Dense(5, input_shape=(4,)), keras.layers.Softmax()] - ) - - # Act - any_serializer_instance.serialize(keras_model, fake_gcs_path) - - # Assert - metadata_path = ( - tmp_path - / f"job_id/input/{serializers_base.SERIALIZATION_METADATA_FILENAME}_input_estimator.json" - ) - - # Metadata should have the correct serializer information - with open(metadata_path, "rb") as f: - metadata = json.load(f) - assert ( - metadata[serializers_base.SERIALIZATION_METADATA_SERIALIZER_KEY] - == "KerasModelSerializer" - ) - assert metadata[serializers_base.SERIALIZATION_METADATA_DEPENDENCIES_KEY] == [ - "keras==1.0.0" - ] - - @pytest.mark.usefixtures("mock_gcs_upload") - def test_any_serializer_serialize_torch_model( - self, any_serializer_instance, tmp_path, mock_torch_model_serialize - ): - # Arrange - fake_gcs_path = os.fspath(tmp_path / "job_id/input/input_estimator") - os.makedirs(tmp_path / "job_id/input") - torch_model = TestTorchClass() - - # Act - any_serializer_instance.serialize(torch_model, fake_gcs_path) - - # Assert - metadata_path = ( - tmp_path - / f"job_id/input/{serializers_base.SERIALIZATION_METADATA_FILENAME}_input_estimator.json" - ) - - # Metadata should have the correct serializer information - with open(metadata_path, "rb") as f: - metadata = json.load(f) - assert ( - metadata[serializers_base.SERIALIZATION_METADATA_SERIALIZER_KEY] - == "TorchModelSerializer" - ) - assert metadata[serializers_base.SERIALIZATION_METADATA_DEPENDENCIES_KEY] == [ - "torch==1.0.0" - ] - - @pytest.mark.usefixtures("mock_gcs_upload") - def test_any_serializer_serialize_dataframe( - self, any_serializer_instance, tmp_path, mock_pandas_data_serialize - ): - # Arrange - fake_gcs_path = os.fspath(tmp_path / "job_id/input/X") - os.makedirs(tmp_path / "job_id/input") - df = pd.DataFrame({"a": [1, 2, 3], "b": [1, 2, 3]}) - - # Act - any_serializer_instance.serialize(df, fake_gcs_path) - - # Assert - metadata_path = ( - tmp_path - / 
f"job_id/input/{serializers_base.SERIALIZATION_METADATA_FILENAME}_X.json" - ) - - # Metadata should have the correct serializer information - with open(metadata_path, "rb") as f: - metadata = json.load(f) - assert ( - metadata[serializers_base.SERIALIZATION_METADATA_SERIALIZER_KEY] - == "PandasDataSerializer" - ) - assert metadata[serializers_base.SERIALIZATION_METADATA_DEPENDENCIES_KEY] == [ - "pandas==1.0.0" - ] - - @pytest.mark.usefixtures("mock_gcs_upload") - def test_any_serializer_serialize_general_object( - self, any_serializer_instance, tmp_path, mock_cloudpickle_serialize - ): - # Arrange - fake_gcs_path = os.fspath(tmp_path / "job_id/input/general_object.cpkl") - os.makedirs(tmp_path / "job_id/input") - - class TestClass: - pass - - obj = TestClass() - - # Act - any_serializer_instance.serialize(obj, fake_gcs_path) - - # Assert - metadata_path = ( - tmp_path - / f"job_id/input/{serializers_base.SERIALIZATION_METADATA_FILENAME}_general_object.json" - ) - - # Metadata should have the correct serializer information - with open(metadata_path, "rb") as f: - metadata = json.load(f) - assert ( - metadata[serializers_base.SERIALIZATION_METADATA_SERIALIZER_KEY] - == "CloudPickleSerializer" - ) - assert metadata[serializers_base.SERIALIZATION_METADATA_DEPENDENCIES_KEY] == [ - "cloudpickle==1.0.0" - ] - - @pytest.mark.usefixtures("mock_gcs_upload") - def test_any_serializer_serialize_torch_dataloader( - self, any_serializer_instance, tmp_path, mock_torch_dataloader_serialize - ): - # Arrange - fake_gcs_path = os.fspath(tmp_path / "dataloader") - - dataloader = torch.utils.data.DataLoader( - torch.utils.data.TensorDataset( - torch.tensor([[1, 2, 3] for i in range(100)]), - torch.tensor([1] * 100), - ), - batch_size=10, - shuffle=True, - ) - - # Act - any_serializer_instance.serialize(dataloader, fake_gcs_path) - - # Assert - metadata_path = ( - tmp_path - / f"{serializers_base.SERIALIZATION_METADATA_FILENAME}_dataloader.json" - ) - with open(metadata_path, "rb") as f: - metadata = json.load(f) - - assert ( - metadata[serializers_base.SERIALIZATION_METADATA_SERIALIZER_KEY] - == "TorchDataLoaderSerializer" - ) - assert metadata[serializers_base.SERIALIZATION_METADATA_DEPENDENCIES_KEY] == [ - "torch==1.0.0" - ] - - @pytest.mark.usefixtures("mock_tf_dataset_serialize") - def test_any_serializer_serialize_tf_dataset( - self, any_serializer_instance, tmp_path, tf_dataset_serializer - ): - # Arrange - fake_gcs_path = os.fspath(tmp_path / "tf_dataset") - - tf_dataset = tf.data.Dataset.from_tensor_slices([1, 2, 3]) - - # Act - any_serializer_instance.serialize(tf_dataset, fake_gcs_path) - - # Assert - metadata_path = ( - tmp_path - / f"{serializers_base.SERIALIZATION_METADATA_FILENAME}_tf_dataset.json" - ) - with open(metadata_path, "rb") as f: - metadata = json.load(f) - - assert ( - metadata[serializers_base.SERIALIZATION_METADATA_SERIALIZER_KEY] - == "TFDatasetSerializer" - ) - assert metadata[serializers_base.SERIALIZATION_METADATA_DEPENDENCIES_KEY] == [ - "tensorflow==1.0.0" - ] - - @pytest.mark.usefixtures("mock_gcs_upload") - def test_any_serializer_typed_serializer_failed_falling_back_to_cloudpickle( - self, any_serializer_instance, tmp_path, mock_cloudpickle_serialize - ): - # Arrange - fake_gcs_path = os.fspath(tmp_path / "job_id/input/input_estimator") - os.makedirs(tmp_path / "job_id/input") - keras_model = keras.Sequential( - [keras.layers.Dense(5, input_shape=(4,)), keras.layers.Softmax()] - ) - - with mock.patch.object( - serializers.KerasModelSerializer, "serialize", autospec=True - ) 
as mock_keras_model_serializer_serialize: - mock_keras_model_serializer_serialize.side_effect = Exception - # Act - any_serializer_instance.serialize(keras_model, fake_gcs_path) - - # Assert - metadata_path = ( - tmp_path - / f"job_id/input/{serializers_base.SERIALIZATION_METADATA_FILENAME}_input_estimator.json" - ) - - # Metadata should have the correct serializer information - with open(metadata_path, "rb") as f: - metadata = json.load(f) - assert ( - metadata[serializers_base.SERIALIZATION_METADATA_SERIALIZER_KEY] - == "CloudPickleSerializer" - ) - assert metadata[serializers_base.SERIALIZATION_METADATA_DEPENDENCIES_KEY] == [ - "cloudpickle==1.0.0" - ] - - def test_any_serializer_cloudpickle_serializer_failed_raise_serialization_error( - self, any_serializer_instance, tmp_path - ): - # Arrange - fake_gcs_path = os.fspath(tmp_path / "job_id/input/general_object.cpkl") - - class TestClass: - pass - - obj = TestClass() - - with mock.patch.object( - serializers.CloudPickleSerializer, "serialize", autospec=True - ) as mock_cloudpickle_serializer_serialize: - mock_cloudpickle_serializer_serialize.side_effect = Exception - # Act & Assert - with pytest.raises(serializers_base.SerializationError): - any_serializer_instance.serialize(obj, fake_gcs_path) - - @pytest.mark.usefixtures("mock_gcs_upload") - @mock.patch.object(any_serializer, "_check_dependency_versions", autospec=True) - def test_any_serializer_deserialize_custom_model_with_custom_serializer( - self, mocked_check_dependency_versions, any_serializer_instance, tmp_path - ): - # Arrange - class CustomModel: - def __init__(self, weight: int = 0): - self.weight = weight - - @developer.mark.train() - def fit(self, X_train, y_train): - self.weight += 1 - return self - - class CustomSerializer(developer.Serializer): - _metadata = developer.SerializationMetadata() - - def serialize(self, to_serialize: CustomModel, gcs_path: str) -> str: - return gcs_path - - def deserialize(self, serialized_gcs_path: str, **kwargs) -> CustomModel: - assert "param1" in kwargs - # Pretend that the model is trained - return CustomModel(weight=1) # noqa: F821 - - developer.register_serializer(CustomModel, CustomSerializer) - CustomSerializer.register_requirements(["custom_dependency==1.0.0"]) - - fake_gcs_path = os.fspath(tmp_path / "job_id/input/custom_model") - os.makedirs(tmp_path / "job_id/input") - local_metadata_path = ( - tmp_path - / f"job_id/input/{serializers_base.SERIALIZATION_METADATA_FILENAME}_custom_model.json" - ) - - # Write the local metadata - with open(local_metadata_path, "wb") as f: - f.write( - json.dumps( - { - serializers_base.SERIALIZATION_METADATA_SERIALIZER_KEY: "CustomSerializer", - serializers_base.SERIALIZATION_METADATA_DEPENDENCIES_KEY: [ - "custom_dependency==1.0.0" - ], - } - ).encode("utf-8") - ) - # Write the global metadata - global_metadata_path = ( - tmp_path / "job_id/input/serialization_global_metadata.json" - ) - global_metadata = { - "serializer": "AnySerializer", - "dependencies": [], - "serialized": { - str(fake_gcs_path): { - "serialization_id": "id_holder", - "serializer_args": {"param1": {"value": 1}}, - }, - }, - } - with open(global_metadata_path, "wb") as f: - f.write(json.dumps(global_metadata).encode("utf-8")) - - custom_serializer_path = tmp_path / "job_id/input/CustomSerializer" - - # Load global metadata - any_serializer_instance.load_global_metadata(str(global_metadata_path)) - - # Act - with mock.patch.object( - serializers.CloudPickleSerializer, - "deserialize", - autospec=True, - 
return_value=CustomSerializer(),
-        ) as mock_cloudpickle_deserialize:
-            deserialized_custom_model = any_serializer_instance.deserialize(
-                fake_gcs_path
-            )
-
-        # Assert
-        del CustomModel
-        assert deserialized_custom_model.weight == 1
-        # CloudPickleSerializer.deserialize() is called to deserialize the
-        # CustomSerializer.
-        mock_cloudpickle_deserialize.assert_called_once_with(
-            any_serializer_instance._instances[serializers.CloudPickleSerializer],
-            serialized_gcs_path=str(custom_serializer_path),
-        )
-
-    @pytest.mark.usefixtures("mock_gcs_upload")
-    def test_any_serializer_deserialize_sklearn_estimator(
-        self, any_serializer_instance, tmp_path, mock_sklearn_estimator_deserialize
-    ):
-        # Arrange
-        fake_gcs_path = os.fspath(tmp_path / "job_id/input/input_estimator")
-        os.makedirs(tmp_path / "job_id/input")
-        metadata_path = (
-            tmp_path
-            / f"job_id/input/{serializers_base.SERIALIZATION_METADATA_FILENAME}_input_estimator.json"
-        )
-        with open(metadata_path, "wb") as f:
-            f.write(
-                json.dumps(
-                    {
-                        serializers_base.SERIALIZATION_METADATA_SERIALIZER_KEY: "SklearnEstimatorSerializer",
-                        serializers_base.SERIALIZATION_METADATA_DEPENDENCIES_KEY: [],
-                    }
-                ).encode("utf-8")
-            )
-
-        # Act
-        _ = any_serializer_instance.deserialize(fake_gcs_path)
-
-        # Assert
-        mock_sklearn_estimator_deserialize.assert_called_once_with(
-            any_serializer_instance._instances[serializers.SklearnEstimatorSerializer],
-            serialized_gcs_path=fake_gcs_path,
-        )
-
-    @pytest.mark.usefixtures("mock_gcs_upload")
-    def test_any_serializer_deserialize_keras_model(
-        self, any_serializer_instance, tmp_path, mock_keras_model_deserialize
-    ):
-        # Arrange
-        fake_gcs_path = os.fspath(tmp_path / "job_id/input/input_estimator")
-        os.makedirs(tmp_path / "job_id/input")
-        metadata_path = (
-            tmp_path
-            / f"job_id/input/{serializers_base.SERIALIZATION_METADATA_FILENAME}_input_estimator.json"
-        )
-        with open(metadata_path, "wb") as f:
-            f.write(
-                json.dumps(
-                    {
-                        serializers_base.SERIALIZATION_METADATA_SERIALIZER_KEY: "KerasModelSerializer",
-                        serializers_base.SERIALIZATION_METADATA_DEPENDENCIES_KEY: [],
-                    }
-                ).encode("utf-8")
-            )
-
-        # Act
-        _ = any_serializer_instance.deserialize(fake_gcs_path)
-
-        # Assert
-        mock_keras_model_deserialize.assert_called_once_with(
-            any_serializer_instance._instances[serializers.KerasModelSerializer],
-            serialized_gcs_path=fake_gcs_path,
-        )
-
-    @pytest.mark.usefixtures("mock_gcs_upload")
-    def test_any_serializer_deserialize_torch_model(
-        self, any_serializer_instance, tmp_path, mock_torch_model_deserialize
-    ):
-        # Arrange
-        fake_gcs_path = os.fspath(tmp_path / "job_id/input/input_estimator")
-        os.makedirs(tmp_path / "job_id/input")
-        metadata_path = (
-            tmp_path
-            / f"job_id/input/{serializers_base.SERIALIZATION_METADATA_FILENAME}_input_estimator.json"
-        )
-        with open(metadata_path, "wb") as f:
-            f.write(
-                json.dumps(
-                    {
-                        serializers_base.SERIALIZATION_METADATA_SERIALIZER_KEY: "TorchModelSerializer",
-                        serializers_base.SERIALIZATION_METADATA_DEPENDENCIES_KEY: [],
-                    }
-                ).encode("utf-8")
-            )
-
-        # Act
-        _ = any_serializer_instance.deserialize(fake_gcs_path)
-
-        # Assert
-        mock_torch_model_deserialize.assert_called_once_with(
-            any_serializer_instance._instances[serializers.TorchModelSerializer],
-            serialized_gcs_path=fake_gcs_path,
-        )
-
-    @pytest.mark.usefixtures("mock_gcs_upload")
-    def test_any_serializer_deserialize_dataframe(
-        self, any_serializer_instance, tmp_path, mock_pandas_data_deserialize
-    ):
-        # Arrange
-        fake_gcs_path = os.fspath(tmp_path / "job_id/input/X")
-        os.makedirs(tmp_path /
"job_id/input") - metadata_path = ( - tmp_path - / f"job_id/input/{serializers_base.SERIALIZATION_METADATA_FILENAME}_X.json" - ) - with open(metadata_path, "wb") as f: - f.write( - json.dumps( - { - serializers_base.SERIALIZATION_METADATA_SERIALIZER_KEY: "PandasDataSerializer", - serializers_base.SERIALIZATION_METADATA_DEPENDENCIES_KEY: [], - } - ).encode("utf-8") - ) - - # Act - _ = any_serializer_instance.deserialize(fake_gcs_path) - - # Assert - mock_pandas_data_deserialize.assert_called_once_with( - any_serializer_instance._instances[serializers.PandasDataSerializer], - serialized_gcs_path=fake_gcs_path, - ) - - @pytest.mark.usefixtures("mock_gcs_upload") - def test_any_serializer_deserialize_torch_dataloader( - self, any_serializer_instance, tmp_path, mock_torch_dataloader_deserialize - ): - # Arrange - fake_gcs_path = os.fspath(tmp_path / "job_id/input/dataloader") - os.makedirs(tmp_path / "job_id/input") - metadata_path = ( - tmp_path - / f"job_id/input/{serializers_base.SERIALIZATION_METADATA_FILENAME}_dataloader.json" - ) - with open(metadata_path, "wb") as f: - f.write( - json.dumps( - { - serializers_base.SERIALIZATION_METADATA_SERIALIZER_KEY: "TorchDataLoaderSerializer", - serializers_base.SERIALIZATION_METADATA_DEPENDENCIES_KEY: [], - } - ).encode("utf-8") - ) - - # Act - _ = any_serializer_instance.deserialize(fake_gcs_path) - - # Assert - mock_torch_dataloader_deserialize.assert_called_once_with( - any_serializer_instance._instances[serializers.TorchDataLoaderSerializer], - serialized_gcs_path=fake_gcs_path, - ) - - @pytest.mark.usefixtures("mock_gcs_upload") - def test_any_serializer_deserialize_bigframe_sklearn( - self, any_serializer_instance, tmp_path, mock_bigframe_deserialize_sklearn - ): - # Arrange - fake_gcs_path = os.fspath(tmp_path / "job_id/input/X") - os.makedirs(tmp_path / "job_id/input") - metadata_path = ( - tmp_path - / f"job_id/input/{serializers_base.SERIALIZATION_METADATA_FILENAME}_X.json" - ) - with open(metadata_path, "wb") as f: - f.write( - json.dumps( - { - serializers_base.SERIALIZATION_METADATA_SERIALIZER_KEY: "BigframeSerializer", - serializers_base.SERIALIZATION_METADATA_DEPENDENCIES_KEY: [], - serializers.SERIALIZATION_METADATA_FRAMEWORK_KEY: "sklearn", - } - ).encode("utf-8") - ) - - # Act (step 2) - _ = any_serializer_instance.deserialize(fake_gcs_path) - - # Assert - mock_bigframe_deserialize_sklearn.assert_called_once_with( - any_serializer_instance._instances[serializers.BigframeSerializer], - serialized_gcs_path=fake_gcs_path, - ) - - @pytest.mark.usefixtures("mock_gcs_upload") - def test_any_serializer_deserialize_bigframe_torch( - self, any_serializer_instance, tmp_path, mock_bigframe_deserialize_torch - ): - # Arrange - fake_gcs_path = os.fspath(tmp_path / "job_id/input/X") - os.makedirs(tmp_path / "job_id/input") - metadata_path = ( - tmp_path - / f"job_id/input/{serializers_base.SERIALIZATION_METADATA_FILENAME}_X.json" - ) - with open(metadata_path, "wb") as f: - f.write( - json.dumps( - { - serializers_base.SERIALIZATION_METADATA_SERIALIZER_KEY: "BigframeSerializer", - serializers_base.SERIALIZATION_METADATA_DEPENDENCIES_KEY: [], - serializers.SERIALIZATION_METADATA_FRAMEWORK_KEY: "torch", - } - ).encode("utf-8") - ) - - # Act (step 2) - _ = any_serializer_instance.deserialize(fake_gcs_path) - - # Assert - mock_bigframe_deserialize_torch.assert_called_once_with( - any_serializer_instance._instances[serializers.BigframeSerializer], - serialized_gcs_path=fake_gcs_path, - ) - - @pytest.mark.usefixtures("mock_gcs_upload") - def 
test_any_serializer_deserialize_bigframe_tensorflow( - self, any_serializer_instance, tmp_path, mock_bigframe_deserialize_tensorflow - ): - # Arrange - fake_gcs_path = os.fspath(tmp_path / "job_id/input/X") - os.makedirs(tmp_path / "job_id/input") - metadata_path = ( - tmp_path - / f"job_id/input/{serializers_base.SERIALIZATION_METADATA_FILENAME}_X.json" - ) - with open(metadata_path, "wb") as f: - f.write( - json.dumps( - { - serializers_base.SERIALIZATION_METADATA_SERIALIZER_KEY: "BigframeSerializer", - serializers_base.SERIALIZATION_METADATA_DEPENDENCIES_KEY: [], - serializers.SERIALIZATION_METADATA_FRAMEWORK_KEY: "tensorflow", - } - ).encode("utf-8") - ) - - # Act (step 2) - _ = any_serializer_instance.deserialize(fake_gcs_path) - - # Assert - mock_bigframe_deserialize_tensorflow.assert_called_once_with( - any_serializer_instance._instances[serializers.BigframeSerializer], - serialized_gcs_path=fake_gcs_path, - batch_size=None, - target_col=None, - ) - - def test_any_serializer_deserialize_tf_dataset( - self, any_serializer_instance, tmp_path, mock_tf_dataset_deserialize - ): - # Arrange - fake_gcs_path = os.fspath(tmp_path / "job_id/input/X") - os.makedirs(tmp_path / "job_id/input") - metadata_path = ( - tmp_path - / f"job_id/input/{serializers_base.SERIALIZATION_METADATA_FILENAME}_X.json" - ) - with open(metadata_path, "wb") as f: - f.write( - json.dumps( - { - serializers_base.SERIALIZATION_METADATA_SERIALIZER_KEY: "TFDatasetSerializer", - serializers_base.SERIALIZATION_METADATA_DEPENDENCIES_KEY: [ - "tensorflow==1.0.0" - ], - } - ).encode("utf-8") - ) - - # Act - any_serializer_instance.deserialize(fake_gcs_path) - - # Assert - mock_tf_dataset_deserialize.assert_called_once_with( - any_serializer_instance._instances[serializers.TFDatasetSerializer], - serialized_gcs_path=fake_gcs_path, - ) - - @pytest.mark.usefixtures("mock_gcs_upload") - def test_any_serializer_deserialize_general_object( - self, any_serializer_instance, tmp_path, mock_cloudpickle_deserialize - ): - # Arrange - fake_gcs_path = os.fspath(tmp_path / "job_id/input/general_object.cpkl") - os.makedirs(tmp_path / "job_id/input") - metadata_path = ( - tmp_path - / f"job_id/input/{serializers_base.SERIALIZATION_METADATA_FILENAME}_general_object.json" - ) - with open(metadata_path, "wb") as f: - f.write( - json.dumps( - { - serializers_base.SERIALIZATION_METADATA_SERIALIZER_KEY: "CloudPickleSerializer", - serializers_base.SERIALIZATION_METADATA_DEPENDENCIES_KEY: [], - } - ).encode("utf-8") - ) - - # Act - _ = any_serializer_instance.deserialize(fake_gcs_path) - - # Assert - mock_cloudpickle_deserialize.assert_called_once_with( - any_serializer_instance._instances[serializers.CloudPickleSerializer], - serialized_gcs_path=fake_gcs_path, - ) - - def test_any_serializer_deserialize_raise_runtime_error_when_dependency_cannot_be_imported( - self, tmp_path, any_serializer_instance - ): - # Arrange - fake_gcs_path = os.fspath(tmp_path / "job_id/input/general_object.cpkl") - os.makedirs(tmp_path / "job_id/input") - metadata_path = ( - tmp_path - / f"job_id/input/{serializers_base.SERIALIZATION_METADATA_FILENAME}_general_object.json" - ) - with open(metadata_path, "wb") as f: - f.write( - json.dumps( - { - serializers_base.SERIALIZATION_METADATA_SERIALIZER_KEY: "CloudPickleSerializer", - serializers_base.SERIALIZATION_METADATA_DEPENDENCIES_KEY: [ - "nonexisting_module==1.0.0", - ], - } - ).encode("utf-8") - ) - - # Act & Assert - with pytest.raises(RuntimeError, match="nonexisting_module is not installed"): - _ = 
any_serializer_instance.deserialize(fake_gcs_path)
-
-    @mock.patch.object(serializers, "_is_valid_gcs_path", return_value=True)
-    def test_any_serializer_deserialize_raises_warning_when_version_mismatched(
-        self, mock_gcs_path_validation, tmp_path, caplog, any_serializer_instance
-    ):
-        # Arrange
-        fake_gcs_path = os.fspath(tmp_path / "job_id/input/general_object.cpkl")
-        os.makedirs(tmp_path / "job_id/input")
-        metadata_path = (
-            tmp_path
-            / f"job_id/input/{serializers_base.SERIALIZATION_METADATA_FILENAME}_general_object.json"
-        )
-        with open(metadata_path, "wb") as f:
-            f.write(
-                json.dumps(
-                    {
-                        serializers_base.SERIALIZATION_METADATA_SERIALIZER_KEY: "CloudPickleSerializer",
-                        serializers_base.SERIALIZATION_METADATA_DEPENDENCIES_KEY: [
-                            "sklearn==1.0.0",
-                        ],
-                    }
-                ).encode("utf-8")
-            )
-        with open(fake_gcs_path, "wb") as f:
-            f.write(cloudpickle.dumps([1, 2, 3], protocol=constants.PICKLE_PROTOCOL))
-
-        # Act
-        _ = any_serializer_instance.deserialize(fake_gcs_path)
-        # Assert
-        # The current sklearn version in google3 keeps changing, but it is
-        # always a later version than 1.0.0.
-        with caplog.at_level(level=20, logger="vertexai.serialization_engine"):
-            assert "sklearn's version is" in caplog.text
-            assert "while the required version is ==1.0.0" in caplog.text
-
-
-def test_get_arg_path_from_file_gcs_uri():
-    gcs_uri = "gs://bucket/job_path/input/estimator"
-    arg_path = any_serializer.get_arg_path_from_file_gcs_uri(
-        gcs_uri=gcs_uri, arg_name="input_func"
-    )
-    assert arg_path == "gs://bucket/job_path/input/serialization_args/input_func"
-
-
-class TestSerializerArg:
-    @pytest.mark.parametrize(
-        "d, expected_value, expected_gcs_path",
-        [
-            ({"value": 1}, 1, None),
-            (
-                {"gcs_path": "gs://path-of-serializer-arg"},
-                None,
-                "gs://path-of-serializer-arg",
-            ),
-        ],
-        ids=[
-            "Value Present",
-            "GCS Path Present",
-        ],
-    )
-    def test_from_dict(self, d, expected_value, expected_gcs_path):
-        serializer_args = any_serializer.SerializerArg.from_dict(d)
-        assert serializer_args.value == expected_value
-        assert serializer_args.gcs_path == expected_gcs_path
-
-    @pytest.mark.parametrize(
-        "d",
-        [
-            {"value": 1, "gcs_path": "gs://path-of-serializer-arg"},
-            {"value": 0, "gcs_path": "gs://path-of-serializer-arg"},
-        ],
-        ids=[
-            "Nonzero Value Present",
-            "Zero Value Present",
-        ],
-    )
-    def test_from_dict_raises_value_error(self, d):
-        with pytest.raises(
-            ValueError, match="Only one of value or gcs_path should be provided"
-        ):
-            _ = any_serializer.SerializerArg.from_dict(d)
-
-    @pytest.mark.parametrize(
-        "serializer_arg, expected_dict",
-        [
-            (any_serializer.SerializerArg(value=1), {"value": 1, "gcs_path": None}),
-            (
-                any_serializer.SerializerArg(gcs_path="gs://path-of-serializer-arg"),
-                {"value": None, "gcs_path": "gs://path-of-serializer-arg"},
-            ),
-        ],
-        ids=[
-            "Value Present",
-            "GCS Path Present",
-        ],
-    )
-    def test_to_dict(self, serializer_arg, expected_dict):
-        returned_dict = serializer_arg.to_dict()
-        assert returned_dict == expected_dict
-
-    @pytest.mark.parametrize(
-        "serializer_arg, expected_dict",
-        [
-            (any_serializer.SerializerArg(value=1), {"value": 1, "gcs_path": None}),
-            (
-                any_serializer.SerializerArg(gcs_path="gs://path-of-serializer-arg"),
-                {"value": None, "gcs_path": "gs://path-of-serializer-arg"},
-            ),
-        ],
-        ids=[
-            "Value Present",
-            "GCS Path Present",
-        ],
-    )
-    def test_to_jsonable_dict(self, serializer_arg, expected_dict):
-        returned_dict = serializer_arg.to_jsonable_dict()
-        assert returned_dict == expected_dict
-
-
-class
TestSerializationEntryMetadata: - def test_from_dict(self): - # Arrange - class RandomClass: - pass - - serialized_obj = RandomClass() - d = { - "serialization_id": "serialized_with_two_args", - "obj": serialized_obj, - "serializer_args": { - "arg1": {"value": 1, "gcs_path": None}, - "arg2": {"value": None, "gcs_path": "gs://path-of-serializer-arg"}, - }, - } - - expected_serialized_entry_metadata = any_serializer.SerializedEntryMetadata( - serialization_id="serialized_with_two_args", - serializer_args={ - "arg1": any_serializer.SerializerArg(value=1), - "arg2": any_serializer.SerializerArg( - gcs_path="gs://path-of-serializer-arg" - ), - }, - obj=serialized_obj, - ) - - # Act - serialized_entry_metadata = any_serializer.SerializedEntryMetadata.from_dict(d) - - # Assert - assert ( - serialized_entry_metadata.serialization_id - == expected_serialized_entry_metadata.serialization_id - ) - assert ( - serialized_entry_metadata.serializer_args - == expected_serialized_entry_metadata.serializer_args - ) - assert serialized_entry_metadata.obj == expected_serialized_entry_metadata.obj - - def test_to_jsonable_dict(self): - # Arrange - class RandomClass: - pass - - serialized_args = { - "arg1": any_serializer.SerializerArg(value=1), - "arg2": any_serializer.SerializerArg( - gcs_path="gs://path-of-serializer-arg" - ), - } - serialized_entry = any_serializer.SerializedEntryMetadata( - serialization_id="serialized_with_two_args", - serializer_args=serialized_args, - obj=RandomClass(), - ) - - expected_dict = { - "serialization_id": "serialized_with_two_args", - "serializer_args": { - "arg1": {"value": 1, "gcs_path": None}, - "arg2": {"value": None, "gcs_path": "gs://path-of-serializer-arg"}, - }, - } - - # Act - returned_dict = serialized_entry.to_jsonable_dict() - - # Assert - assert returned_dict == expected_dict - - def test_to_json(self): - # Arrange - class RandomClass: - pass - - serialized_args = { - "arg1": any_serializer.SerializerArg(value=1), - "arg2": any_serializer.SerializerArg( - gcs_path="gs://path-of-serializer-arg" - ), - } - serialized_obj = RandomClass() - serialized_entry = any_serializer.SerializedEntryMetadata( - serialization_id="serialized_with_two_args", - serializer_args=serialized_args, - obj=serialized_obj, - ) - - expected_dict = { - "serialization_id": "serialized_with_two_args", - "obj": serialized_obj, - "serializer_args": { - "arg1": {"value": 1, "gcs_path": None}, - "arg2": {"value": None, "gcs_path": "gs://path-of-serializer-arg"}, - }, - } - - # Act - returned_dict = serialized_entry.to_dict() - - # Assert - assert returned_dict == expected_dict - - -class TestAnySerializerMetadata: - def test_from_dict(self): - serialized_obj1_gcs_path = "gs://bucket/job_dir/input/random_obj1" - serialized_obj2_gcs_path = "gs://bucket/job_dir/input/random_obj2" - d = { - "custom_commands": [], - "dependencies": [], - "serializer": "AnySerializer", - "serialized": { - serialized_obj1_gcs_path: { - "serialization_id": "random_obj", - "serializer_args": { - "arg1": {"value": 1, "gcs_path": None}, - "arg2": { - "value": None, - "gcs_path": "gs://path-of-serializer-arg", - }, - }, - }, - serialized_obj2_gcs_path: { - "serialization_id": "random_obj2", - "serializer_args": { - "arg1": {"value": 2.0, "gcs_path": None}, - "arg2": { - "value": None, - "gcs_path": "gs://path-of-serializer-arg", - }, - }, - }, - }, - } - any_serilaizer_metadata = any_serializer.AnySerializationMetadata.from_dict(d) - assert ( - any_serilaizer_metadata.serialized[ - serialized_obj1_gcs_path - 
].serialization_id - == "random_obj" - ) - assert any_serilaizer_metadata.serialized[ - serialized_obj1_gcs_path - ].serializer_args == { - "arg1": any_serializer.SerializerArg(value=1), - "arg2": any_serializer.SerializerArg( - gcs_path="gs://path-of-serializer-arg" - ), - } - assert ( - any_serilaizer_metadata.serialized[ - serialized_obj2_gcs_path - ].serialization_id - == "random_obj2" - ) - assert any_serilaizer_metadata.serialized[ - serialized_obj2_gcs_path - ].serializer_args == { - "arg1": any_serializer.SerializerArg(value=2.0), - "arg2": any_serializer.SerializerArg( - gcs_path="gs://path-of-serializer-arg" - ), - } diff --git a/tests/unit/vertexai/test_data_serializer_dev.py b/tests/unit/vertexai/test_data_serializer_dev.py deleted file mode 100644 index f385528260..0000000000 --- a/tests/unit/vertexai/test_data_serializer_dev.py +++ /dev/null @@ -1,149 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2023 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# - -from vertexai.preview._workflow.serialization_engine import ( - serializers, -) -import numpy as np -import pandas as pd -import pytest - - -_TEST_BUCKET = "gs://staging-bucket" -_TEST_FILE_NAME = "data.parquet" -_TEST_GCS_URI = f"{_TEST_BUCKET}/{_TEST_FILE_NAME}" - - -@pytest.mark.usefixtures("mock_storage_blob_tmp_dir", "google_auth_mock") -class TestPandasDataSerializerDev: - def test_base_case(self, tmp_path): - df = pd.DataFrame(np.zeros(shape=[3, 3])) - df_serializer = serializers.PandasDataSerializerDev() - - df_serializer.serialize(df, _TEST_GCS_URI) - restored_df = df_serializer.deserialize(tmp_path / _TEST_FILE_NAME) - pd.testing.assert_frame_equal(df, restored_df) - - def test_base_case_with_nans(self, tmp_path): - df = pd.DataFrame(np.zeros(shape=[3, 3])) - df.iat[0, 0] = np.nan - df_serializer = serializers.PandasDataSerializerDev() - - df_serializer.serialize(df, _TEST_GCS_URI) - restored_df = df_serializer.deserialize(tmp_path / _TEST_FILE_NAME) - pd.testing.assert_frame_equal(df, restored_df) - - def test_df_with_string_col_names_and_nans(self, tmp_path): - df = pd.DataFrame(np.zeros(shape=[3, 3])) - df.columns = ["0", "1", "2"] - df.iat[0, 0] = np.nan - df_serializer = serializers.PandasDataSerializerDev() - - df_serializer.serialize(df, _TEST_GCS_URI) - restored_df = df_serializer.deserialize(tmp_path / _TEST_FILE_NAME) - pd.testing.assert_frame_equal(df, restored_df) - - def test_df_with_mixed_data_types_and_nans(self, tmp_path): - df = pd.DataFrame(np.zeros(shape=[3, 3])) - df.columns = ["0", "1", "2"] - df.insert(3, "string_col", ["0", np.nan, "2"]) - df_serializer = serializers.PandasDataSerializerDev() - - df_serializer.serialize(df, _TEST_GCS_URI) - restored_df = df_serializer.deserialize(tmp_path / _TEST_FILE_NAME) - pd.testing.assert_frame_equal(df, restored_df) - - def test_df_with_mixed_data_types_and_row_indices(self, tmp_path): - df = pd.DataFrame(np.zeros(shape=[3, 3])) - df.columns = ["0", "1", "2"] - df.insert(3, "string_col", ["0", np.nan, "2"]) - df.index = 
["0", "1", "2"] - df_serializer = serializers.PandasDataSerializerDev() - - df_serializer.serialize(df, _TEST_GCS_URI) - restored_df = df_serializer.deserialize(tmp_path / _TEST_FILE_NAME) - pd.testing.assert_frame_equal(df, restored_df) - - def test_df_with_mixed_data_types_and_non_string_col_names(self, tmp_path): - df = pd.DataFrame(np.zeros(shape=[3, 3])) - df.columns = ["str_col_0", "str_col_1", 2] - df.insert(3, "string_col", ["0", np.nan, "2"]) - df.index = ["0", "1", "2"] - df_serializer = serializers.PandasDataSerializerDev() - - df_serializer.serialize(df, _TEST_GCS_URI) - restored_df = df_serializer.deserialize(tmp_path / _TEST_FILE_NAME) - pd.testing.assert_frame_equal(df, restored_df) - - def test_df_with_mixed_data_types_and_non_string_row_indices(self, tmp_path): - df = pd.DataFrame(np.zeros(shape=[3, 3])) - df.columns = ["0", "1", "2"] - df.insert(3, "string_col", ["0", np.nan, "2"]) - df.index = ["0", "1", 2] - df_serializer = serializers.PandasDataSerializerDev() - - df_serializer.serialize(df, _TEST_GCS_URI) - restored_df = df_serializer.deserialize(tmp_path / _TEST_FILE_NAME) - pd.testing.assert_frame_equal(df, restored_df) - - def test_df_with_mixed_data_types_and_categorical_data(self, tmp_path): - df = pd.DataFrame(np.zeros(shape=[3, 3])) - df.columns = ["0", "1", "2"] - df.insert(3, "string_col", ["0", np.nan, "2"]) - df.index = ["0", "1", 2] - df.insert(4, "categorical_str_col", ["A", "B", "C"]) - df["categorical_str_col"] = df["categorical_str_col"].astype("category") - df.insert(5, "categorical_int_col", [0, np.nan, 2]) - df["categorical_int_col"] = df["categorical_int_col"].astype("category") - df_serializer = serializers.PandasDataSerializerDev() - - df_serializer.serialize(df, _TEST_GCS_URI) - restored_df = df_serializer.deserialize(tmp_path / _TEST_FILE_NAME) - pd.testing.assert_frame_equal(df, restored_df) - - def test_df_with_mixed_data_types_and_ordered_categorical_data(self, tmp_path): - df = pd.DataFrame(np.zeros(shape=[3, 3])) - df.columns = ["0", "1", "2"] - df.insert(3, "string_col", ["0", np.nan, "2"]) - df.index = ["0", "1", 2] - df.insert(4, "categorical_str_col", ["A", "B", "C"]) - df["categorical_str_col"] = df["categorical_str_col"].astype("category") - df.insert(5, "categorical_int_col", [0, np.nan, 2]) - df["categorical_int_col"] = df["categorical_int_col"].astype("category") - df.insert( - 6, - "orderd_categorical", - pd.Categorical([1, 2, 3], ordered=True, categories=[2, 1, 3]), - ) - df_serializer = serializers.PandasDataSerializerDev() - - df_serializer.serialize(df, _TEST_GCS_URI) - restored_df = df_serializer.deserialize(tmp_path / _TEST_FILE_NAME) - pd.testing.assert_frame_equal(df, restored_df) - - def test_df_with_multiindex_and_all_string_indices(self, tmp_path): - arrays = [ - ["bar", "bar", "baz", "baz", "foo", "foo"], - ["one", "two", "one", "two", "one", "two"], - ] - - index = pd.MultiIndex.from_tuples(list(zip(*arrays)), names=["first", "second"]) - df = pd.DataFrame(np.zeros(shape=[6, 6]), index=index[:6], columns=index[:6]) - df_serializer = serializers.PandasDataSerializerDev() - - df_serializer.serialize(df, _TEST_GCS_URI) - restored_df = df_serializer.deserialize(tmp_path / _TEST_FILE_NAME) - pd.testing.assert_frame_equal(df, restored_df) diff --git a/tests/unit/vertexai/test_developer_mark.py b/tests/unit/vertexai/test_developer_mark.py deleted file mode 100644 index 0bcbbc5a29..0000000000 --- a/tests/unit/vertexai/test_developer_mark.py +++ /dev/null @@ -1,295 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2023 Google LLC 
-# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# - -import functools - -import vertexai -from vertexai.preview._workflow import driver -from vertexai.preview._workflow.driver import remote -from vertexai.preview._workflow.executor import ( - remote_container_training, -) -from vertexai.preview._workflow.shared import ( - configs, -) -from vertexai.preview.developer import remote_specs -import pytest - -# RemoteConfig constants -_TEST_DISPLAY_NAME = "test_display_name" -_TEST_STAGING_BUCKET = "gs://test-staging-bucket" -_TEST_CONTAINER_URI = "gcr.io/test-image" -_TEST_MACHINE_TYPE = "n1-standard-4" -_TEST_SERVICE_ACCOUNT = "test-service-account" -_TEST_WORKER_POOL_SPECS = remote_specs.WorkerPoolSpecs( - chief=remote_specs.WorkerPoolSpec( - machine_type=_TEST_MACHINE_TYPE, - ) -) - -_TEST_TRAINING_CONFIG = configs.RemoteConfig( - display_name=_TEST_DISPLAY_NAME, - staging_bucket=_TEST_STAGING_BUCKET, - container_uri=_TEST_CONTAINER_URI, - machine_type=_TEST_MACHINE_TYPE, - service_account=_TEST_SERVICE_ACCOUNT, -) - -_TEST_TRAINING_CONFIG_WORKER_POOL = configs.RemoteConfig( - display_name=_TEST_DISPLAY_NAME, - staging_bucket=_TEST_STAGING_BUCKET, - container_uri=_TEST_CONTAINER_URI, - worker_pool_specs=_TEST_WORKER_POOL_SPECS, - service_account=_TEST_SERVICE_ACCOUNT, -) - -# Remote training custom job constants -_TEST_IMAGE_URI = "test_image_uri" -_TEST_REPLICA_COUNT = 1 -_TEST_ACCELERATOR_COUNT = 8 -_TEST_ACCELERATOR_TYPE = "NVIDIA_TESLA_K80" -_TEST_BOOT_DISK_TYPE = "test_boot_disk_type" -_TEST_BOOT_DISK_SIZE_GB = 10 -_TEST_REMOTE_CONTAINER_TRAINING_CONFIG = configs.DistributedTrainingConfig( - display_name=_TEST_DISPLAY_NAME, - staging_bucket=_TEST_STAGING_BUCKET, - machine_type=_TEST_MACHINE_TYPE, - replica_count=_TEST_REPLICA_COUNT, - accelerator_count=_TEST_ACCELERATOR_COUNT, - accelerator_type=_TEST_ACCELERATOR_TYPE, - boot_disk_type=_TEST_BOOT_DISK_TYPE, - boot_disk_size_gb=_TEST_BOOT_DISK_SIZE_GB, -) -_TEST_REMOTE_CONTAINER_TRAINING_CONFIG_WORKER_POOL = configs.DistributedTrainingConfig( - display_name=_TEST_DISPLAY_NAME, - staging_bucket=_TEST_STAGING_BUCKET, - worker_pool_specs=_TEST_WORKER_POOL_SPECS, -) - -_TEST_PROJECT = "test-project" -_TEST_LOCATION = "us-central1" - - -class TestDeveloperMark: - def test_mark_train(self): - class TestClass(vertexai.preview.VertexModel): - @vertexai.preview.developer.mark.train() - def test_method(x, y): - return x + y - - assert isinstance(TestClass.test_method, driver.VertexRemoteFunctor) - assert TestClass.test_method.vertex == configs.VertexConfig - - test_class = TestClass() - - assert isinstance(test_class.test_method, driver.VertexRemoteFunctor) - assert isinstance(test_class.test_method.vertex, configs.VertexConfig) - - @pytest.mark.usefixtures("google_auth_mock") - def test_mark_train_with_all_args(self): - class TestClass(vertexai.preview.VertexModel): - @vertexai.preview.developer.mark.train(remote_config=_TEST_TRAINING_CONFIG) - def test_method(self, x, y): - return x + y - - test_class = TestClass() - - 
assert isinstance(test_class.test_method, driver.VertexRemoteFunctor) - assert ( - test_class.test_method.vertex.remote_config.display_name - == _TEST_DISPLAY_NAME - ) - assert ( - test_class.test_method.vertex.remote_config.staging_bucket - == _TEST_STAGING_BUCKET - ) - assert ( - test_class.test_method.vertex.remote_config.container_uri - == _TEST_CONTAINER_URI - ) - assert ( - test_class.test_method.vertex.remote_config.machine_type - == _TEST_MACHINE_TYPE - ) - assert ( - test_class.test_method.vertex.remote_config.service_account - == _TEST_SERVICE_ACCOUNT - ) - - @pytest.mark.usefixtures("google_auth_mock") - def test_mark_train_with_worker_pool_specs(self): - class TestClass(vertexai.preview.VertexModel): - @vertexai.preview.developer.mark.train( - remote_config=_TEST_TRAINING_CONFIG_WORKER_POOL - ) - def test_method(self, x, y): - return x + y - - test_class = TestClass() - - assert isinstance(test_class.test_method, driver.VertexRemoteFunctor) - assert ( - test_class.test_method.vertex.remote_config.display_name - == _TEST_DISPLAY_NAME - ) - assert ( - test_class.test_method.vertex.remote_config.staging_bucket - == _TEST_STAGING_BUCKET - ) - assert ( - test_class.test_method.vertex.remote_config.container_uri - == _TEST_CONTAINER_URI - ) - assert ( - test_class.test_method.vertex.remote_config.worker_pool_specs - == _TEST_WORKER_POOL_SPECS - ) - - # pylint: disable=missing-function-docstring,protected-access) - @pytest.mark.parametrize( - "remote_config,expected_config", - [ - ( - _TEST_REMOTE_CONTAINER_TRAINING_CONFIG, - _TEST_REMOTE_CONTAINER_TRAINING_CONFIG, - ), - (None, configs.DistributedTrainingConfig()), - ( - _TEST_REMOTE_CONTAINER_TRAINING_CONFIG_WORKER_POOL, - _TEST_REMOTE_CONTAINER_TRAINING_CONFIG_WORKER_POOL, - ), - ], - ) - def test_mark_remote_container_train(self, remote_config, expected_config): - test_additional_data = [remote_specs._InputParameterSpec("arg_0")] - - # pylint: disable=missing-class-docstring - class MockTrainer(remote.VertexModel): - - # pylint: disable=invalid-name,missing-function-docstring - @vertexai.preview.developer.mark._remote_container_train( - image_uri=_TEST_IMAGE_URI, - additional_data=test_additional_data, - remote_config=remote_config, - ) - def fit(self): - return - - assert isinstance(MockTrainer.fit, driver.VertexRemoteFunctor) - assert isinstance(MockTrainer.fit.vertex, functools.partial) - assert MockTrainer.fit.vertex.func == configs.VertexConfig - assert not MockTrainer.fit.vertex.args - assert MockTrainer.fit.vertex.keywords == { - "remote_config": expected_config, - "remote": True, - } - - test_trainer = MockTrainer() - assert isinstance(test_trainer.fit, driver.VertexRemoteFunctor) - assert test_trainer.fit.vertex.remote_config == expected_config - assert test_trainer.fit._remote_executor is remote_container_training.train - assert test_trainer.fit._remote_executor_kwargs == { - "additional_data": test_additional_data, - "image_uri": _TEST_IMAGE_URI, - } - assert test_trainer.fit.vertex.remote - - # pylint: disable=missing-function-docstring,protected-access - def test_mark_remote_container_train_override_remote_config(self): - # pylint: disable=missing-class-docstring - class MockTrainer(remote.VertexModel): - - # pylint: disable=invalid-name,missing-function-docstring - @vertexai.preview.developer.mark._remote_container_train( - image_uri=_TEST_IMAGE_URI, - additional_data=[], - remote_config=configs.DistributedTrainingConfig(), - ) - def fit(self): - return - - test_trainer = MockTrainer() - assert 
isinstance(test_trainer.fit, driver.VertexRemoteFunctor) - assert ( - test_trainer.fit.vertex.remote_config == configs.DistributedTrainingConfig() - ) - assert test_trainer.fit._remote_executor is remote_container_training.train - assert test_trainer.fit._remote_executor_kwargs == { - "additional_data": [], - "image_uri": _TEST_IMAGE_URI, - } - - # Overrides training config - test_remote_config = test_trainer.fit.vertex.remote_config - test_remote_config.display_name = _TEST_DISPLAY_NAME - test_remote_config.staging_bucket = _TEST_STAGING_BUCKET - test_remote_config.machine_type = _TEST_MACHINE_TYPE - test_remote_config.replica_count = _TEST_REPLICA_COUNT - test_remote_config.accelerator_type = _TEST_ACCELERATOR_TYPE - test_remote_config.accelerator_count = _TEST_ACCELERATOR_COUNT - test_remote_config.boot_disk_type = _TEST_BOOT_DISK_TYPE - test_remote_config.boot_disk_size_gb = _TEST_BOOT_DISK_SIZE_GB - - assert ( - test_trainer.fit.vertex.remote_config - == _TEST_REMOTE_CONTAINER_TRAINING_CONFIG - ) - - def test_mark_predict(self): - class TestClass(vertexai.preview.VertexModel): - @vertexai.preview.developer.mark.predict() - def test_method(x, y): - return x + y - - assert isinstance(TestClass.test_method, driver.VertexRemoteFunctor) - assert TestClass.test_method.vertex == configs.VertexConfig - - test_class = TestClass() - - assert isinstance(test_class.test_method, driver.VertexRemoteFunctor) - assert isinstance(test_class.test_method.vertex, configs.VertexConfig) - - def test_mark_predict_with_all_args(self): - class TestClass(vertexai.preview.VertexModel): - @vertexai.preview.developer.mark.predict( - remote_config=_TEST_TRAINING_CONFIG - ) - def test_method(self, x, y): - return x + y - - test_class = TestClass() - - assert isinstance(test_class.test_method, driver.VertexRemoteFunctor) - assert ( - test_class.test_method.vertex.remote_config.display_name - == _TEST_DISPLAY_NAME - ) - assert ( - test_class.test_method.vertex.remote_config.staging_bucket - == _TEST_STAGING_BUCKET - ) - assert ( - test_class.test_method.vertex.remote_config.container_uri - == _TEST_CONTAINER_URI - ) - assert ( - test_class.test_method.vertex.remote_config.machine_type - == _TEST_MACHINE_TYPE - ) - assert ( - test_class.test_method.vertex.remote_config.service_account - == _TEST_SERVICE_ACCOUNT - ) diff --git a/tests/unit/vertexai/test_model_utils.py b/tests/unit/vertexai/test_model_utils.py deleted file mode 100644 index 77aeed3283..0000000000 --- a/tests/unit/vertexai/test_model_utils.py +++ /dev/null @@ -1,648 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2023 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-# - -from importlib import reload -from unittest import mock - -from google.cloud import aiplatform -from google.cloud.aiplatform import utils -import vertexai -from vertexai.preview._workflow.serialization_engine import ( - any_serializer, - serializers_base, -) -from google.cloud.aiplatform.compat.services import job_service_client -from google.cloud.aiplatform.compat.types import ( - job_state as gca_job_state, - custom_job as gca_custom_job, - io as gca_io, -) -from google.cloud.aiplatform.compat.services import ( - model_garden_service_client, - model_service_client, -) -from google.cloud.aiplatform.compat.types import ( - deployed_model_ref_v1, - model as gca_model, - publisher_model as gca_publisher_model, -) -from vertexai.preview import language_models -import pytest - -import cloudpickle -import numpy as np -import sklearn -from sklearn.linear_model import _logistic -import tensorflow -import torch - - -# project constants -_TEST_PROJECT = "test-project" -_TEST_LOCATION = "us-central1" -_TEST_BUCKET = "gs://test-bucket" -_TEST_UNIQUE_NAME = "test-unique-name" - - -# framework-specific constants -_SKLEARN_MODEL = _logistic.LogisticRegression() -_TF_MODEL = tensorflow.keras.models.Model() -_PYTORCH_MODEL = torch.nn.Module() -_TEST_MODEL_GCS_URI = "gs://test_model_dir" -_MODEL_RESOURCE_NAME = "projects/123/locations/us-central1/models/456" -_REWRAPPER = "rewrapper" - -# customJob constants -_TEST_CUSTOM_JOB_RESOURCE_NAME = "projects/123/locations/us-central1/customJobs/456" - -# Tuned model constants -_TEST_ID = "123456789" -_TEST_TUNED_MODEL_NAME = ( - f"projects/{_TEST_PROJECT}/locations/{_TEST_LOCATION}/models/{_TEST_ID}" -) -_TEST_TUNED_MODEL_ENDPOINT_NAME = ( - f"projects/{_TEST_PROJECT}/locations/{_TEST_LOCATION}/endpoints/{_TEST_ID}" -) - -_TEXT_BISON_PUBLISHER_MODEL_DICT = { - "name": "publishers/google/models/text-bison", - "version_id": "001", - "open_source_category": "PROPRIETARY", - "launch_stage": gca_publisher_model.PublisherModel.LaunchStage.GA, - "publisher_model_template": "projects/{user-project}/locations/{location}/publishers/google/models/text-bison@001", - "predict_schemata": { - "instance_schema_uri": "gs://google-cloud-aiplatform/schema/predict/instance/text_generation_1.0.0.yaml", - "parameters_schema_uri": "gs://google-cloud-aiplatfrom/schema/predict/params/text_generation_1.0.0.yaml", - "prediction_schema_uri": "gs://google-cloud-aiplatform/schema/predict/prediction/text_generation_1.0.0.yaml", - }, -} - - -@pytest.fixture -def mock_serialize_model(): - with mock.patch.object( - any_serializer.AnySerializer, "serialize" - ) as mock_serialize_model: - yield mock_serialize_model - - -@pytest.fixture -def mock_vertex_model(): - model = mock.MagicMock(aiplatform.Model) - model.uri = _TEST_MODEL_GCS_URI - model.container_spec.image_uri = "us-docker.xxx/sklearn-cpu.1-0:latest" - model.labels = {"registered_by_vertex_ai": "true"} - yield model - - -@pytest.fixture -def mock_vertex_model_invalid(): - model = mock.MagicMock(aiplatform.Model) - model.uri = _TEST_MODEL_GCS_URI - model.container_spec.image_uri = "us-docker.xxx/sklearn-cpu.1-0:latest" - model.labels = {} - yield model - - -@pytest.fixture -def mock_timestamped_unique_name(): - with mock.patch.object( - utils, "timestamped_unique_name" - ) as mock_timestamped_unique_name: - mock_timestamped_unique_name.return_value = _TEST_UNIQUE_NAME - yield mock_timestamped_unique_name - - -@pytest.fixture -def mock_model_upload(mock_vertex_model): - with mock.patch.object(aiplatform.Model, "upload") as 
mock_model_upload: - mock_model_upload.return_value = mock_vertex_model - yield mock_model_upload - - -@pytest.fixture -def mock_get_vertex_model(mock_vertex_model): - with mock.patch.object(aiplatform, "Model") as mock_get_vertex_model: - mock_get_vertex_model.return_value = mock_vertex_model - yield mock_get_vertex_model - - -@pytest.fixture -def mock_get_vertex_model_invalid(mock_vertex_model_invalid): - with mock.patch.object(aiplatform, "Model") as mock_get_vertex_model: - mock_get_vertex_model.return_value = mock_vertex_model_invalid - yield mock_get_vertex_model - - -@pytest.fixture -def mock_deserialize_model(): - with mock.patch.object( - any_serializer.AnySerializer, "deserialize" - ) as mock_deserialize_model: - - mock_deserialize_model.side_effect = [ - _SKLEARN_MODEL, - mock.Mock(return_value=None), - ] - yield mock_deserialize_model - - -@pytest.fixture -def mock_deserialize_model_exception(): - with mock.patch.object( - any_serializer.AnySerializer, "deserialize" - ) as mock_deserialize_model_exception: - mock_deserialize_model_exception.side_effect = Exception - yield mock_deserialize_model_exception - - -@pytest.fixture -def mock_any_serializer_serialize_sklearn(): - with mock.patch.object( - any_serializer.AnySerializer, - "serialize", - side_effect=[ - { - serializers_base.SERIALIZATION_METADATA_DEPENDENCIES_KEY: [ - f"scikit-learn=={sklearn.__version__}" - ] - }, - { - serializers_base.SERIALIZATION_METADATA_DEPENDENCIES_KEY: [ - f"numpy=={np.__version__}", - f"cloudpickle=={cloudpickle.__version__}", - ] - }, - { - serializers_base.SERIALIZATION_METADATA_DEPENDENCIES_KEY: [ - f"numpy=={np.__version__}", - f"cloudpickle=={cloudpickle.__version__}", - ] - }, - { - serializers_base.SERIALIZATION_METADATA_DEPENDENCIES_KEY: [ - f"numpy=={np.__version__}", - f"cloudpickle=={cloudpickle.__version__}", - ] - }, - ], - ) as mock_any_serializer_serialize: - yield mock_any_serializer_serialize - - -_TEST_PROJECT = "test-project" -_TEST_LOCATION = "us-central1" -_TEST_PARENT = f"projects/{_TEST_PROJECT}/locations/{_TEST_LOCATION}" -_TEST_DISPLAY_NAME = f"{_TEST_PARENT}/customJobs/12345" -_TEST_BUCKET_NAME = "gs://test_bucket" -_TEST_BASE_OUTPUT_DIR = f"{_TEST_BUCKET_NAME}/test_base_output_dir" - -_TEST_INPUTS = [ - "--arg_0=string_val_0", - "--arg_1=string_val_1", - "--arg_2=int_val_0", - "--arg_3=int_val_1", -] -_TEST_IMAGE_URI = "test_image_uri" -_TEST_MACHINE_TYPE = "test_machine_type" -_TEST_WORKER_POOL_SPEC = [ - { - "machine_spec": { - "machine_type": _TEST_MACHINE_TYPE, - }, - "replica_count": 1, - "container_spec": { - "image_uri": _TEST_IMAGE_URI, - "args": _TEST_INPUTS, - }, - } -] -_TEST_CUSTOM_JOB_PROTO = gca_custom_job.CustomJob( - display_name=_TEST_DISPLAY_NAME, - job_spec={ - "worker_pool_specs": _TEST_WORKER_POOL_SPEC, - "base_output_directory": gca_io.GcsDestination( - output_uri_prefix=_TEST_BASE_OUTPUT_DIR - ), - }, - labels={"trained_by_vertex_ai": "true"}, -) - - -@pytest.fixture -def mock_get_custom_job_pending(): - with mock.patch.object( - job_service_client.JobServiceClient, "get_custom_job" - ) as mock_get_custom_job: - - mock_get_custom_job.side_effect = [ - gca_custom_job.CustomJob( - name=_TEST_CUSTOM_JOB_RESOURCE_NAME, - state=gca_job_state.JobState.JOB_STATE_RUNNING, - display_name=_TEST_DISPLAY_NAME, - job_spec={ - "worker_pool_specs": _TEST_WORKER_POOL_SPEC, - "base_output_directory": gca_io.GcsDestination( - output_uri_prefix=_TEST_BASE_OUTPUT_DIR - ), - }, - labels={"trained_by_vertex_ai": "true"}, - ), - gca_custom_job.CustomJob( - 
name=_TEST_CUSTOM_JOB_RESOURCE_NAME, - state=gca_job_state.JobState.JOB_STATE_SUCCEEDED, - display_name=_TEST_DISPLAY_NAME, - job_spec={ - "worker_pool_specs": _TEST_WORKER_POOL_SPEC, - "base_output_directory": gca_io.GcsDestination( - output_uri_prefix=_TEST_BASE_OUTPUT_DIR - ), - }, - labels={"trained_by_vertex_ai": "true"}, - ), - ] - yield mock_get_custom_job - - -@pytest.fixture -def mock_get_custom_job_failed(): - with mock.patch.object( - job_service_client.JobServiceClient, "get_custom_job" - ) as mock_get_custom_job: - custom_job_proto = _TEST_CUSTOM_JOB_PROTO - custom_job_proto.name = _TEST_CUSTOM_JOB_RESOURCE_NAME - custom_job_proto.state = gca_job_state.JobState.JOB_STATE_FAILED - mock_get_custom_job.return_value = custom_job_proto - yield mock_get_custom_job - - -@pytest.fixture -def get_model_with_tuned_version_label_mock(): - with mock.patch.object( - model_service_client.ModelServiceClient, "get_model" - ) as get_model_mock: - get_model_mock.return_value = gca_model.Model( - display_name="test-display-name", - name=_TEST_TUNED_MODEL_NAME, - labels={"google-vertex-llm-tuning-base-model-id": "text-bison-001"}, - deployed_models=[ - deployed_model_ref_v1.DeployedModelRef( - endpoint=_TEST_TUNED_MODEL_ENDPOINT_NAME, - deployed_model_id=_TEST_TUNED_MODEL_NAME, - ) - ], - ) - yield get_model_mock - - -@pytest.fixture -def get_model_with_invalid_tuned_version_labels(): - with mock.patch.object( - model_service_client.ModelServiceClient, "get_model" - ) as get_model_mock: - get_model_mock.return_value = gca_model.Model( - display_name="test-display-name", - name=_TEST_TUNED_MODEL_NAME, - labels={ - "google-vertex-llm-tuning-base-model-id": "invalidlabel", - "another": "label", - }, - deployed_models=[ - deployed_model_ref_v1.DeployedModelRef( - endpoint=_TEST_TUNED_MODEL_ENDPOINT_NAME, - deployed_model_id=_TEST_TUNED_MODEL_NAME, - ) - ], - ) - yield get_model_mock - - -@pytest.fixture -def mock_get_publisher_model(): - with mock.patch.object( - model_garden_service_client.ModelGardenServiceClient, - "get_publisher_model", - return_value=gca_publisher_model.PublisherModel( - _TEXT_BISON_PUBLISHER_MODEL_DICT - ), - ) as mock_get_publisher_model: - yield mock_get_publisher_model - - -@pytest.mark.usefixtures("google_auth_mock") -class TestModelUtils: - def setup_method(self): - reload(aiplatform) - reload(vertexai) - - def teardown_method(self): - aiplatform.initializer.global_pool.shutdown(wait=True) - - @pytest.mark.usefixtures("mock_timestamped_unique_name") - def test_register_sklearn_model(self, mock_model_upload, mock_serialize_model): - vertexai.init( - project=_TEST_PROJECT, - location=_TEST_LOCATION, - staging_bucket=_TEST_BUCKET, - ) - vertex_model = vertexai.preview.register(_SKLEARN_MODEL) - - expected_display_name = ( - f"vertex-ai-registered-sklearn-model-{_TEST_UNIQUE_NAME}" - ) - expected_uri = f"{_TEST_BUCKET}/{expected_display_name}" - expected_container_uri = ( - aiplatform.helpers.get_prebuilt_prediction_container_uri( - framework="sklearn", - framework_version="1.0", - ) - ) - - assert vertex_model.uri == _TEST_MODEL_GCS_URI - mock_model_upload.assert_called_once_with( - display_name=expected_display_name, - artifact_uri=expected_uri, - serving_container_image_uri=expected_container_uri, - labels={"registered_by_vertex_ai": "true"}, - sync=True, - ) - assert 2 == mock_serialize_model.call_count - mock_serialize_model.assert_has_calls( - calls=[ - mock.call( - _SKLEARN_MODEL, - f"{expected_uri}/model.pkl", - ), - ], - any_order=True, - ) - - 
@pytest.mark.parametrize("use_gpu", [True, False]) - @pytest.mark.usefixtures("mock_timestamped_unique_name") - def test_register_tf_model(self, mock_model_upload, mock_serialize_model, use_gpu): - vertexai.init( - project=_TEST_PROJECT, - location=_TEST_LOCATION, - staging_bucket=_TEST_BUCKET, - ) - vertex_model = vertexai.preview.register(_TF_MODEL, use_gpu=use_gpu) - - expected_display_name = ( - f"vertex-ai-registered-tensorflow-model-{_TEST_UNIQUE_NAME}" - ) - expected_uri = f"{_TEST_BUCKET}/{expected_display_name}/saved_model" - expected_container_uri = ( - aiplatform.helpers.get_prebuilt_prediction_container_uri( - framework="tensorflow", - framework_version="2.11", - accelerator="gpu" if use_gpu else "cpu", - ) - ) - - assert vertex_model.uri == _TEST_MODEL_GCS_URI - mock_model_upload.assert_called_once_with( - display_name=expected_display_name, - artifact_uri=expected_uri, - serving_container_image_uri=expected_container_uri, - labels={"registered_by_vertex_ai": "true"}, - sync=True, - ) - assert 2 == mock_serialize_model.call_count - mock_serialize_model.assert_has_calls( - calls=[ - mock.call( - _TF_MODEL, - f"{expected_uri}", - save_format="tf", - ), - ], - any_order=True, - ) - - @pytest.mark.parametrize("use_gpu", [True, False]) - @pytest.mark.usefixtures("mock_timestamped_unique_name") - def test_register_pytorch_model( - self, mock_model_upload, mock_serialize_model, use_gpu - ): - vertexai.init( - project=_TEST_PROJECT, - location=_TEST_LOCATION, - staging_bucket=_TEST_BUCKET, - ) - vertex_model = vertexai.preview.register(_PYTORCH_MODEL, use_gpu=use_gpu) - - expected_display_name = ( - f"vertex-ai-registered-pytorch-model-{_TEST_UNIQUE_NAME}" - ) - expected_uri = f"{_TEST_BUCKET}/{expected_display_name}" - expected_container_uri = ( - aiplatform.helpers.get_prebuilt_prediction_container_uri( - framework="pytorch", - framework_version="1.12", - accelerator="gpu" if use_gpu else "cpu", - ) - ) - - assert vertex_model.uri == _TEST_MODEL_GCS_URI - mock_model_upload.assert_called_once_with( - display_name=expected_display_name, - artifact_uri=expected_uri, - serving_container_image_uri=expected_container_uri, - labels={"registered_by_vertex_ai": "true"}, - sync=True, - ) - - assert 2 == mock_serialize_model.call_count - mock_serialize_model.assert_has_calls( - calls=[ - mock.call( - _PYTORCH_MODEL, - f"{expected_uri}/model.mar", - ), - ], - any_order=True, - ) - - @pytest.mark.usefixtures("mock_get_vertex_model") - def test_local_model_from_pretrained_succeed(self, mock_deserialize_model): - vertexai.init( - project=_TEST_PROJECT, - location=_TEST_LOCATION, - staging_bucket=_TEST_BUCKET, - ) - - local_model = vertexai.preview.from_pretrained(model_name=_MODEL_RESOURCE_NAME) - assert local_model == _SKLEARN_MODEL - assert 2 == mock_deserialize_model.call_count - mock_deserialize_model.assert_has_calls( - calls=[ - mock.call( - f"{_TEST_MODEL_GCS_URI}/model.pkl", - ), - ], - any_order=True, - ) - - @pytest.mark.usefixtures( - "mock_get_vertex_model_invalid", - ) - def test_local_model_from_pretrained_fail(self): - vertexai.init( - project=_TEST_PROJECT, - location=_TEST_LOCATION, - staging_bucket=_TEST_BUCKET, - ) - - with pytest.raises(ValueError): - vertexai.preview.from_pretrained(model_name=_MODEL_RESOURCE_NAME) - - @pytest.mark.usefixtures( - "mock_get_vertex_model", - "mock_get_custom_job_succeeded", - ) - def test_custom_job_from_pretrained_succeed(self, mock_deserialize_model): - vertexai.init( - project=_TEST_PROJECT, - location=_TEST_LOCATION, - 
staging_bucket=_TEST_BUCKET, - ) - - local_model = vertexai.preview.from_pretrained( - custom_job_name=_TEST_CUSTOM_JOB_RESOURCE_NAME - ) - assert local_model == _SKLEARN_MODEL - assert 2 == mock_deserialize_model.call_count - - mock_deserialize_model.assert_has_calls( - calls=[ - mock.call( - f"{_TEST_BASE_OUTPUT_DIR}/output/output_estimator", - ), - ], - any_order=True, - ) - - @pytest.mark.usefixtures( - "mock_get_vertex_model", - "mock_get_custom_job_pending", - "mock_cloud_logging_list_entries", - ) - def test_custom_job_from_pretrained_logs_and_blocks_until_complete_on_pending_job( - self, mock_deserialize_model - ): - vertexai.init( - project=_TEST_PROJECT, - location=_TEST_LOCATION, - staging_bucket=_TEST_BUCKET, - ) - - local_model = vertexai.preview.from_pretrained( - custom_job_name=_TEST_CUSTOM_JOB_RESOURCE_NAME - ) - assert local_model == _SKLEARN_MODEL - assert 2 == mock_deserialize_model.call_count - - mock_deserialize_model.assert_has_calls( - calls=[ - mock.call( - f"{_TEST_BASE_OUTPUT_DIR}/output/output_estimator", - ), - ], - any_order=True, - ) - - @pytest.mark.usefixtures("mock_get_vertex_model", "mock_get_custom_job_failed") - def test_custom_job_from_pretrained_fails_on_errored_job(self): - vertexai.init( - project=_TEST_PROJECT, - location=_TEST_LOCATION, - staging_bucket=_TEST_BUCKET, - ) - - with pytest.raises(ValueError) as err_msg: - vertexai.preview.from_pretrained( - custom_job_name=_TEST_CUSTOM_JOB_RESOURCE_NAME - ) - assert "did not complete" in err_msg - - @pytest.mark.usefixtures( - "mock_get_publisher_model", - ) - def test_from_pretrained_with_preview_foundation_model(self): - vertexai.init( - project=_TEST_PROJECT, - location=_TEST_LOCATION, - staging_bucket=_TEST_BUCKET, - ) - - foundation_model = vertexai.preview.from_pretrained( - foundation_model_name="text-bison@001" - ) - assert isinstance(foundation_model, language_models._PreviewTextGenerationModel) - - @pytest.mark.usefixtures( - "get_model_with_tuned_version_label_mock", - ) - def test_from_pretrained_with_preview_tuned_mg_model( - self, mock_get_publisher_model - ): - vertexai.init( - project=_TEST_PROJECT, - location=_TEST_LOCATION, - staging_bucket=_TEST_BUCKET, - ) - - tuned_model = vertexai.preview.from_pretrained(model_name=_TEST_ID) - assert mock_get_publisher_model.call_count == 1 - assert isinstance(tuned_model, language_models._PreviewTextGenerationModel) - assert tuned_model._endpoint_name == _TEST_TUNED_MODEL_ENDPOINT_NAME - assert tuned_model._model_id == "publishers/google/models/text-bison@001" - - @pytest.mark.usefixtures( - "mock_get_publisher_model", - "get_model_with_invalid_tuned_version_labels", - ) - def test_from_pretrained_raises_on_invalid_model_registry_model(self): - vertexai.init( - project=_TEST_PROJECT, - location=_TEST_LOCATION, - staging_bucket=_TEST_BUCKET, - ) - - with pytest.raises(ValueError): - vertexai.preview.from_pretrained(model_name=_TEST_ID) - - def test_from_pretrained_raises_with_more_than_one_arg(self): - vertexai.init( - project=_TEST_PROJECT, - location=_TEST_LOCATION, - staging_bucket=_TEST_BUCKET, - ) - - with pytest.raises(ValueError): - vertexai.preview.from_pretrained( - model_name=_TEST_ID, foundation_model_name="text-bison@001" - ) - - def test_from_pretrained_raises_with_no_args_passed(self): - vertexai.init( - project=_TEST_PROJECT, - location=_TEST_LOCATION, - staging_bucket=_TEST_BUCKET, - ) - - with pytest.raises(ValueError): - vertexai.preview.from_pretrained() diff --git a/tests/unit/vertexai/test_persistent_resource_util.py 
b/tests/unit/vertexai/test_persistent_resource_util.py deleted file mode 100644 index ea88fb148f..0000000000 --- a/tests/unit/vertexai/test_persistent_resource_util.py +++ /dev/null @@ -1,231 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2023 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# - -import importlib - -from google.api_core import operation as ga_operation -from google.cloud import aiplatform -import vertexai -from vertexai.preview.developer import remote_specs -from google.cloud.aiplatform_v1beta1.services.persistent_resource_service import ( - PersistentResourceServiceClient, -) -from google.cloud.aiplatform_v1beta1.types import persistent_resource_service -from google.cloud.aiplatform_v1beta1.types.machine_resources import DiskSpec -from google.cloud.aiplatform_v1beta1.types.machine_resources import ( - MachineSpec, -) -from google.cloud.aiplatform_v1beta1.types.persistent_resource import ( - PersistentResource, -) -from google.cloud.aiplatform_v1beta1.types.persistent_resource import ( - ResourcePool, - ResourceRuntimeSpec, - ServiceAccountSpec, -) -from vertexai.preview._workflow.executor import ( - persistent_resource_util, -) -from vertexai.preview._workflow.shared import configs -import mock -import pytest - - -_TEST_PROJECT = "test-project" -_TEST_LOCATION = "us-central1" -_TEST_PARENT = f"projects/{_TEST_PROJECT}/locations/{_TEST_LOCATION}" -_TEST_CLUSTER_NAME = "test-cluster" -_TEST_CLUSTER_CONFIG = configs.PersistentResourceConfig(name=_TEST_CLUSTER_NAME) -_TEST_CLUSTER_RESOURCE_NAME = f"{_TEST_PARENT}/persistentResources/{_TEST_CLUSTER_NAME}" - - -_TEST_PERSISTENT_RESOURCE_ERROR = PersistentResource() -_TEST_PERSISTENT_RESOURCE_ERROR.state = "ERROR" - -resource_pool_0 = ResourcePool( - machine_spec=MachineSpec(machine_type="n1-standard-4"), - disk_spec=DiskSpec( - boot_disk_type="pd-ssd", - boot_disk_size_gb=100, - ), - replica_count=1, -) -resource_pool_1 = ResourcePool( - machine_spec=MachineSpec( - machine_type="n1-standard-8", - accelerator_type="NVIDIA_TESLA_T4", - accelerator_count=1, - ), - disk_spec=DiskSpec( - boot_disk_type="pd-ssd", - boot_disk_size_gb=100, - ), - replica_count=2, -) -_TEST_REQUEST_RUNNING_DEFAULT = PersistentResource( - resource_pools=[resource_pool_0], - resource_runtime_spec=ResourceRuntimeSpec( - service_account_spec=ServiceAccountSpec(enable_custom_service_account=False), - ), -) -_TEST_REQUEST_RUNNING_CUSTOM = PersistentResource( - resource_runtime_spec=ResourceRuntimeSpec( - service_account_spec=ServiceAccountSpec(enable_custom_service_account=False), - ), - resource_pools=[resource_pool_0, resource_pool_1], -) - -_TEST_PERSISTENT_RESOURCE_RUNNING = PersistentResource() -_TEST_PERSISTENT_RESOURCE_RUNNING.state = "RUNNING" - -# user-configured remote_specs.ResourcePool -remote_specs_resource_pool_0 = remote_specs.ResourcePool(replica_count=1) -remote_specs_resource_pool_1 = remote_specs.ResourcePool( - machine_type="n1-standard-8", - replica_count=2, - accelerator_type="NVIDIA_TESLA_T4", - 
accelerator_count=1, -) -_TEST_CUSTOM_RESOURCE_POOLS = [ - remote_specs_resource_pool_0, - remote_specs_resource_pool_1, -] - - -@pytest.fixture -def create_persistent_resource_custom_mock(): - with mock.patch.object( - PersistentResourceServiceClient, - "create_persistent_resource", - ) as create_persistent_resource_custom_mock: - create_persistent_resource_lro_mock = mock.Mock(ga_operation.Operation) - create_persistent_resource_lro_mock.result.return_value = ( - _TEST_REQUEST_RUNNING_CUSTOM - ) - create_persistent_resource_custom_mock.return_value = ( - create_persistent_resource_lro_mock - ) - yield create_persistent_resource_custom_mock - - -@pytest.fixture -def persistent_resource_error_mock(): - with mock.patch.object( - PersistentResourceServiceClient, - "get_persistent_resource", - ) as persistent_resource_error_mock: - persistent_resource_error_mock.return_value = _TEST_PERSISTENT_RESOURCE_ERROR - yield persistent_resource_error_mock - - -@pytest.fixture -def create_persistent_resource_exception_mock(): - with mock.patch.object( - PersistentResourceServiceClient, - "create_persistent_resource", - ) as create_persistent_resource_exception_mock: - create_persistent_resource_exception_mock.side_effect = Exception - yield create_persistent_resource_exception_mock - - -@pytest.mark.usefixtures("google_auth_mock") -class TestPersistentResourceUtils: - def setup_method(self): - importlib.reload(vertexai.preview.initializer) - importlib.reload(vertexai.preview) - - def teardown_method(self): - aiplatform.initializer.global_pool.shutdown(wait=True) - - def test_check_persistent_resource_true(self, persistent_resource_running_mock): - expected = persistent_resource_util.check_persistent_resource( - _TEST_CLUSTER_RESOURCE_NAME - ) - - assert expected - - request = persistent_resource_service.GetPersistentResourceRequest( - name=_TEST_CLUSTER_RESOURCE_NAME, - ) - persistent_resource_running_mock.assert_called_once_with(request) - - def test_check_persistent_resource_false(self, persistent_resource_exception_mock): - with pytest.raises(Exception): - expected = persistent_resource_util.check_persistent_resource( - _TEST_CLUSTER_RESOURCE_NAME - ) - - assert not expected - - request = persistent_resource_service.GetPersistentResourceRequest( - name=_TEST_CLUSTER_RESOURCE_NAME, - ) - persistent_resource_exception_mock.assert_called_once_with(request) - - @pytest.mark.usefixtures("persistent_resource_error_mock") - def test_check_persistent_resource_error(self): - with pytest.raises(ValueError) as e: - persistent_resource_util.check_persistent_resource( - _TEST_CLUSTER_RESOURCE_NAME - ) - - e.match( - regexp=r'(\'The existing cluster `\', \'projects/test-project/locations/us-central1/persistentResources/test-cluster\', "` isn\'t running, please specify a different cluster_name.")' - ) - - @pytest.mark.usefixtures("persistent_resource_running_mock") - def test_create_persistent_resource_default_success( - self, create_persistent_resource_default_mock - ): - persistent_resource_util.create_persistent_resource(_TEST_CLUSTER_RESOURCE_NAME) - - request = persistent_resource_service.CreatePersistentResourceRequest( - parent=_TEST_PARENT, - persistent_resource=_TEST_REQUEST_RUNNING_DEFAULT, - persistent_resource_id=_TEST_CLUSTER_NAME, - ) - - create_persistent_resource_default_mock.assert_called_with( - request, - ) - - @pytest.mark.usefixtures("persistent_resource_running_mock") - def test_create_persistent_resource_custom_success( - self, create_persistent_resource_custom_mock - ): - 
persistent_resource_util.create_persistent_resource( - cluster_resource_name=_TEST_CLUSTER_RESOURCE_NAME, - resource_pools=_TEST_CUSTOM_RESOURCE_POOLS, - ) - - request = persistent_resource_service.CreatePersistentResourceRequest( - parent=_TEST_PARENT, - persistent_resource=_TEST_REQUEST_RUNNING_CUSTOM, - persistent_resource_id=_TEST_CLUSTER_NAME, - ) - - create_persistent_resource_custom_mock.assert_called_with( - request, - ) - - @pytest.mark.usefixtures("create_persistent_resource_exception_mock") - def test_create_ray_cluster_state_error(self): - with pytest.raises(ValueError) as e: - persistent_resource_util.create_persistent_resource( - _TEST_CLUSTER_RESOURCE_NAME - ) - - e.match(regexp=r"Failed in cluster creation due to: ") diff --git a/tests/unit/vertexai/test_remote_container_training.py b/tests/unit/vertexai/test_remote_container_training.py deleted file mode 100644 index 2b156b8ba6..0000000000 --- a/tests/unit/vertexai/test_remote_container_training.py +++ /dev/null @@ -1,586 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2023 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -"""Tests for _workflow/executor/remote_container_training.py. -""" - -from importlib import reload -import inspect -import os -import re -import tempfile - -import cloudpickle -from google.cloud import aiplatform -import vertexai -from google.cloud.aiplatform.compat.types import ( - custom_job as gca_custom_job_compat, -) -from google.cloud.aiplatform.compat.types import io as gca_io_compat -from vertexai.preview._workflow.driver import remote -from vertexai.preview._workflow.executor import ( - remote_container_training, -) -from vertexai.preview._workflow.shared import configs -from vertexai.preview.developer import remote_specs -import pandas as pd -import pytest - - -# Custom job constants. 
-_TEST_INPUTS = [ - "--arg_0=string_val_0", - "--arg_1=string_val_1", - "--arg_2=int_val_0", - "--arg_3=int_val_1", -] -_TEST_IMAGE_URI = "test_image_uri" -_TEST_MACHINE_TYPE = "n1-standard-4" - -_TEST_PROJECT = "test-project" -_TEST_LOCATION = "us-central1" - -_TEST_BUCKET_NAME = "gs://test_bucket" -_TEST_BASE_OUTPUT_DIR = f"{_TEST_BUCKET_NAME}/test_base_output_dir" - -_TEST_DISPLAY_NAME = "test_display_name" -_TEST_STAGING_BUCKET = "gs://test-staging-bucket" -_TEST_CONTAINER_URI = "gcr.io/test-image" -_TEST_REPLICA_COUNT = 1 -_TEST_ACCELERATOR_COUNT = 8 -_TEST_ACCELERATOR_TYPE = "NVIDIA_TESLA_K80" -_TEST_BOOT_DISK_TYPE = "test_boot_disk_type" -_TEST_BOOT_DISK_SIZE_GB = 10 -_TEST_REMOTE_CONTAINER_TRAINING_CONFIG = configs.DistributedTrainingConfig( - display_name=_TEST_DISPLAY_NAME, - staging_bucket=_TEST_STAGING_BUCKET, - machine_type=_TEST_MACHINE_TYPE, - replica_count=_TEST_REPLICA_COUNT, - accelerator_count=_TEST_ACCELERATOR_COUNT, - accelerator_type=_TEST_ACCELERATOR_TYPE, - boot_disk_type=_TEST_BOOT_DISK_TYPE, - boot_disk_size_gb=_TEST_BOOT_DISK_SIZE_GB, -) - -_TEST_WORKER_POOL_SPECS = remote_specs.WorkerPoolSpecs( - chief=remote_specs.WorkerPoolSpec( - machine_type=_TEST_MACHINE_TYPE, - replica_count=_TEST_REPLICA_COUNT, - accelerator_count=_TEST_ACCELERATOR_COUNT, - accelerator_type=_TEST_ACCELERATOR_TYPE, - boot_disk_type=_TEST_BOOT_DISK_TYPE, - boot_disk_size_gb=_TEST_BOOT_DISK_SIZE_GB, - ) -) - -_TEST_REMOTE_CONTAINER_TRAINING_CONFIG_WORKER_POOL = configs.DistributedTrainingConfig( - display_name=_TEST_DISPLAY_NAME, - staging_bucket=_TEST_STAGING_BUCKET, - worker_pool_specs=_TEST_WORKER_POOL_SPECS, -) - -_TEST_REMOTE_CONTAINER_TRAINING_CONFIG_INVALID = configs.DistributedTrainingConfig( - display_name=_TEST_DISPLAY_NAME, - staging_bucket=_TEST_STAGING_BUCKET, - machine_type=_TEST_MACHINE_TYPE, - replica_count=_TEST_REPLICA_COUNT, - accelerator_count=_TEST_ACCELERATOR_COUNT, - accelerator_type=_TEST_ACCELERATOR_TYPE, - boot_disk_type=_TEST_BOOT_DISK_TYPE, - boot_disk_size_gb=_TEST_BOOT_DISK_SIZE_GB, - worker_pool_specs=_TEST_WORKER_POOL_SPECS, -) - - -# pylint: disable=protected-access,missing-function-docstring -class TestRemoteContainerTrain: - """Tests for remote_container_train and helper functions.""" - - def setup_method(self): - reload(aiplatform.initializer) - reload(aiplatform) - reload(vertexai.preview.initializer) - reload(vertexai) - - def test_generate_worker_pool_specs_single_machine(self): - expected_worker_pool_specs = [ - { - "replica_count": 1, - "machine_spec": { - "machine_type": _TEST_MACHINE_TYPE, - }, - "disk_spec": { - "boot_disk_type": _TEST_BOOT_DISK_TYPE, - "boot_disk_size_gb": _TEST_BOOT_DISK_SIZE_GB, - }, - "container_spec": { - "image_uri": _TEST_IMAGE_URI, - "args": _TEST_INPUTS, - }, - } - ] - - worker_pool_specs = remote_container_training._generate_worker_pool_specs( - image_uri=_TEST_IMAGE_URI, - inputs=_TEST_INPUTS, - machine_type=_TEST_MACHINE_TYPE, - replica_count=1, - boot_disk_type=_TEST_BOOT_DISK_TYPE, - boot_disk_size_gb=_TEST_BOOT_DISK_SIZE_GB, - ) - - assert worker_pool_specs == expected_worker_pool_specs - - def test_generate_worker_pool_specs_distributed(self): - expected_worker_pool_specs = [ - { - "replica_count": 1, - "machine_spec": { - "machine_type": _TEST_MACHINE_TYPE, - }, - "disk_spec": { - "boot_disk_type": "pd-ssd", - "boot_disk_size_gb": 100, - }, - "container_spec": { - "image_uri": _TEST_IMAGE_URI, - "args": _TEST_INPUTS, - }, - }, - { - "replica_count": 3, - "machine_spec": { - "machine_type": _TEST_MACHINE_TYPE, - }, 
- "disk_spec": { - "boot_disk_type": "pd-ssd", - "boot_disk_size_gb": 100, - }, - "container_spec": { - "image_uri": _TEST_IMAGE_URI, - "args": _TEST_INPUTS, - }, - }, - ] - - worker_pool_specs = remote_container_training._generate_worker_pool_specs( - image_uri=_TEST_IMAGE_URI, - inputs=_TEST_INPUTS, - replica_count=4, - machine_type=_TEST_MACHINE_TYPE, - ) - - assert worker_pool_specs == expected_worker_pool_specs - - def test_generate_worker_pool_specs_gpu(self): - test_accelerator_type = "NVIDIA_TESLA_K80" - test_accelerator_count = 8 - - expected_worker_pool_specs = [ - { - "replica_count": 1, - "machine_spec": { - "machine_type": _TEST_MACHINE_TYPE, - "accelerator_type": test_accelerator_type, - "accelerator_count": test_accelerator_count, - }, - "disk_spec": { - "boot_disk_type": "pd-ssd", - "boot_disk_size_gb": 100, - }, - "container_spec": { - "image_uri": _TEST_IMAGE_URI, - "args": _TEST_INPUTS, - }, - } - ] - - worker_pool_specs = remote_container_training._generate_worker_pool_specs( - image_uri=_TEST_IMAGE_URI, - inputs=_TEST_INPUTS, - machine_type=_TEST_MACHINE_TYPE, - accelerator_count=test_accelerator_count, - accelerator_type=test_accelerator_type, - ) - - assert worker_pool_specs == expected_worker_pool_specs - - def test_generate_worker_pool_specs_invalid(self): - with pytest.raises(ValueError) as e: - remote_container_training._generate_worker_pool_specs( - image_uri=_TEST_IMAGE_URI, - inputs=_TEST_INPUTS, - replica_count=0, - machine_type=_TEST_MACHINE_TYPE, - ) - expected_err_msg = "replica_count must be a positive number but is 0." - assert str(e.value) == expected_err_msg - - # pylint: disable=missing-function-docstring,protected-access - @pytest.mark.parametrize( - "remote_config", - [ - (_TEST_REMOTE_CONTAINER_TRAINING_CONFIG), - (_TEST_REMOTE_CONTAINER_TRAINING_CONFIG_WORKER_POOL), - ], - ) - @pytest.mark.usefixtures( - "google_auth_mock", "mock_uuid", "mock_get_custom_job_succeeded" - ) - def test_remote_container_train( - self, - mock_blob_upload_from_filename, - mock_create_custom_job, - mock_named_temp_file, - mock_blob_download_to_filename, - remote_config: configs.DistributedTrainingConfig, - ): - # pylint: disable=missing-class-docstring - class MockTrainer(remote.VertexModel): - def __init__(self, input_0, input_1): - super().__init__() - sig = inspect.signature(self.__init__) - self._binding = sig.bind(input_0, input_1).arguments - self.output_0 = None - self.output_1 = None - - # pylint: disable=invalid-name,unused-argument,missing-function-docstring - @vertexai.preview.developer.mark._remote_container_train( - image_uri=_TEST_IMAGE_URI, - additional_data=[ - remote_specs._InputParameterSpec("input_0"), - remote_specs._InputParameterSpec( - "input_1", serializer="cloudpickle" - ), - remote_specs._InputParameterSpec("X", serializer="parquet"), - remote_specs._OutputParameterSpec("output_0"), - remote_specs._OutputParameterSpec( - "output_1", deserializer="cloudpickle" - ), - ], - remote_config=remote_config, - ) - def fit(self, X): - self.output_0 = int(self.output_0) - - def test_input_1(x): - return x - - test_trainer = MockTrainer( - input_0="test_input_0", - input_1=test_input_1, - ) - test_data = pd.DataFrame(data={"col_0": [0, 1], "col_1": [2, 3]}) - test_output_0 = 10 - - def test_output_1(x): - return x + 1 - - assert test_trainer.fit._remote_executor is remote_container_training.train - - with tempfile.TemporaryDirectory() as tmp_dir: - # Sets up file mocks - test_input_1_path = os.path.join(tmp_dir, "input_1") - test_input_1_handler = 
open(test_input_1_path, "wb") - - test_serialized_path = os.path.join(tmp_dir, "serialized") - test_serialized_handler = open(test_serialized_path, "wb") - - test_metadata_path = os.path.join(tmp_dir, "metadata") - test_metadata_handler = open(test_metadata_path, "wb") - - test_output_0_path = os.path.join(tmp_dir, "output_0") - with open(test_output_0_path, "w") as f: - f.write(f"{test_output_0}") - test_output_0_handler = open(test_output_0_path, "r") - - test_output_1_path = os.path.join(tmp_dir, "output_1") - with open(test_output_1_path, "wb") as f: - f.write(cloudpickle.dumps(test_output_1)) - test_output_1_handler = open(test_output_1_path, "rb") - - (mock_named_temp_file.return_value.__enter__.side_effect) = [ - test_input_1_handler, - test_serialized_handler, - test_metadata_handler, - test_output_0_handler, - test_output_1_handler, - ] - - # Calls the decorated function - aiplatform.init(project=_TEST_PROJECT, location=_TEST_LOCATION) - test_trainer.fit(test_data) - - # Checks the created custom job and outputs - expected_inputs = [ - "--input_0=test_input_0", - f"--input_1={_TEST_STAGING_BUCKET}/input/input_1", - f"--X={_TEST_STAGING_BUCKET}/input/X", - f"--output_0={_TEST_STAGING_BUCKET}/output/output_0", - f"--output_1={_TEST_STAGING_BUCKET}/output/output_1", - ] - - assert mock_blob_upload_from_filename.call_count == 3 - assert mock_blob_download_to_filename.call_count == 2 - - expected_worker_pool_specs = [ - { - "replica_count": 1, - "machine_spec": { - "machine_type": _TEST_MACHINE_TYPE, - "accelerator_type": _TEST_ACCELERATOR_TYPE, - "accelerator_count": _TEST_ACCELERATOR_COUNT, - }, - "disk_spec": { - "boot_disk_type": _TEST_BOOT_DISK_TYPE, - "boot_disk_size_gb": _TEST_BOOT_DISK_SIZE_GB, - }, - "container_spec": { - "image_uri": _TEST_IMAGE_URI, - "args": expected_inputs, - }, - } - ] - expected_custom_job = gca_custom_job_compat.CustomJob( - display_name=f"MockTrainer-{_TEST_DISPLAY_NAME}-0", - job_spec=gca_custom_job_compat.CustomJobSpec( - worker_pool_specs=expected_worker_pool_specs, - base_output_directory=gca_io_compat.GcsDestination( - output_uri_prefix=os.path.join(_TEST_STAGING_BUCKET, "custom_job"), - ), - ), - ) - mock_create_custom_job.assert_called_once_with( - parent=f"projects/{_TEST_PROJECT}/locations/{_TEST_LOCATION}", - custom_job=expected_custom_job, - timeout=None, - ) - - assert test_trainer.output_0 == test_output_0 - # pylint: disable=not-callable - assert test_trainer.output_1(1) == test_output_1(1) - - # pylint: disable=missing-function-docstring,protected-access - def test_remote_container_train_invalid_additional_data(self): - # pylint: disable=missing-class-docstring - class MockTrainer(remote.VertexModel): - def __init__(self): - super().__init__() - self._binding = {} - - # pylint: disable=invalid-name,missing-function-docstring - @vertexai.preview.developer.mark._remote_container_train( - image_uri=_TEST_IMAGE_URI, - additional_data=["invalid"], - remote_config=configs.DistributedTrainingConfig( - staging_bucket=_TEST_STAGING_BUCKET - ), - ) - def fit(self): - return - - test_trainer = MockTrainer() - assert test_trainer.fit._remote_executor is remote_container_training.train - - with pytest.raises(ValueError, match="Invalid data type"): - test_trainer.fit() - - @pytest.mark.usefixtures( - "google_auth_mock", "mock_uuid", "mock_get_custom_job_succeeded" - ) - def test_remote_container_train_invalid_local(self): - # pylint: disable=missing-class-docstring - class MockTrainer(remote.VertexModel): - def __init__(self): - super().__init__() - 
self._binding = {} - - # pylint: disable=invalid-name,missing-function-docstring - @vertexai.preview.developer.mark._remote_container_train( - image_uri=_TEST_IMAGE_URI, - additional_data=[], - remote_config=configs.DistributedTrainingConfig( - staging_bucket=_TEST_STAGING_BUCKET - ), - ) - def fit(self): - return - - test_trainer = MockTrainer() - assert test_trainer.fit._remote_executor is remote_container_training.train - test_trainer.fit.vertex.remote = False - with pytest.raises( - ValueError, - match="Remote container train is only supported for remote mode.", - ): - test_trainer.fit() - - # pylint: disable=missing-function-docstring,protected-access - @pytest.mark.usefixtures( - "google_auth_mock", "mock_uuid", "mock_get_custom_job_succeeded" - ) - def test_remote_container_train_default_config(self, mock_create_custom_job): - class MockTrainer(remote.VertexModel): - def __init__(self): - super().__init__() - self._binding = {} - - # pylint: disable=invalid-name,missing-function-docstring - @vertexai.preview.developer.mark._remote_container_train( - image_uri=_TEST_IMAGE_URI, - additional_data=[], - ) - def fit(self): - return - - test_trainer = MockTrainer() - - aiplatform.init( - project=_TEST_PROJECT, - location=_TEST_LOCATION, - staging_bucket=_TEST_STAGING_BUCKET, - ) - - test_trainer.fit() - - expected_display_name = "MockTrainer-remote-fit" - expected_worker_pool_specs = [ - { - "replica_count": 1, - "machine_spec": { - "machine_type": remote_container_training._DEFAULT_MACHINE_TYPE, - "accelerator_type": ( - remote_container_training._DEFAULT_ACCELERATOR_TYPE - ), - "accelerator_count": ( - remote_container_training._DEFAULT_ACCELERATOR_COUNT - ), - }, - "disk_spec": { - "boot_disk_type": remote_container_training._DEFAULT_BOOT_DISK_TYPE, - "boot_disk_size_gb": ( - remote_container_training._DEFAULT_BOOT_DISK_SIZE_GB - ), - }, - "container_spec": { - "image_uri": _TEST_IMAGE_URI, - "args": [], - }, - } - ] - expected_custom_job = gca_custom_job_compat.CustomJob( - display_name=f"{expected_display_name}-0", - job_spec=gca_custom_job_compat.CustomJobSpec( - worker_pool_specs=expected_worker_pool_specs, - base_output_directory=gca_io_compat.GcsDestination( - output_uri_prefix=os.path.join(_TEST_STAGING_BUCKET, "custom_job"), - ), - ), - ) - mock_create_custom_job.assert_called_once_with( - parent=f"projects/{_TEST_PROJECT}/locations/{_TEST_LOCATION}", - custom_job=expected_custom_job, - timeout=None, - ) - - @pytest.mark.usefixtures( - "google_auth_mock", "mock_uuid", "mock_get_custom_job_succeeded" - ) - def test_remote_container_train_job_dir(self, mock_create_custom_job): - class MockTrainer(remote.VertexModel): - def __init__(self): - super().__init__() - self._binding = {"job_dir": ""} - - # pylint: disable=invalid-name,missing-function-docstring - @vertexai.preview.developer.mark._remote_container_train( - image_uri=_TEST_IMAGE_URI, - additional_data=[remote_specs._InputParameterSpec("job_dir")], - remote_config=configs.DistributedTrainingConfig( - staging_bucket=_TEST_STAGING_BUCKET - ), - ) - def fit(self): - return - - test_trainer = MockTrainer() - - aiplatform.init(project=_TEST_PROJECT, location=_TEST_LOCATION) - - test_trainer.fit() - - expected_display_name = "MockTrainer-remote-fit" - expected_job_dir = os.path.join(_TEST_STAGING_BUCKET, "custom_job") - expected_worker_pool_specs = [ - { - "replica_count": 1, - "machine_spec": { - "machine_type": remote_container_training._DEFAULT_MACHINE_TYPE, - "accelerator_type": 
remote_container_training._DEFAULT_ACCELERATOR_TYPE, - "accelerator_count": remote_container_training._DEFAULT_ACCELERATOR_COUNT, - }, - "disk_spec": { - "boot_disk_type": remote_container_training._DEFAULT_BOOT_DISK_TYPE, - "boot_disk_size_gb": remote_container_training._DEFAULT_BOOT_DISK_SIZE_GB, - }, - "container_spec": { - "image_uri": _TEST_IMAGE_URI, - "args": [f"--job_dir={expected_job_dir}"], - }, - } - ] - expected_custom_job = gca_custom_job_compat.CustomJob( - display_name=f"{expected_display_name}-0", - job_spec=gca_custom_job_compat.CustomJobSpec( - worker_pool_specs=expected_worker_pool_specs, - base_output_directory=gca_io_compat.GcsDestination( - output_uri_prefix=os.path.join(_TEST_STAGING_BUCKET, "custom_job"), - ), - ), - ) - mock_create_custom_job.assert_called_once_with( - parent=f"projects/{_TEST_PROJECT}/locations/{_TEST_LOCATION}", - custom_job=expected_custom_job, - timeout=None, - ) - - @pytest.mark.usefixtures( - "google_auth_mock", "mock_uuid", "mock_get_custom_job_succeeded" - ) - def test_remote_container_train_invalid_remote_config(self): - # pylint: disable=missing-class-docstring - class MockTrainer(remote.VertexModel): - def __init__(self): - super().__init__() - self._binding = {} - - # pylint: disable=invalid-name,missing-function-docstring - @vertexai.preview.developer.mark._remote_container_train( - image_uri=_TEST_IMAGE_URI, - additional_data=[], - remote_config=_TEST_REMOTE_CONTAINER_TRAINING_CONFIG_INVALID, - ) - def fit(self): - return - - test_trainer = MockTrainer() - assert test_trainer.fit._remote_executor is remote_container_training.train - with pytest.raises( - ValueError, - match=re.escape( - "Cannot specify both 'worker_pool_specs' and ['machine_type', 'accelerator_type', 'accelerator_count', 'replica_count', 'boot_disk_type', 'boot_disk_size_gb']." - ), - ): - test_trainer.fit() diff --git a/tests/unit/vertexai/test_remote_prediction.py b/tests/unit/vertexai/test_remote_prediction.py deleted file mode 100644 index 81b2816913..0000000000 --- a/tests/unit/vertexai/test_remote_prediction.py +++ /dev/null @@ -1,139 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2023 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-# - -from importlib import reload -import inspect -from unittest.mock import patch - -from google.cloud import aiplatform -import vertexai -from vertexai.preview._workflow.executor import prediction -from vertexai.preview._workflow.executor import training -from vertexai.preview._workflow.shared import configs - -import pytest -from sklearn.datasets import load_iris -from sklearn.linear_model import _logistic -from sklearn.model_selection import train_test_split - - -# vertexai constants -_TEST_PROJECT = "test-project" -_TEST_PROJECT_NUMBER = 123 -_TEST_LOCATION = "us-central1" -_TEST_PARENT = f"projects/{_TEST_PROJECT}/locations/{_TEST_LOCATION}" -_TEST_BUCKET_NAME = "gs://test-bucket" - -# dataset constants -dataset = load_iris() -_X_TRAIN, _X_TEST, _Y_TRAIN, _Y_TEST = train_test_split( - dataset.data, dataset.target, test_size=0.2, random_state=42 -) - -# config constants -_TEST_CONTAINER_URI = "gcr.io/custom-image" -_TEST_DISPLAY_NAME = "test-display-name" - - -@pytest.fixture -def mock_remote_training(): - with patch.object(training, "remote_training") as mock_remote_training: - mock_remote_training.return_value = _Y_TEST - yield mock_remote_training - - -@pytest.mark.usefixtures("google_auth_mock") -class TestRemotePrediction: - def setup_method(self): - reload(vertexai) - reload(vertexai.preview.initializer) - reload(_logistic) - - def teardown_method(self): - aiplatform.initializer.global_pool.shutdown(wait=True) - - def test_remote_prediction_sklearn(self, mock_remote_training): - vertexai.init( - project=_TEST_PROJECT, - location=_TEST_LOCATION, - staging_bucket=_TEST_BUCKET_NAME, - ) - vertexai.preview.init(remote=True) - - LogisticRegression = vertexai.preview.remote(_logistic.LogisticRegression) - model = LogisticRegression() - - model.predict.vertex.remote = True - model.predict.vertex.remote_config.staging_bucket = _TEST_BUCKET_NAME - - model.predict(_X_TEST) - - invokable = mock_remote_training.call_args[1]["invokable"] - assert invokable.method == model.predict._method - assert invokable.bound_arguments == ( - inspect.signature(model.predict._method).bind(_X_TEST) - ) - - assert invokable.vertex_config.remote is True - - assert invokable.vertex_config.remote_config.display_name is None - assert invokable.vertex_config.remote_config.staging_bucket == _TEST_BUCKET_NAME - assert invokable.vertex_config.remote_config.container_uri is None - assert invokable.vertex_config.remote_config.machine_type is None - assert invokable.vertex_config.remote_config.service_account is None - - assert invokable.remote_executor == prediction.remote_prediction - assert invokable.remote_executor_kwargs == {} - assert invokable.instance == model - - def test_remote_prediction_with_set_config(self, mock_remote_training): - vertexai.init( - project=_TEST_PROJECT, - location=_TEST_LOCATION, - staging_bucket=_TEST_BUCKET_NAME, - ) - vertexai.preview.init(remote=True) - - LogisticRegression = vertexai.preview.remote(_logistic.LogisticRegression) - model = LogisticRegression() - - model.predict.vertex.remote = True - - model.predict.vertex.set_config( - staging_bucket=_TEST_BUCKET_NAME, display_name=_TEST_DISPLAY_NAME - ) - - model.predict(_X_TEST) - - invokable = mock_remote_training.call_args[1]["invokable"] - - assert invokable.method == model.predict._method - assert invokable.bound_arguments == ( - inspect.signature(model.predict._method).bind(_X_TEST) - ) - - assert invokable.vertex_config.remote is True - assert isinstance(invokable.vertex_config.remote_config, configs.RemoteConfig) - - assert 
invokable.vertex_config.remote_config.display_name == _TEST_DISPLAY_NAME - assert invokable.vertex_config.remote_config.staging_bucket == _TEST_BUCKET_NAME - assert invokable.vertex_config.remote_config.container_uri is None - assert invokable.vertex_config.remote_config.machine_type is None - assert invokable.vertex_config.remote_config.service_account is None - - assert invokable.remote_executor == prediction.remote_prediction - assert invokable.remote_executor_kwargs == {} - assert invokable.instance == model diff --git a/tests/unit/vertexai/test_remote_specs.py b/tests/unit/vertexai/test_remote_specs.py deleted file mode 100644 index 4a73985af7..0000000000 --- a/tests/unit/vertexai/test_remote_specs.py +++ /dev/null @@ -1,712 +0,0 @@ -# -*- coding: utf-8 -*- - -# Copyright 2023 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# - -"""Tests for developer/remote_specs.py. -""" - -import json -import os -import re -import tempfile -from typing import Any, Dict, List - -import cloudpickle -import vertexai -from vertexai.preview.developer import remote_specs -import mock -import pandas as pd -import pytest -import torch - - -_TEST_BINDING = { - "arg_0": 10, - "arg_1": lambda x: x + 1, - "arg_2": pd.DataFrame(data={"col_0": [0, 1], "col_1": [2, 3]}), -} - -_TEST_MACHINE_TYPE = "n1-standard-16" -_TEST_REPLICA_COUNT = 1 -_TEST_BOOT_DISK_TYPE_DEFAULT = "pd-ssd" -_TEST_BOOT_DISK_SIZE_GB_DEFAULT = 100 - -_TEST_WORKER_POOL_SPEC_OBJ_MACHINE_TYPE = remote_specs.WorkerPoolSpec( - machine_type=_TEST_MACHINE_TYPE, replica_count=_TEST_REPLICA_COUNT -) - -_TEST_WORKER_POOL_SPEC_MACHINE_TYPE = { - "machine_spec": {"machine_type": _TEST_MACHINE_TYPE}, - "replica_count": _TEST_REPLICA_COUNT, - "disk_spec": { - "boot_disk_type": _TEST_BOOT_DISK_TYPE_DEFAULT, - "boot_disk_size_gb": _TEST_BOOT_DISK_SIZE_GB_DEFAULT, - }, -} - -_TEST_WORKER_POOL_SPEC_MACHINE_TYPE_CONTAINER_SPEC = { - "machine_spec": {"machine_type": _TEST_MACHINE_TYPE}, - "replica_count": _TEST_REPLICA_COUNT, - "disk_spec": { - "boot_disk_type": _TEST_BOOT_DISK_TYPE_DEFAULT, - "boot_disk_size_gb": _TEST_BOOT_DISK_SIZE_GB_DEFAULT, - }, - "container_spec": { - "image_uri": "test-image", - "command": ["python3", "run.py"], - "args": [], - }, -} - -_TEST_CLUSTER_SPEC_CHIEF_STR = '{"cluster":{"workerpool0":["cmle-training-workerpool0-1d969d3ba6-0:2222"],"workerpool1":["cmle-training-workerpool1-1d969d3ba6-0:2222"]},"environment":"cloud","task":{"type":"workerpool0","index":0}}' -_TEST_CLUSTER_SPEC_WORKER_STR = '{"cluster":{"workerpool0":["cmle-training-workerpool0-1d969d3ba6-0:2222"],"workerpool1":["cmle-training-workerpool1-1d969d3ba6-0:2222"]},"environment":"cloud","task":{"type":"workerpool1","index":0}}' - -_TEST_OUTPUT_PATH = "gs://test-bucket/output" - - -def _get_vertex_cluster_spec(task_type: str = "workerpool0", task_index: int = 0): - # pylint: disable=protected-access,missing-function-docstring - return { - "cluster": { - remote_specs._CHIEF: ["cmle-training-workerpool0-id-0:2222"], - remote_specs._WORKER: 
[ - "cmle-training-workerpool1-id-0:2222", - "cmle-training-workerpool1-id-1:2222", - "cmle-training-workerpool1-id-2:2222", - ], - remote_specs._SERVER: [ - "cmle-training-workerpool2-id-0:2222", - "cmle-training-workerpool2-id-1:2222", - "cmle-training-workerpool2-id-2:2222", - ], - remote_specs._EVALUATOR: ["cmle-training-workerpool3-id-0:2222"], - }, - remote_specs._TASK: { - remote_specs._TYPE: task_type, - remote_specs._INDEX: task_index, - }, - } - - -class TestRemoteSpec: - """Tests for parameter spec classes and helper function(s).""" - - # pylint: disable=protected-access,missing-function-docstring - @pytest.mark.parametrize( - "name,expected_argument_name", - [ - ("self.a", "a"), - ("a.b.c", "c"), - ("_arg_0", "arg_0"), - ("__arg_0", "__arg_0"), - ("arg_0", "arg_0"), - ], - ) - def test_get_argument_name(self, name: str, expected_argument_name: str): - argument_name = remote_specs._get_argument_name(name) - assert argument_name == expected_argument_name - - # pylint: disable=missing-function-docstring,protected-access - @pytest.mark.parametrize( - "name", - [ - ("."), - (".."), - ("_"), - ], - ) - def test_get_argument_name_invalid(self, name: str): - err_msg = f"Failed to get argument name from name {name}." - with pytest.raises(ValueError) as e: - remote_specs._get_argument_name(name) - assert re.match(err_msg, str(e.value)) - - def test_input_parameter_spec_default(self): - param_spec = remote_specs._InputParameterSpec("arg_0") - assert param_spec.name == "arg_0" - assert param_spec.argument_name == "arg_0" - assert param_spec.serializer == "literal" - - def test_input_parameter_spec_argument_name(self): - param_spec = remote_specs._InputParameterSpec("arg_0", argument_name="input_0") - assert param_spec.name == "arg_0" - assert param_spec.argument_name == "input_0" - assert param_spec.serializer == "literal" - - def test_input_parameter_spec_argument_name_empty(self): - err_msg = "Input parameter name cannot be empty" - with pytest.raises(ValueError) as e: - remote_specs._InputParameterSpec("") - assert re.match(err_msg, str(e.value)) - - @pytest.mark.parametrize("serializer", ["literal", "parquet", "cloudpickle"]) - def test_input_parameter_spec_serializer_valid(self, serializer: str): - param_spec = remote_specs._InputParameterSpec("arg_0", serializer=serializer) - assert param_spec.name == "arg_0" - assert param_spec.argument_name == "arg_0" - assert param_spec.serializer == serializer - - def test_input_parameter_spec_serializer_invalid(self): - err_msg = "Invalid serializer" - with pytest.raises(ValueError) as e: - remote_specs._InputParameterSpec("arg_0", serializer="invalid") - assert re.match(err_msg, str(e.value)) - - def test_input_format_arg_literal(self): - test_spec = remote_specs._InputParameterSpec("arg_0", serializer="literal") - assert test_spec.format_arg("", _TEST_BINDING) == _TEST_BINDING["arg_0"] - - # pylint: disable=redefined-outer-name - @pytest.mark.usefixtures("google_auth_mock") - def test_input_format_arg_cloudpickle( - self, mock_named_temp_file, mock_blob_upload_from_filename - ): - - with tempfile.TemporaryDirectory() as tmp_dir: - tmp_path = os.path.join(tmp_dir, "tmp") - tmp_handler = open(tmp_path, "wb") - (mock_named_temp_file.return_value.__enter__.return_value) = tmp_handler - - spec = remote_specs._InputParameterSpec("arg_1", serializer="cloudpickle") - assert ( - spec.format_arg("gs://bucket/path", _TEST_BINDING) - == "gs://bucket/path/arg_1" - ) - mock_blob_upload_from_filename.assert_called_once() - - with open(tmp_path, "rb") as f: - 
assert cloudpickle.loads(f.read())(1) == _TEST_BINDING["arg_1"](1) - - @pytest.mark.usefixtures("google_auth_mock") - def test_input_format_arg_parquet( - self, mock_named_temp_file, mock_blob_upload_from_filename - ): - with tempfile.TemporaryDirectory() as tmp_dir: - tmp_serialized_path = os.path.join(tmp_dir, "serialized") - tmp_serialized_handler = open(tmp_serialized_path, "wb") - - tmp_metadata_path = os.path.join(tmp_dir, "metadata") - tmp_handler = open(tmp_metadata_path, "wb") - (mock_named_temp_file.return_value.__enter__.side_effect) = [ - tmp_serialized_handler, - tmp_handler, - ] - - spec = remote_specs._InputParameterSpec("arg_2", serializer="parquet") - assert ( - spec.format_arg("gs://bucket/path", _TEST_BINDING) - == "gs://bucket/path/arg_2" - ) - assert mock_blob_upload_from_filename.call_count == 2 - - upload_calls = mock_blob_upload_from_filename.call_args_list - - metadata_path = upload_calls[1][1]["filename"] - - assert metadata_path == tmp_metadata_path - expected_metadata = { - "col_0": { - "dtype": "int64", - "feature_type": "dense", - }, - "col_1": { - "dtype": "int64", - "feature_type": "dense", - }, - } - with open(tmp_metadata_path, "rb") as f: - assert cloudpickle.loads(f.read()) == expected_metadata - - @pytest.mark.parametrize( - "spec,binding,msg", - [ - ( - remote_specs._InputParameterSpec("arg_4"), - _TEST_BINDING, - "Input arg_4 not found in binding", - ), - ( - remote_specs._InputParameterSpec("arg", serializer="parquet"), - {"arg": 10}, - "Parquet serializer is only supported for", - ), - ( - remote_specs._InputParameterSpec("arg_0"), - _TEST_BINDING, - "Unsupported serializer:", - ), - ], - ) - @pytest.mark.usefixtures("google_auth_mock") - def test_input_format_arg_invalid(self, spec, binding, msg): - if msg == "Unsupported serializer:": - spec.serializer = "invalid" - with pytest.raises(ValueError, match=msg): - spec.format_arg("gs://bucket/path", binding) - - def test_output_parameter_spec_default(self): - param_spec = remote_specs._OutputParameterSpec("arg_0") - assert param_spec.name == "arg_0" - assert param_spec.argument_name == "arg_0" - assert param_spec.deserializer == "literal" - - def test_output_parameter_spec_argument_name(self): - param_spec = remote_specs._OutputParameterSpec("arg_0", argument_name="input_0") - assert param_spec.name == "arg_0" - assert param_spec.argument_name == "input_0" - assert param_spec.deserializer == "literal" - - def test_output_parameter_spec_argument_name_empty(self): - err_msg = "Output parameter name cannot be empty" - with pytest.raises(ValueError) as e: - remote_specs._OutputParameterSpec("") - assert re.match(err_msg, str(e.value)) - - @pytest.mark.parametrize("deserializer", ["literal", "cloudpickle"]) - def test_output_parameter_spec_serializer_valid(self, deserializer): - param_spec = remote_specs._OutputParameterSpec( - "arg_0", deserializer=deserializer - ) - assert param_spec.name == "arg_0" - assert param_spec.argument_name == "arg_0" - assert param_spec.deserializer == deserializer - - def test_output_parameter_spec_deserializer_invalid(self): - err_msg = "Invalid deserializer" - with pytest.raises(ValueError) as e: - remote_specs._OutputParameterSpec("arg_0", deserializer="invalid") - assert re.match(err_msg, str(e.value)) - - @pytest.mark.usefixtures("google_auth_mock") - def test_deserialize_output_literal( - self, mock_named_temp_file, mock_blob_download_to_filename - ): - spec = remote_specs._OutputParameterSpec( - "arg_0", deserializer=remote_specs._LITERAL - ) - test_path = 
"gs://bucket/path" - test_val = "output" - - with tempfile.TemporaryDirectory() as tmp_dir: - tmp_path = os.path.join(tmp_dir, "tmp_path") - mock_temp_file = mock_named_temp_file.return_value.__enter__() - mock_temp_file.name = tmp_path - - # Writes to a file to be read from. - with open(tmp_path, "w") as f: - f.write(test_val) - - # Tests reading literal output from GCS. - assert spec.deserialize_output(test_path) == test_val - mock_blob_download_to_filename.assert_called_once_with(filename=tmp_path) - - @pytest.mark.usefixtures("google_auth_mock") - def test_deserialize_output_cloudpickle( - self, mock_named_temp_file, mock_blob_download_to_filename - ): - spec = remote_specs._OutputParameterSpec( - "arg_1", deserializer=remote_specs._CLOUDPICKLE - ) - test_path = "gs://bucket/path" - test_val = cloudpickle.dumps(_TEST_BINDING["arg_1"]) - - with tempfile.TemporaryDirectory() as tmp_dir: - tmp_path = os.path.join(tmp_dir, "tmp_path") - mock_handler = mock_named_temp_file.return_value.__enter__() - mock_handler.name = tmp_path - - # Writes to a file to be read from. - with open(tmp_path, "wb") as f: - f.write(test_val) - - # Tests the deserialized output function works as expected. - with open(tmp_path, "rb") as f: - mock_handler.read = f.read - # Verifies that calling the functions return the same results. - assert spec.deserialize_output(test_path)(1) == _TEST_BINDING["arg_1"]( - 1 - ) - mock_blob_download_to_filename.assert_called_once_with( - filename=tmp_path - ) - - @pytest.mark.usefixtures("google_auth_mock") - def test_deserialize_output_invalid(self): - spec = remote_specs._OutputParameterSpec("arg_0") - spec.deserializer = "invalid" - with pytest.raises(ValueError, match="Unsupported deserializer:"): - spec.deserialize_output("gs://bucket/path") - - def test_gen_gcs_path(self): - base_dir = "gs://test_bucket" - name = "test_name" - expected_path = "gs://test_bucket/test_name" - assert remote_specs._gen_gcs_path(base_dir, name) == expected_path - - def test_gen_gcs_path_invalid(self): - base_dir = "test_bucket" - name = "test_name" - with pytest.raises(ValueError): - remote_specs._gen_gcs_path(base_dir, name) - - def test_gen_gcs_path_remove_suffix(self): - base_dir = "gs://test_bucket" - name = "test_name/" - expected_path = "gs://test_bucket/test_name" - assert remote_specs._gen_gcs_path(base_dir, name) == expected_path - - def test_generate_feature_metadata(self): - df = pd.DataFrame( - { - "col_int": [1, 2, 3], - "col_float": [0.1, 0.2, 0.3], - 0: [0, 1, 0], - "ignored_cat_0": ["0", "1", "0"], - "ignored_cat_1": ["a", "b", "c"], - "ignored_type": ["d", "e", "f"], - } - ) - df["col_int"] = df["col_int"].astype("int64") - df["col_float"] = df["col_float"].astype("float64") - df[0] = df[0].astype("category") - df["ignored_cat_0"] = df["ignored_cat_0"].astype("category") - df["ignored_cat_1"] = df["ignored_cat_1"].astype("category") - df["ignored_type"] = df["ignored_type"].astype("object") - - # ignored_cat and ignored_type do not have feature metadata - expected__metadata = { - "col_int": { - "dtype": "int64", - "feature_type": "dense", - }, - "col_float": { - "dtype": "float64", - "feature_type": "dense", - }, - "0": { - "dtype": "int64", - "feature_type": "dense", - "categories": [0, 1], - }, - } - - original_df = df.copy(deep=True) - assert remote_specs._generate_feature_metadata(df) == expected__metadata - - # Checks that the original dataframe is not modified - assert df.equals(original_df) - - def test_generate_feature_metadata_invalid(self): - with 
pytest.raises(ValueError, match="Generating feature metadata is"): - remote_specs._generate_feature_metadata([0, 1, 2]) - - -class TestClusterSpec: - """Tests for cluster spec classes and other distributed training helper functions.""" - - # pylint: disable=protected-access,missing-function-docstring - def test_invalid_cluster_info(self): - cluster = { - remote_specs._CHIEF: ["cmle-training-workerpool0-id-0:2222"], - "worker": ["cmle-training-workerpool1-id-0:2222"], - } - - err_msg = "Invalid task type: worker." - with pytest.raises(ValueError) as e: - remote_specs._Cluster(cluster) - assert re.match(err_msg, str(e.value)) - - def test_task_types(self): - cluster = remote_specs._Cluster(_get_vertex_cluster_spec()["cluster"]) - assert cluster.task_types == [ - remote_specs._CHIEF, - remote_specs._WORKER, - remote_specs._SERVER, - remote_specs._EVALUATOR, - ] - - @pytest.mark.parametrize( - "task_type,expected_num_tasks", - [ - (remote_specs._CHIEF, 1), - (remote_specs._WORKER, 3), - (remote_specs._SERVER, 3), - (remote_specs._EVALUATOR, 1), - ], - ) - def test_get_num_tasks(self, task_type, expected_num_tasks): - cluster = remote_specs._Cluster(_get_vertex_cluster_spec()["cluster"]) - assert cluster.get_num_tasks(task_type) == expected_num_tasks - - @pytest.mark.parametrize( - "task_type,expected_task_addresses", - [ - (remote_specs._CHIEF, ["cmle-training-workerpool0-id-0:2222"]), - ( - remote_specs._WORKER, - [ - "cmle-training-workerpool1-id-0:2222", - "cmle-training-workerpool1-id-1:2222", - "cmle-training-workerpool1-id-2:2222", - ], - ), - ( - remote_specs._SERVER, - [ - "cmle-training-workerpool2-id-0:2222", - "cmle-training-workerpool2-id-1:2222", - "cmle-training-workerpool2-id-2:2222", - ], - ), - (remote_specs._EVALUATOR, ["cmle-training-workerpool3-id-0:2222"]), - ], - ) - def test_get_task_addresses(self, task_type, expected_task_addresses): - cluster = remote_specs._Cluster(_get_vertex_cluster_spec()["cluster"]) - assert cluster.get_task_addresses(task_type) == expected_task_addresses - - @pytest.mark.parametrize( - "cluster_spec,expected_rank", - [ - ( - remote_specs._ClusterSpec( - _get_vertex_cluster_spec(remote_specs._CHIEF, 0) - ), - 0, - ), - ( - remote_specs._ClusterSpec( - _get_vertex_cluster_spec(remote_specs._WORKER, 2) - ), - 3, - ), - ( - remote_specs._ClusterSpec( - _get_vertex_cluster_spec(remote_specs._SERVER, 1) - ), - 5, - ), - ( - remote_specs._ClusterSpec( - _get_vertex_cluster_spec(remote_specs._EVALUATOR, 0) - ), - 7, - ), - ], - ) - def test_get_rank(self, cluster_spec, expected_rank): - assert cluster_spec.get_rank() == expected_rank - - def test_get_world_size(self): - cluster_spec = remote_specs._ClusterSpec(_get_vertex_cluster_spec()) - assert cluster_spec.get_world_size() == 8 - - def test_get_chief_address_port(self): - cluster_spec = remote_specs._ClusterSpec(_get_vertex_cluster_spec()) - assert cluster_spec.get_chief_address_port() == ( - "cmle-training-workerpool0-id-0", - 2222, - ) - - -# pylint: disable=protected-access -class TestWorkerPoolSpecs: - """Tests for worker pool spec classes and related functions.""" - - @pytest.mark.parametrize( - "worker_pool_specs,expected_spec", - [ - ( - remote_specs.WorkerPoolSpecs(_TEST_WORKER_POOL_SPEC_OBJ_MACHINE_TYPE), - [_TEST_WORKER_POOL_SPEC_MACHINE_TYPE_CONTAINER_SPEC], - ), - ( - remote_specs.WorkerPoolSpecs( - _TEST_WORKER_POOL_SPEC_OBJ_MACHINE_TYPE, - evaluator=_TEST_WORKER_POOL_SPEC_OBJ_MACHINE_TYPE, - ), - [ - _TEST_WORKER_POOL_SPEC_MACHINE_TYPE_CONTAINER_SPEC, - {}, - {}, - 
_TEST_WORKER_POOL_SPEC_MACHINE_TYPE_CONTAINER_SPEC, - ], - ), - ( - remote_specs.WorkerPoolSpecs( - _TEST_WORKER_POOL_SPEC_OBJ_MACHINE_TYPE, - server=_TEST_WORKER_POOL_SPEC_OBJ_MACHINE_TYPE, - ), - [ - _TEST_WORKER_POOL_SPEC_MACHINE_TYPE_CONTAINER_SPEC, - {}, - _TEST_WORKER_POOL_SPEC_MACHINE_TYPE_CONTAINER_SPEC, - ], - ), - ], - ) - def test_prepare_worker_pool_specs( - self, - worker_pool_specs: remote_specs.WorkerPoolSpecs, - expected_spec: List[Dict[str, Any]], - ): - assert ( - remote_specs._prepare_worker_pool_specs( - worker_pool_specs, "test-image", ["python3", "run.py"], [] - ) - == expected_spec - ) - - @pytest.mark.parametrize( - "cluster_spec_str,expected_output_path", - [ - ( - _TEST_CLUSTER_SPEC_CHIEF_STR, - os.path.join(_TEST_OUTPUT_PATH, "output_estimator"), - ), - ( - _TEST_CLUSTER_SPEC_WORKER_STR, - os.path.join(_TEST_OUTPUT_PATH, "temp/workerpool1_0"), - ), - ("", os.path.join(_TEST_OUTPUT_PATH, "output_estimator")), - ], - ) - def test_get_output_path_for_distributed_training( - self, cluster_spec_str, expected_output_path - ): - with mock.patch.dict( - os.environ, {remote_specs._CLUSTER_SPEC: cluster_spec_str}, clear=True - ): - with mock.patch("os.makedirs"): - output_path = remote_specs._get_output_path_for_distributed_training( - _TEST_OUTPUT_PATH, "output_estimator" - ) - assert output_path == expected_output_path - - # Temporarily remove these tests since they require tensorflow >= 2.12.0 - # but in our external test environment tf 2.12 is not available due to conflict - # TODO(jayceeli) Add these tests back once we fix the external environment issue. - - # def test_set_keras_distributed_strategy_enable_distributed_multi_worker(self): - # model = tf.keras.Sequential( - # [tf.keras.layers.Dense(5, input_shape=(4,)), tf.keras.layers.Softmax()] - # ) - # model.compile(optimizer="adam", loss="mean_squared_error") - # with mock.patch.dict( - # os.environ, - # {remote_specs._CLUSTER_SPEC: _TEST_CLUSTER_SPEC_CHIEF_STR}, - # clear=True, - # ): - # strategy = remote_specs._get_keras_distributed_strategy(True, None) - # updated_model = remote_specs._set_keras_distributed_strategy( - # model, strategy - # ) - - # assert updated_model.get_config() == model.get_config() - # assert updated_model.get_compile_config() == model.get_compile_config() - # assert "CollectiveAllReduceStrategy" in str( - # type(updated_model.distribute_strategy) - # ) - - # def test_set_keras_distributed_strategy_enable_distributed_multi_gpu(self): - # model = tf.keras.Sequential( - # [tf.keras.layers.Dense(5, input_shape=(4,)), tf.keras.layers.Softmax()] - # ) - # model.compile(optimizer="adam", loss="mean_squared_error") - # # no cluster_spec is set for single worker training - # strategy = remote_specs._get_keras_distributed_strategy(True, None) - # updated_model = remote_specs._set_keras_distributed_strategy(model, strategy) - - # assert updated_model.get_config() == model.get_config() - # assert updated_model.get_compile_config() == model.get_compile_config() - # assert "MirroredStrategy" in str(type(updated_model.distribute_strategy)) - - # def test_set_keras_distributed_strategy_multi_gpu(self): - # model = tf.keras.Sequential( - # [tf.keras.layers.Dense(5, input_shape=(4,)), tf.keras.layers.Softmax()] - # ) - # model.compile(optimizer="adam", loss="mean_squared_error") - # # no cluster_spec is set for single worker training - # strategy = remote_specs._get_keras_distributed_strategy(False, 3) - # updated_model = remote_specs._set_keras_distributed_strategy(model, strategy) - - # assert 
updated_model.get_config() == model.get_config() - # assert updated_model.get_compile_config() == model.get_compile_config() - # assert "MirroredStrategy" in str(type(updated_model.distribute_strategy)) - - @mock.patch.dict(os.environ, {}, clear=True) - @mock.patch.object(torch.distributed, "init_process_group") - @mock.patch("torch.nn.parallel.DistributedDataParallel") - def test_setup_pytorch_distributed_training( - self, - mock_distributed_data_parallel, - mock_init_process_group, - ): - class TestClass(vertexai.preview.VertexModel, torch.nn.Module): - def __init__(self): - torch.nn.Module.__init__(self) - vertexai.preview.VertexModel.__init__(self) - self.linear = torch.nn.Linear(4, 3) - self.softmax = torch.nn.Softmax(dim=1) - - def forward(self, x): - return self.softmax(self.linear(x)) - - @vertexai.preview.developer.mark.train() - def test_method(self): - return - - model = TestClass() - setattr( - model, - "cluster_spec", - remote_specs._ClusterSpec(json.loads(_TEST_CLUSTER_SPEC_CHIEF_STR)), - ) - setattr(model, "_enable_cuda", False) - - output = remote_specs.setup_pytorch_distributed_training(model) - - mock_init_process_group.assert_called_once_with( - backend="gloo", rank=0, world_size=2 - ) - mock_distributed_data_parallel.assert_called_once_with(model) - - assert ( - os.getenv(remote_specs._MASTER_ADDR) - == "cmle-training-workerpool0-1d969d3ba6-0" - ) - assert os.getenv(remote_specs._MASTER_PORT) == "2222" - assert next(output.parameters()).is_cpu - - @mock.patch.dict(os.environ, {}, clear=True) - def test_setup_pytorch_distributed_training_no_cluster_spec(self): - class TestClass(vertexai.preview.VertexModel, torch.nn.Module): - def __init__(self): - torch.nn.Module.__init__(self) - vertexai.preview.VertexModel.__init__(self) - self.linear = torch.nn.Linear(4, 3) - self.softmax = torch.nn.Softmax(dim=1) - - def forward(self, x): - return self.softmax(self.linear(x)) - - @vertexai.preview.developer.mark.train() - def test_method(self): - return - - model = TestClass() - - assert model == remote_specs.setup_pytorch_distributed_training(model) diff --git a/tests/unit/vertexai/test_remote_training.py b/tests/unit/vertexai/test_remote_training.py deleted file mode 100644 index aed34ea75f..0000000000 --- a/tests/unit/vertexai/test_remote_training.py +++ /dev/null @@ -1,2167 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2023 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-# - -import copy -from importlib import reload -import os -import re -import sys -from unittest import mock -from unittest.mock import patch - -import cloudpickle -from google import auth -from google.api_core import exceptions -from google.auth import credentials as auth_credentials -from google.cloud import aiplatform -from google.cloud.aiplatform import utils -from google.cloud.aiplatform.compat.services import ( - job_service_client_v1beta1 as job_service_client, - tensorboard_service_client, -) -from google.cloud.aiplatform.compat.types import ( - custom_job_v1beta1 as gca_custom_job_compat, -) -from google.cloud.aiplatform.compat.types import ( - execution_v1beta1 as gca_execution, -) -from google.cloud.aiplatform.compat.types import io_v1beta1 as gca_io_compat -from google.cloud.aiplatform.compat.types import ( - job_state_v1beta1 as gca_job_state_compat, -) -from google.cloud.aiplatform.compat.types import ( - tensorboard as gca_tensorboard, -) -from google.cloud.aiplatform.metadata import constants as metadata_constants -from google.cloud.aiplatform.preview import resource_pool_utils -from google.cloud.aiplatform_v1 import ( - Context as GapicContext, - MetadataServiceClient, - MetadataStore as GapicMetadataStore, - TensorboardServiceClient, -) -import vertexai -from vertexai.preview._workflow.executor import ( - training, -) -from vertexai.preview._workflow.serialization_engine import ( - any_serializer, - serializers_base, -) -from vertexai.preview._workflow.shared import configs -from vertexai.preview._workflow.shared import ( - supported_frameworks, -) -from vertexai.preview.developer import remote_specs -import numpy as np -import pytest -import sklearn -from sklearn.datasets import load_iris -from sklearn.linear_model import _logistic -from sklearn.model_selection import train_test_split -import tensorflow as tf -import tensorflow.keras - - -# Manually set tensorflow version for b/295580335 -tf.__version__ = "2.12.0" - - -# vertexai constants -_TEST_PROJECT = "test-project" -_TEST_PROJECT_NUMBER = 12345678 -_TEST_LOCATION = "us-central1" -_TEST_PARENT = f"projects/{_TEST_PROJECT}/locations/{_TEST_LOCATION}" -_TEST_BUCKET_NAME = "gs://test-bucket" -_TEST_UNIQUE_NAME = "test-unique-name" -_TEST_REMOTE_JOB_NAME = f"remote-job-{_TEST_UNIQUE_NAME}" -_TEST_REMOTE_JOB_BASE_PATH = os.path.join(_TEST_BUCKET_NAME, _TEST_REMOTE_JOB_NAME) -_TEST_EXPERIMENT = "test-experiment" -_TEST_EXPERIMENT_RUN = "test-experiment-run" -_TEST_SERVICE_ACCOUNT = f"{_TEST_PROJECT_NUMBER}-compute@developer.gserviceaccount.com" -_TEST_CREDENTIALS = mock.Mock(spec=auth_credentials.AnonymousCredentials()) - -# dataset constants -dataset = load_iris() -_X_TRAIN, _X_TEST, _Y_TRAIN, _Y_TEST = train_test_split( - dataset.data, dataset.target, test_size=0.2, random_state=42 -) - -# custom job constants -_TEST_CUSTOM_JOB_NAME = f"{_TEST_PARENT}/customJobs/12345" -_TEST_UPGRADE_PIP_COMMAND = ( - "export PIP_ROOT_USER_ACTION=ignore && " "pip install --upgrade pip && " -) -_TEST_BASE_DEPS = f"'{training.VERTEX_AI_DEPENDENCY_PATH}' 'absl-py==1.4.0' " -_TEST_CUSTOM_COMMAND = "apt-get update && " "apt-get install -y git && " -_TEST_DEPS = ( - f"'scikit-learn=={sklearn.__version__}' " - f"'numpy=={np.__version__}' " - f"'cloudpickle=={cloudpickle.__version__}' " -) -_TEST_USER_DEPS = ( - f"'torch_cv' " - f"'xgboost==1.6.0' " - f"'numpy' " - f"'scikit-learn=={sklearn.__version__}' " - f"'cloudpickle=={cloudpickle.__version__}' " -) -_TEST_TRAINING_COMMAND = ( - "python3 -m 
vertexai.preview._workflow.executor.training_script " - "--pass_through_int_args= " - "--pass_through_float_args= " - "--pass_through_str_args= " - "--pass_through_bool_args= " - f"--input_path={os.path.join(_TEST_REMOTE_JOB_BASE_PATH, 'input').replace('gs://', '/gcs/', 1)} " - f"--output_path={os.path.join(_TEST_REMOTE_JOB_BASE_PATH, 'output').replace('gs://', '/gcs/', 1)} " - "--method_name=fit " - f"--arg_names=X,y " - "--enable_cuda=False " - "--enable_distributed=False " - "--accelerator_count=0" -) - -_TEST_AUTOLOG_COMMAND = ( - _TEST_UPGRADE_PIP_COMMAND - + "pip install " - + _TEST_BASE_DEPS.replace( - training.VERTEX_AI_DEPENDENCY_PATH, - training.VERTEX_AI_DEPENDENCY_PATH_AUTOLOGGING, - ) - + _TEST_DEPS - + "&& " - + _TEST_TRAINING_COMMAND - + " --enable_autolog" -) -_TEST_REPLICA_COUNT = 1 -_TEST_MACHINE_TYPE = "n1-standard-4" -_TEST_ACCELERATOR_TYPE = "NVIDIA_TESLA_K80" -_TEST_ACCELERATOR_COUNT = 2 -_TEST_WORKER_POOL_SPEC = [ - { - "machine_spec": { - "machine_type": _TEST_MACHINE_TYPE, - }, - "replica_count": _TEST_REPLICA_COUNT, - "container_spec": { - "image_uri": f"python:{supported_frameworks._get_python_minor_version()}", - "command": ["sh", "-c"] - + [ - _TEST_UPGRADE_PIP_COMMAND - + "pip install " - + _TEST_BASE_DEPS - + _TEST_DEPS - + "&& " - + _TEST_TRAINING_COMMAND - ], - "args": [], - }, - } -] -_TEST_CUSTOM_JOB_PROTO = gca_custom_job_compat.CustomJob( - display_name=_TEST_REMOTE_JOB_NAME, - job_spec={ - "worker_pool_specs": _TEST_WORKER_POOL_SPEC, - "base_output_directory": gca_io_compat.GcsDestination( - output_uri_prefix=_TEST_REMOTE_JOB_BASE_PATH - ), - }, -) - -# RemoteConfig constants -_TEST_TRAINING_CONFIG_DISPLAY_NAME = "test-training-config-display-name" -_TEST_TRAINING_CONFIG_STAGING_BUCKET = "gs://test-training-config-staging-bucket" -_TEST_TRAINING_CONFIG_CONTAINER_URI = "gcr.io/custom-image" -_TEST_TRAINING_CONFIG_MACHINE_TYPE = "n1-highmem-4" -_TEST_TRAINING_CONFIG_ACCELERATOR_TYPE = "NVIDIA_TESLA_K80" -_TEST_TRAINING_CONFIG_ACCELERATOR_COUNT = 4 -_TEST_REQUIREMENTS = ["torch_cv", "xgboost==1.6.0", "numpy"] -_TEST_CUSTOM_COMMANDS = ["apt-get update", "apt-get install -y git"] - -_TEST_BOOT_DISK_TYPE = "test_boot_disk_type" -_TEST_BOOT_DISK_SIZE_GB = 10 -_TEST_TRAINING_CONFIG_WORKER_POOL_SPECS = remote_specs.WorkerPoolSpecs( - chief=remote_specs.WorkerPoolSpec( - machine_type=_TEST_TRAINING_CONFIG_MACHINE_TYPE, - replica_count=1, - boot_disk_type=_TEST_BOOT_DISK_TYPE, - boot_disk_size_gb=_TEST_BOOT_DISK_SIZE_GB, - ) -) -_TEST_TRAINING_CONFIG_WORKER_POOL_SPECS_GPU = remote_specs.WorkerPoolSpecs( - chief=remote_specs.WorkerPoolSpec( - machine_type=_TEST_TRAINING_CONFIG_MACHINE_TYPE, - accelerator_count=_TEST_TRAINING_CONFIG_ACCELERATOR_COUNT, - accelerator_type=_TEST_TRAINING_CONFIG_ACCELERATOR_TYPE, - replica_count=1, - boot_disk_type=_TEST_BOOT_DISK_TYPE, - boot_disk_size_gb=_TEST_BOOT_DISK_SIZE_GB, - ) -) - -_TEST_CONTEXT_ID = _TEST_EXPERIMENT -_TEST_CONTEXT_NAME = f"{_TEST_PARENT}/contexts/{_TEST_CONTEXT_ID}" -_TEST_EXPERIMENT_DESCRIPTION = "test-experiment-description" -_TEST_ID = "1028944691210842416" -_TEST_TENSORBOARD_NAME = f"{_TEST_PARENT}/tensorboards/{_TEST_ID}" -_TEST_EXECUTION_ID = f"{_TEST_EXPERIMENT}-{_TEST_EXPERIMENT_RUN}" -_TEST_EXPERIMENT_RUN_CONTEXT_NAME = f"{_TEST_PARENT}/contexts/{_TEST_EXECUTION_ID}" -_TEST_METADATASTORE = ( - f"projects/{_TEST_PROJECT}/locations/{_TEST_LOCATION}/metadataStores/default" -) - -_EXPERIMENT_MOCK = GapicContext( - name=_TEST_CONTEXT_NAME, - display_name=_TEST_EXPERIMENT, - 
description=_TEST_EXPERIMENT_DESCRIPTION, - schema_title=metadata_constants.SYSTEM_EXPERIMENT, - schema_version=metadata_constants.SCHEMA_VERSIONS[ - metadata_constants.SYSTEM_EXPERIMENT - ], - metadata={**metadata_constants.EXPERIMENT_METADATA}, -) - -_EXPERIMENT_MOCK.metadata[ - metadata_constants._BACKING_TENSORBOARD_RESOURCE_KEY -] = _TEST_TENSORBOARD_NAME - -_EXPERIMENT_RUN_MOCK = GapicContext( - name=_TEST_EXPERIMENT_RUN_CONTEXT_NAME, - display_name=_TEST_EXPERIMENT_RUN, - schema_title=metadata_constants.SYSTEM_EXPERIMENT_RUN, - schema_version=metadata_constants.SCHEMA_VERSIONS[ - metadata_constants.SYSTEM_EXPERIMENT_RUN - ], - metadata={ - metadata_constants._PARAM_KEY: {}, - metadata_constants._METRIC_KEY: {}, - metadata_constants._STATE_KEY: gca_execution.Execution.State.RUNNING.name, - }, -) - - -_TEST_DEFAULT_TENSORBOARD_NAME = "test-tensorboard-default-name" - -_TEST_DEFAULT_TENSORBOARD_GCA = gca_tensorboard.Tensorboard( - name=_TEST_DEFAULT_TENSORBOARD_NAME, - is_default=True, -) - -_TEST_PERSISTENT_RESOURCE_ID = "test-cluster" -_TEST_PERSISTENT_RESOURCE_CONFIG = configs.PersistentResourceConfig( - name=_TEST_PERSISTENT_RESOURCE_ID, - resource_pools=[ - remote_specs.ResourcePool( - replica_count=_TEST_REPLICA_COUNT, - ), - remote_specs.ResourcePool( - machine_type="n1-standard-8", - replica_count=2, - ), - ], -) - -_TEST_PERSISTENT_RESOURCE_CONFIG_SERVICE_ACCOUNT = configs.PersistentResourceConfig( - name=_TEST_PERSISTENT_RESOURCE_ID, - resource_pools=[ - remote_specs.ResourcePool( - replica_count=_TEST_REPLICA_COUNT, - ), - remote_specs.ResourcePool( - machine_type="n1-standard-8", - replica_count=2, - ), - ], - service_account=_TEST_SERVICE_ACCOUNT, -) - -_TEST_PERSISTENT_RESOURCE_CONFIG_DISABLE = configs.PersistentResourceConfig( - name=_TEST_PERSISTENT_RESOURCE_ID, - resource_pools=[ - remote_specs.ResourcePool( - replica_count=_TEST_REPLICA_COUNT, - ), - remote_specs.ResourcePool( - machine_type="n1-standard-8", - replica_count=2, - ), - ], - disable=True, -) - - -@pytest.fixture -def list_default_tensorboard_mock(): - with patch.object( - TensorboardServiceClient, "list_tensorboards" - ) as list_default_tensorboard_mock: - list_default_tensorboard_mock.side_effect = [ - [_TEST_DEFAULT_TENSORBOARD_GCA], - ] - yield list_default_tensorboard_mock - - -def _get_custom_job_proto( - display_name=None, - staging_bucket=None, - container_uri=None, - machine_type=None, - accelerator_type=None, - accelerator_count=None, - replica_count=None, - boot_disk_type=None, - boot_disk_size_gb=None, - service_account=None, - experiment=None, - experiment_run=None, - autolog_enabled=False, - cuda_enabled=False, - distributed_enabled=False, - model=None, - user_requirements=False, - custom_commands=False, - persistent_resource_id=None, -): - job = copy.deepcopy(_TEST_CUSTOM_JOB_PROTO) - if display_name: - job.display_name = display_name - if container_uri: - job.job_spec.worker_pool_specs[0].container_spec.image_uri = container_uri - job.job_spec.worker_pool_specs[0].container_spec.command[-1] = ( - _TEST_UPGRADE_PIP_COMMAND - + "pip install " - + _TEST_BASE_DEPS - + "&& " - + _TEST_TRAINING_COMMAND - ) - if user_requirements: - job.job_spec.worker_pool_specs[0].container_spec.command[-1] = ( - _TEST_UPGRADE_PIP_COMMAND - + "pip install " - + _TEST_BASE_DEPS - + _TEST_USER_DEPS - + "&& " - + _TEST_TRAINING_COMMAND - ) - if custom_commands: - job.job_spec.worker_pool_specs[0].container_spec.command[-1] = ( - _TEST_UPGRADE_PIP_COMMAND.replace("&& ", f"&& {_TEST_CUSTOM_COMMAND}", 1) - + "pip 
install " - + _TEST_BASE_DEPS - + _TEST_DEPS - + "&& " - + _TEST_TRAINING_COMMAND - ) - if autolog_enabled: - job.job_spec.worker_pool_specs[0].container_spec.command[ - -1 - ] = _TEST_AUTOLOG_COMMAND - if isinstance(model, tf.Module): - command = job.job_spec.worker_pool_specs[0].container_spec.command - for i, s in enumerate(command): - s = s.replace( - f"scikit-learn=={sklearn.__version__}", f"tensorflow=={tf.__version__}" - ) - s = s.replace("--arg_names=X,y", "--arg_names=x,y") - command[i] = s - job.job_spec.worker_pool_specs[0].container_spec.command = command - if cuda_enabled: - if not container_uri: - job.job_spec.worker_pool_specs[ - 0 - ].container_spec.image_uri = supported_frameworks._get_gpu_container_uri( - model - ) - job.job_spec.worker_pool_specs[0].machine_spec.machine_type = "n1-standard-16" - job.job_spec.worker_pool_specs[ - 0 - ].machine_spec.accelerator_type = "NVIDIA_TESLA_P100" - job.job_spec.worker_pool_specs[0].machine_spec.accelerator_count = 1 - command = job.job_spec.worker_pool_specs[0].container_spec.command - job.job_spec.worker_pool_specs[0].container_spec.command = [ - s.replace("--enable_cuda=False", "--enable_cuda=True") for s in command - ] - if distributed_enabled: - command = job.job_spec.worker_pool_specs[0].container_spec.command - job.job_spec.worker_pool_specs[0].container_spec.command = [ - s.replace("--enable_distributed=False", "--enable_distributed=True") - for s in command - ] - if machine_type: - job.job_spec.worker_pool_specs[0].machine_spec.machine_type = machine_type - if accelerator_type: - job.job_spec.worker_pool_specs[ - 0 - ].machine_spec.accelerator_type = accelerator_type - if accelerator_count: - job.job_spec.worker_pool_specs[ - 0 - ].machine_spec.accelerator_count = accelerator_count - if not distributed_enabled: - command = job.job_spec.worker_pool_specs[0].container_spec.command - job.job_spec.worker_pool_specs[0].container_spec.command = [ - s.replace( - "--accelerator_count=0", - f"--accelerator_count={accelerator_count}", - ) - for s in command - ] - if replica_count: - job.job_spec.worker_pool_specs[0].replica_count = replica_count - if boot_disk_type: - job.job_spec.worker_pool_specs[0].disk_spec.boot_disk_type = boot_disk_type - if boot_disk_size_gb: - job.job_spec.worker_pool_specs[ - 0 - ].disk_spec.boot_disk_size_gb = boot_disk_size_gb - if staging_bucket: - job.job_spec.base_output_directory = gca_io_compat.GcsDestination( - output_uri_prefix=os.path.join(staging_bucket, _TEST_REMOTE_JOB_NAME) - ) - command = job.job_spec.worker_pool_specs[0].container_spec.command - job.job_spec.worker_pool_specs[0].container_spec.command = [ - s.replace(_TEST_BUCKET_NAME[5:], staging_bucket[5:]) for s in command - ] - if service_account: - job.job_spec.service_account = service_account - if experiment: - env = job.job_spec.worker_pool_specs[0].container_spec.env - env.append({"name": metadata_constants.ENV_EXPERIMENT_KEY, "value": experiment}) - if experiment_run: - env = job.job_spec.worker_pool_specs[0].container_spec.env - env.append( - {"name": metadata_constants.ENV_EXPERIMENT_RUN_KEY, "value": experiment_run} - ) - if persistent_resource_id: - job.job_spec.persistent_resource_id = persistent_resource_id - job.labels = ({"trained_by_vertex_ai": "true"},) - return job - - -@pytest.fixture -def mock_timestamped_unique_name(): - with patch.object(utils, "timestamped_unique_name") as mock_timestamped_unique_name: - mock_timestamped_unique_name.return_value = _TEST_UNIQUE_NAME - yield mock_timestamped_unique_name - - 
-@pytest.fixture -def mock_autolog_enabled(): - with patch.object( - utils.autologging_utils, "_is_autologging_enabled" - ) as autolog_enabled: - autolog_enabled.return_value = True - yield autolog_enabled - - -@pytest.fixture -def mock_autolog_disabled(): - with patch.object( - utils.autologging_utils, "_is_autologging_enabled" - ) as autolog_disabled: - autolog_disabled.return_value = False - yield autolog_disabled - - -@pytest.fixture -def mock_get_project_number(): - with patch.object( - utils.resource_manager_utils, "get_project_number" - ) as mock_get_project_number: - mock_get_project_number.return_value = _TEST_PROJECT_NUMBER - yield mock_get_project_number - - -@pytest.fixture -def mock_get_experiment_run(): - with patch.object(MetadataServiceClient, "get_context") as mock_get_experiment_run: - mock_get_experiment_run.side_effect = [ - _EXPERIMENT_MOCK, - _EXPERIMENT_RUN_MOCK, - _EXPERIMENT_RUN_MOCK, - ] - - yield mock_get_experiment_run - - -@pytest.fixture -def mock_get_metadata_store(): - with patch.object( - MetadataServiceClient, "get_metadata_store" - ) as mock_get_metadata_store: - mock_get_metadata_store.return_value = GapicMetadataStore( - name=_TEST_METADATASTORE, - ) - yield mock_get_metadata_store - - -@pytest.fixture -def get_artifact_not_found_mock(): - with patch.object(MetadataServiceClient, "get_artifact") as get_artifact_mock: - get_artifact_mock.side_effect = exceptions.NotFound("") - yield get_artifact_mock - - -# we've tested AnySerializer in `test_serializers.py` -# so here we mock the SDK methods directly -@pytest.fixture -def mock_any_serializer_serialize_sklearn(): - with patch.object( - any_serializer.AnySerializer, - "serialize", - side_effect=[ - { - serializers_base.SERIALIZATION_METADATA_DEPENDENCIES_KEY: [ - f"scikit-learn=={sklearn.__version__}" - ], - serializers_base.SERIALIZATION_METADATA_CUSTOM_COMMANDS_KEY: [], - }, - { - serializers_base.SERIALIZATION_METADATA_DEPENDENCIES_KEY: [ - f"numpy=={np.__version__}", - f"cloudpickle=={cloudpickle.__version__}", - ], - serializers_base.SERIALIZATION_METADATA_CUSTOM_COMMANDS_KEY: [], - }, - { - serializers_base.SERIALIZATION_METADATA_DEPENDENCIES_KEY: [ - f"numpy=={np.__version__}", - f"cloudpickle=={cloudpickle.__version__}", - ], - serializers_base.SERIALIZATION_METADATA_CUSTOM_COMMANDS_KEY: [], - }, - { - serializers_base.SERIALIZATION_METADATA_DEPENDENCIES_KEY: [ - f"numpy=={np.__version__}", - f"cloudpickle=={cloudpickle.__version__}", - ], - serializers_base.SERIALIZATION_METADATA_CUSTOM_COMMANDS_KEY: [], - }, - ], - ) as mock_any_serializer_serialize: - yield mock_any_serializer_serialize - - -@pytest.fixture -def mock_any_serializer_save_global_metadata(): - with patch.object( - any_serializer.AnySerializer, - "save_global_metadata", - ) as mock_any_serializer_save_global_metadata: - yield mock_any_serializer_save_global_metadata - - -@pytest.fixture -def mock_any_serializer_load_global_metadata(): - with patch.object( - any_serializer.AnySerializer, - "load_global_metadata", - ) as mock_any_serializer_load_global_metadata: - yield mock_any_serializer_load_global_metadata - - -@pytest.fixture -def mock_any_serializer_sklearn( - mock_any_serializer_serialize_sklearn, mock_any_serializer_deserialize_sklearn -): - with patch.object( - any_serializer, - "AnySerializer", - ) as mock_any_serializer_obj: - model = _logistic.LogisticRegression() - model.fit(_X_TRAIN, _Y_TRAIN) - mock_any_serializer_obj.return_value.deserialize = ( - mock_any_serializer_deserialize_sklearn - ) - 
mock_any_serializer_obj.return_value.serialize = ( - mock_any_serializer_serialize_sklearn - ) - yield mock_any_serializer_obj - - -@pytest.fixture -def mock_any_serializer_deserialize_sklearn(): - with patch.object( - any_serializer.AnySerializer, "deserialize" - ) as mock_any_serializer_deserialize_sklearn: - model = _logistic.LogisticRegression() - returned_model = model.fit(_X_TRAIN, _Y_TRAIN) - mock_any_serializer_deserialize_sklearn.side_effect = [model, returned_model] - yield mock_any_serializer_deserialize_sklearn - - -@pytest.fixture -def mock_any_serializer_keras( - mock_any_serializer_serialize_keras, mock_any_serializer_deserialize_keras -): - with patch.object( - any_serializer, - "AnySerializer", - ) as mock_any_serializer_obj: - model = _logistic.LogisticRegression() - model.fit(_X_TRAIN, _Y_TRAIN) - mock_any_serializer_obj.return_value.deserialize = ( - mock_any_serializer_deserialize_keras - ) - mock_any_serializer_obj.return_value.serialize = ( - mock_any_serializer_serialize_keras - ) - yield mock_any_serializer_obj - - -@pytest.fixture -def mock_any_serializer_serialize_keras(): - with patch.object( - any_serializer.AnySerializer, - "serialize", - side_effect=[ - { - serializers_base.SERIALIZATION_METADATA_DEPENDENCIES_KEY: [ - f"tensorflow=={tf.__version__}" - ], - serializers_base.SERIALIZATION_METADATA_CUSTOM_COMMANDS_KEY: [], - }, - { - serializers_base.SERIALIZATION_METADATA_DEPENDENCIES_KEY: [ - f"numpy=={np.__version__}", - f"cloudpickle=={cloudpickle.__version__}", - ], - serializers_base.SERIALIZATION_METADATA_CUSTOM_COMMANDS_KEY: [], - }, - { - serializers_base.SERIALIZATION_METADATA_DEPENDENCIES_KEY: [ - f"numpy=={np.__version__}", - f"cloudpickle=={cloudpickle.__version__}", - ], - serializers_base.SERIALIZATION_METADATA_CUSTOM_COMMANDS_KEY: [], - }, - { - serializers_base.SERIALIZATION_METADATA_DEPENDENCIES_KEY: [ - f"numpy=={np.__version__}", - f"cloudpickle=={cloudpickle.__version__}", - ], - serializers_base.SERIALIZATION_METADATA_CUSTOM_COMMANDS_KEY: [], - }, - ], - ) as mock_any_serializer_serialize: - yield mock_any_serializer_serialize - - -@pytest.fixture -def mock_any_serializer_deserialize_keras(): - with patch.object( - any_serializer.AnySerializer, "deserialize" - ) as mock_any_serializer_deserialize_keras: - model = tf.keras.Sequential( - [tf.keras.layers.Dense(5, input_shape=(4,)), tf.keras.layers.Softmax()] - ) - model.compile(optimizer="adam", loss="mean_squared_error") - returned_history = model.fit(_X_TRAIN, _Y_TRAIN) - mock_any_serializer_deserialize_keras.side_effect = [model, returned_history] - yield mock_any_serializer_deserialize_keras - - -@pytest.fixture -def mock_create_custom_job(): - with mock.patch.object( - job_service_client.JobServiceClient, "create_custom_job" - ) as mock_create_custom_job: - custom_job_proto = _get_custom_job_proto() - custom_job_proto.name = _TEST_CUSTOM_JOB_NAME - custom_job_proto.state = gca_job_state_compat.JobState.JOB_STATE_PENDING - mock_create_custom_job.return_value = custom_job_proto - yield mock_create_custom_job - - -@pytest.fixture -def mock_get_custom_job(): - with patch.object( - job_service_client.JobServiceClient, "get_custom_job" - ) as mock_get_custom_job: - custom_job_proto = _get_custom_job_proto() - custom_job_proto.name = _TEST_CUSTOM_JOB_NAME - custom_job_proto.state = gca_job_state_compat.JobState.JOB_STATE_SUCCEEDED - mock_get_custom_job.return_value = custom_job_proto - yield mock_get_custom_job - - -@pytest.fixture -def update_context_mock(): - with 
patch.object(MetadataServiceClient, "update_context") as update_context_mock: - update_context_mock.side_effect = [_EXPERIMENT_RUN_MOCK] * 4 - yield update_context_mock - - -@pytest.fixture -def aiplatform_autolog_mock(): - with patch.object(aiplatform, "autolog") as aiplatform_autolog_mock: - yield aiplatform_autolog_mock - - -@pytest.fixture(scope="module") -def google_auth_mock(): - with mock.patch.object(auth, "default") as auth_mock: - auth_mock.return_value = ( - auth_credentials.AnonymousCredentials(), - "test-project", - ) - yield auth_mock - - -@pytest.fixture -def get_tensorboard_mock(): - with patch.object( - tensorboard_service_client.TensorboardServiceClient, "get_tensorboard" - ) as get_tensorboard_mock: - get_tensorboard_mock.return_value = _TEST_DEFAULT_TENSORBOARD_GCA - yield get_tensorboard_mock - - -# unittest `assert_any_call` method doesn't work when arguments contain `np.ndarray` -# https://stackoverflow.com/questions/56644729/mock-assert-mock-calls-with-a-numpy-array-as-argument-raises-valueerror-and-np -# tentatively runtime patch `assert_any_call` to solve this issue -def assert_any_call_for_numpy(self, **kwargs): - """Used by vertexai Serializer mock, only check kwargs.""" - found = False - for call in self.call_args_list: - equal = True - actual_kwargs = call[1] - for k, v in actual_kwargs.items(): - if k not in kwargs: - equal = False - break - try: - equal = v == kwargs[k] - except ValueError: - equal = False - equal = equal.all() if isinstance(equal, np.ndarray) else equal - if not equal: - break - - if equal and len(actual_kwargs) == len(kwargs): - found = True - break - - if not found: - raise AssertionError(f"{kwargs} not found.") - - -mock.Mock.assert_any_call = assert_any_call_for_numpy - -# TODO(zhenyiqi) fix external unit test failure caused by this method -training._add_indirect_dependency_versions = lambda x: x - - -@pytest.mark.usefixtures("google_auth_mock", "mock_cloud_logging_list_entries") -class TestRemoteTraining: - def setup_method(self): - reload(aiplatform.initializer) - reload(aiplatform) - reload(vertexai.preview.initializer) - reload(vertexai) - reload(_logistic) - reload(tf.keras) - - def teardown_method(self): - aiplatform.initializer.global_pool.shutdown(wait=True) - - @pytest.mark.usefixtures( - "mock_timestamped_unique_name", "mock_get_custom_job", "mock_autolog_disabled" - ) - def test_remote_training_sklearn( - self, - mock_any_serializer_sklearn, - mock_create_custom_job, - ): - vertexai.init( - project=_TEST_PROJECT, - location=_TEST_LOCATION, - staging_bucket=_TEST_BUCKET_NAME, - ) - vertexai.preview.init(remote=True) - - LogisticRegression = vertexai.preview.remote(_logistic.LogisticRegression) - model = LogisticRegression() - model.fit(_X_TRAIN, _Y_TRAIN) - - # check that model is serialized correctly - mock_any_serializer_sklearn.return_value.serialize.assert_any_call( - to_serialize=model, - gcs_path=os.path.join(_TEST_REMOTE_JOB_BASE_PATH, "input/input_estimator"), - ) - - # check that args are serialized correctly - mock_any_serializer_sklearn.return_value.serialize.assert_any_call( - to_serialize=_X_TRAIN, - gcs_path=os.path.join(_TEST_REMOTE_JOB_BASE_PATH, "input/X"), - ) - mock_any_serializer_sklearn.return_value.serialize.assert_any_call( - to_serialize=_Y_TRAIN, - gcs_path=os.path.join(_TEST_REMOTE_JOB_BASE_PATH, "input/y"), - ) - - # ckeck that CustomJob is created correctly - expected_custom_job = _get_custom_job_proto() - mock_create_custom_job.assert_called_once_with( - 
parent=_TEST_PARENT, - custom_job=expected_custom_job, - timeout=None, - ) - - # check that trained model is deserialized correctly - mock_any_serializer_sklearn.return_value.deserialize.assert_has_calls( - [ - mock.call( - os.path.join(_TEST_REMOTE_JOB_BASE_PATH, "output/output_estimator") - ), - mock.call( - os.path.join(_TEST_REMOTE_JOB_BASE_PATH, "output/output_data") - ), - ] - ) - - # change to `vertexai.preview.init(remote=False)` to use local prediction - vertexai.preview.init(remote=False) - - # check that local model is updated in place - # `model.score` raises NotFittedError if the model is not updated - model.score(_X_TEST, _Y_TEST) - - @pytest.mark.usefixtures( - "mock_timestamped_unique_name", "mock_get_custom_job", "mock_autolog_disabled" - ) - def test_remote_training_sklearn_with_user_requirements( - self, - mock_any_serializer_sklearn, - mock_create_custom_job, - ): - vertexai.init( - project=_TEST_PROJECT, - location=_TEST_LOCATION, - staging_bucket=_TEST_BUCKET_NAME, - ) - vertexai.preview.init(remote=True) - - LogisticRegression = vertexai.preview.remote(_logistic.LogisticRegression) - model = LogisticRegression() - model.fit.vertex.remote_config.requirements = _TEST_REQUIREMENTS - - model.fit(_X_TRAIN, _Y_TRAIN) - - # check that model is serialized correctly - mock_any_serializer_sklearn.return_value.serialize.assert_any_call( - to_serialize=model, - gcs_path=os.path.join(_TEST_REMOTE_JOB_BASE_PATH, "input/input_estimator"), - ) - - # check that args are serialized correctly - mock_any_serializer_sklearn.return_value.serialize.assert_any_call( - to_serialize=_X_TRAIN, - gcs_path=os.path.join(_TEST_REMOTE_JOB_BASE_PATH, "input/X"), - ) - mock_any_serializer_sklearn.return_value.serialize.assert_any_call( - to_serialize=_Y_TRAIN, - gcs_path=os.path.join(_TEST_REMOTE_JOB_BASE_PATH, "input/y"), - ) - - # ckeck that CustomJob is created correctly - expected_custom_job = _get_custom_job_proto(user_requirements=True) - mock_create_custom_job.assert_called_once_with( - parent=_TEST_PARENT, - custom_job=expected_custom_job, - timeout=None, - ) - - # check that trained model is deserialized correctly - mock_any_serializer_sklearn.return_value.deserialize.assert_has_calls( - [ - mock.call( - os.path.join(_TEST_REMOTE_JOB_BASE_PATH, "output/output_estimator") - ), - mock.call( - os.path.join(_TEST_REMOTE_JOB_BASE_PATH, "output/output_data") - ), - ] - ) - - # change to `vertexai.preview.init(remote=False)` to use local prediction - vertexai.preview.init(remote=False) - - # check that local model is updated in place - # `model.score` raises NotFittedError if the model is not updated - model.score(_X_TEST, _Y_TEST) - - @pytest.mark.usefixtures( - "mock_timestamped_unique_name", "mock_get_custom_job", "mock_autolog_disabled" - ) - def test_remote_training_sklearn_with_custom_commands( - self, - mock_any_serializer_sklearn, - mock_create_custom_job, - ): - vertexai.init( - project=_TEST_PROJECT, - location=_TEST_LOCATION, - staging_bucket=_TEST_BUCKET_NAME, - ) - vertexai.preview.init(remote=True) - - LogisticRegression = vertexai.preview.remote(_logistic.LogisticRegression) - model = LogisticRegression() - model.fit.vertex.remote_config.custom_commands = _TEST_CUSTOM_COMMANDS - - model.fit(_X_TRAIN, _Y_TRAIN) - - # check that model is serialized correctly - mock_any_serializer_sklearn.return_value.serialize.assert_any_call( - to_serialize=model, - gcs_path=os.path.join(_TEST_REMOTE_JOB_BASE_PATH, "input/input_estimator"), - ) - - # check that args are serialized correctly - 
mock_any_serializer_sklearn.return_value.serialize.assert_any_call( - to_serialize=_X_TRAIN, - gcs_path=os.path.join(_TEST_REMOTE_JOB_BASE_PATH, "input/X"), - ) - mock_any_serializer_sklearn.return_value.serialize.assert_any_call( - to_serialize=_Y_TRAIN, - gcs_path=os.path.join(_TEST_REMOTE_JOB_BASE_PATH, "input/y"), - ) - - # ckeck that CustomJob is created correctly - expected_custom_job = _get_custom_job_proto(custom_commands=True) - mock_create_custom_job.assert_called_once_with( - parent=_TEST_PARENT, - custom_job=expected_custom_job, - timeout=None, - ) - - # check that trained model is deserialized correctly - mock_any_serializer_sklearn.return_value.deserialize.assert_has_calls( - [ - mock.call( - os.path.join(_TEST_REMOTE_JOB_BASE_PATH, "output/output_estimator") - ), - mock.call( - os.path.join(_TEST_REMOTE_JOB_BASE_PATH, "output/output_data") - ), - ] - ) - - # change to `vertexai.preview.init(remote=False)` to use local prediction - vertexai.preview.init(remote=False) - - # check that local model is updated in place - # `model.score` raises NotFittedError if the model is not updated - model.score(_X_TEST, _Y_TEST) - - @pytest.mark.usefixtures( - "mock_timestamped_unique_name", "mock_get_custom_job", "mock_autolog_disabled" - ) - def test_remote_training_sklearn_with_remote_configs( - self, - mock_any_serializer_sklearn, - mock_create_custom_job, - ): - vertexai.init( - project=_TEST_PROJECT, - location=_TEST_LOCATION, - staging_bucket=_TEST_BUCKET_NAME, - ) - vertexai.preview.init(remote=True) - - LogisticRegression = vertexai.preview.remote(_logistic.LogisticRegression) - model = LogisticRegression() - - # set all training configs - model.fit.vertex.remote_config.display_name = _TEST_TRAINING_CONFIG_DISPLAY_NAME - model.fit.vertex.remote_config.staging_bucket = ( - _TEST_TRAINING_CONFIG_STAGING_BUCKET - ) - model.fit.vertex.remote_config.container_uri = ( - _TEST_TRAINING_CONFIG_CONTAINER_URI - ) - model.fit.vertex.remote_config.machine_type = _TEST_TRAINING_CONFIG_MACHINE_TYPE - model.fit.vertex.remote_config.serializer_args[model] = {"extra_params": 1} - # X_TRAIN is a numpy array that is not hashable. 
- model.fit.vertex.remote_config.serializer_args[_X_TRAIN] = {"extra_params": 2} - - model.fit(_X_TRAIN, _Y_TRAIN) - - remote_job_base_path = os.path.join( - _TEST_TRAINING_CONFIG_STAGING_BUCKET, _TEST_REMOTE_JOB_NAME - ) - - # check that model is serialized correctly - mock_any_serializer_sklearn.return_value.serialize.assert_any_call( - to_serialize=model, - gcs_path=os.path.join(remote_job_base_path, "input/input_estimator"), - **{"extra_params": 1}, - ) - - # check that args are serialized correctly - mock_any_serializer_sklearn.return_value.serialize.assert_any_call( - to_serialize=_X_TRAIN, - gcs_path=os.path.join(remote_job_base_path, "input/X"), - **{"extra_params": 2}, - ) - mock_any_serializer_sklearn.return_value.serialize.assert_any_call( - to_serialize=_Y_TRAIN, - gcs_path=os.path.join(remote_job_base_path, "input/y"), - **{}, - ) - - # ckeck that CustomJob is created correctly - expected_custom_job = _get_custom_job_proto( - display_name=_TEST_TRAINING_CONFIG_DISPLAY_NAME, - staging_bucket=_TEST_TRAINING_CONFIG_STAGING_BUCKET, - container_uri=_TEST_TRAINING_CONFIG_CONTAINER_URI, - machine_type=_TEST_TRAINING_CONFIG_MACHINE_TYPE, - ) - mock_create_custom_job.assert_called_once_with( - parent=_TEST_PARENT, - custom_job=expected_custom_job, - timeout=None, - ) - - # check that trained model is deserialized correctly - mock_any_serializer_sklearn.return_value.deserialize.assert_has_calls( - [ - mock.call( - os.path.join(remote_job_base_path, "output/output_estimator") - ), - mock.call(os.path.join(remote_job_base_path, "output/output_data")), - ] - ) - - # change to `vertexai.preview.init(remote=False)` to use local prediction - vertexai.preview.init(remote=False) - - # check that local model is updated in place - # `model.score` raises NotFittedError if the model is not updated - model.score(_X_TEST, _Y_TEST) - - @pytest.mark.usefixtures( - "mock_timestamped_unique_name", "mock_get_custom_job", "mock_autolog_disabled" - ) - def test_remote_training_sklearn_with_worker_pool_specs( - self, - mock_any_serializer_sklearn, - mock_create_custom_job, - ): - vertexai.init( - project=_TEST_PROJECT, - location=_TEST_LOCATION, - staging_bucket=_TEST_BUCKET_NAME, - ) - vertexai.preview.init(remote=True) - - LogisticRegression = vertexai.preview.remote(_logistic.LogisticRegression) - model = LogisticRegression() - - # set all training configs - model.fit.vertex.remote_config.display_name = _TEST_TRAINING_CONFIG_DISPLAY_NAME - model.fit.vertex.remote_config.staging_bucket = ( - _TEST_TRAINING_CONFIG_STAGING_BUCKET - ) - model.fit.vertex.remote_config.container_uri = ( - _TEST_TRAINING_CONFIG_CONTAINER_URI - ) - model.fit.vertex.remote_config.worker_pool_specs = ( - _TEST_TRAINING_CONFIG_WORKER_POOL_SPECS - ) - - model.fit(_X_TRAIN, _Y_TRAIN) - - remote_job_base_path = os.path.join( - _TEST_TRAINING_CONFIG_STAGING_BUCKET, _TEST_REMOTE_JOB_NAME - ) - - # check that model is serialized correctly - mock_any_serializer_sklearn.return_value.serialize.assert_any_call( - to_serialize=model, - gcs_path=os.path.join(remote_job_base_path, "input/input_estimator"), - ) - - # check that args are serialized correctly - mock_any_serializer_sklearn.return_value.serialize.assert_any_call( - to_serialize=_X_TRAIN, - gcs_path=os.path.join(remote_job_base_path, "input/X"), - ) - mock_any_serializer_sklearn.return_value.serialize.assert_any_call( - to_serialize=_Y_TRAIN, - gcs_path=os.path.join(remote_job_base_path, "input/y"), - ) - - # ckeck that CustomJob is created correctly - expected_custom_job = 
_get_custom_job_proto( - display_name=_TEST_TRAINING_CONFIG_DISPLAY_NAME, - staging_bucket=_TEST_TRAINING_CONFIG_STAGING_BUCKET, - container_uri=_TEST_TRAINING_CONFIG_CONTAINER_URI, - machine_type="n1-standard-4", - ) - mock_create_custom_job.assert_called_once_with( - parent=_TEST_PARENT, - custom_job=expected_custom_job, - timeout=None, - ) - - # check that trained model is deserialized correctly - mock_any_serializer_sklearn.return_value.deserialize.assert_has_calls( - [ - mock.call( - os.path.join(remote_job_base_path, "output/output_estimator") - ), - mock.call(os.path.join(remote_job_base_path, "output/output_data")), - ] - ) - - # change to `vertexai.preview.init(remote=False)` to use local prediction - vertexai.preview.init(remote=False) - - # check that local model is updated in place - # `model.score` raises NotFittedError if the model is not updated - model.score(_X_TEST, _Y_TEST) - - @pytest.mark.usefixtures( - "mock_timestamped_unique_name", - "mock_any_serializer_save_global_metadata", - "mock_any_serializer_load_global_metadata", - "mock_get_custom_job", - "mock_any_serializer_deserialize_sklearn", - "mock_autolog_disabled", - ) - def test_remote_training_sklearn_with_set_config( - self, - mock_any_serializer_serialize_sklearn, - mock_create_custom_job, - ): - vertexai.init( - project=_TEST_PROJECT, - location=_TEST_LOCATION, - staging_bucket=_TEST_BUCKET_NAME, - ) - vertexai.preview.init(remote=True) - - LogisticRegression = vertexai.preview.remote(_logistic.LogisticRegression) - model = LogisticRegression() - - # set training config via dict - model.fit.vertex.set_config( - display_name=_TEST_TRAINING_CONFIG_DISPLAY_NAME, - staging_bucket=_TEST_TRAINING_CONFIG_STAGING_BUCKET, - container_uri=_TEST_TRAINING_CONFIG_CONTAINER_URI, - worker_pool_specs=_TEST_TRAINING_CONFIG_WORKER_POOL_SPECS, - ) - - model.fit(_X_TRAIN, _Y_TRAIN) - - remote_job_base_path = os.path.join( - _TEST_TRAINING_CONFIG_STAGING_BUCKET, _TEST_REMOTE_JOB_NAME - ) - - # check that model is serialized correctly - mock_any_serializer_serialize_sklearn.assert_any_call( - to_serialize=model, - gcs_path=os.path.join(remote_job_base_path, "input/input_estimator"), - ) - - # check that args are serialized correctly - mock_any_serializer_serialize_sklearn.assert_any_call( - to_serialize=_X_TRAIN, - gcs_path=os.path.join(remote_job_base_path, "input/X"), - ) - mock_any_serializer_serialize_sklearn.assert_any_call( - to_serialize=_Y_TRAIN, - gcs_path=os.path.join(remote_job_base_path, "input/y"), - ) - - # ckeck that CustomJob is created correctly - expected_custom_job = _get_custom_job_proto( - display_name=_TEST_TRAINING_CONFIG_DISPLAY_NAME, - staging_bucket=_TEST_TRAINING_CONFIG_STAGING_BUCKET, - container_uri=_TEST_TRAINING_CONFIG_CONTAINER_URI, - machine_type="n1-standard-4", - ) - mock_create_custom_job.assert_called_once_with( - parent=_TEST_PARENT, - custom_job=expected_custom_job, - timeout=None, - ) - - @pytest.mark.usefixtures( - "mock_timestamped_unique_name", - "mock_get_custom_job", - "mock_any_serializer_sklearn", - "mock_autolog_disabled", - ) - def test_set_config_raises_with_unsupported_arg( - self, - mock_create_custom_job, - ): - vertexai.init( - project=_TEST_PROJECT, - location=_TEST_LOCATION, - staging_bucket=_TEST_BUCKET_NAME, - ) - vertexai.preview.init(remote=True) - - LogisticRegression = vertexai.preview.remote(_logistic.LogisticRegression) - model = LogisticRegression() - - # RemoteConfig doesn't have `boot_disk_type`, only DistributedTrainingConfig - with pytest.raises(ValueError): - 
model.fit.vertex.set_config(boot_disk_type=_TEST_BOOT_DISK_TYPE) - - @pytest.mark.usefixtures( - "mock_timestamped_unique_name", "mock_get_custom_job", "mock_autolog_disabled" - ) - def test_remote_training_sklearn_with_invalid_remote_config( - self, - mock_any_serializer_sklearn, - mock_create_custom_job, - ): - vertexai.init( - project=_TEST_PROJECT, - location=_TEST_LOCATION, - staging_bucket=_TEST_BUCKET_NAME, - ) - vertexai.preview.init(remote=True) - - LogisticRegression = vertexai.preview.remote(_logistic.LogisticRegression) - model = LogisticRegression() - - # set all training configs - model.fit.vertex.remote_config.display_name = _TEST_TRAINING_CONFIG_DISPLAY_NAME - model.fit.vertex.remote_config.staging_bucket = ( - _TEST_TRAINING_CONFIG_STAGING_BUCKET - ) - model.fit.vertex.remote_config.container_uri = ( - _TEST_TRAINING_CONFIG_CONTAINER_URI - ) - model.fit.vertex.remote_config.worker_pool_specs = ( - _TEST_TRAINING_CONFIG_WORKER_POOL_SPECS - ) - model.fit.vertex.remote_config.machine_type = _TEST_TRAINING_CONFIG_MACHINE_TYPE - - with pytest.raises( - ValueError, - match=re.escape( - "Cannot specify both 'worker_pool_specs' and ['machine_type', 'accelerator_type', 'accelerator_count', 'replica_count', 'boot_disk_type', 'boot_disk_size_gb']." - ), - ): - model.fit(_X_TRAIN, _Y_TRAIN) - - @pytest.mark.usefixtures( - "mock_timestamped_unique_name", "mock_get_custom_job", "mock_autolog_disabled" - ) - def test_remote_gpu_training_keras( - self, - mock_any_serializer_keras, - mock_create_custom_job, - ): - vertexai.init( - project=_TEST_PROJECT, - location=_TEST_LOCATION, - staging_bucket=_TEST_BUCKET_NAME, - ) - vertexai.preview.init(remote=True) - - tf.keras.VertexSequential = vertexai.preview.remote(tf.keras.Sequential) - model = tf.keras.VertexSequential( - [tf.keras.layers.Dense(5, input_shape=(4,)), tf.keras.layers.Softmax()] - ) - model.compile(optimizer="adam", loss="mean_squared_error") - model.fit.vertex.remote_config.enable_cuda = True - model.fit(_X_TRAIN, _Y_TRAIN) - - # check that model is serialized correctly - mock_any_serializer_keras.return_value.serialize.assert_any_call( - to_serialize=model, - gcs_path=os.path.join(_TEST_REMOTE_JOB_BASE_PATH, "input/input_estimator"), - ) - - # check that args are serialized correctly - mock_any_serializer_keras.return_value.serialize.assert_any_call( - to_serialize=_X_TRAIN, - gcs_path=os.path.join(_TEST_REMOTE_JOB_BASE_PATH, "input/x"), - ) - mock_any_serializer_keras.return_value.serialize.assert_any_call( - to_serialize=_Y_TRAIN, - gcs_path=os.path.join(_TEST_REMOTE_JOB_BASE_PATH, "input/y"), - ) - - # ckeck that CustomJob is created correctly - expected_custom_job = _get_custom_job_proto(cuda_enabled=True, model=model) - mock_create_custom_job.assert_called_once_with( - parent=_TEST_PARENT, - custom_job=expected_custom_job, - timeout=None, - ) - - # check that trained model is deserialized correctly - mock_any_serializer_keras.return_value.deserialize.assert_has_calls( - [ - mock.call( - os.path.join(_TEST_REMOTE_JOB_BASE_PATH, "output/output_estimator") - ), - mock.call( - os.path.join(_TEST_REMOTE_JOB_BASE_PATH, "output/output_data"), - model=model, - ), - ] - ) - - @pytest.mark.usefixtures( - "mock_timestamped_unique_name", "mock_get_custom_job", "mock_autolog_disabled" - ) - def test_remote_gpu_training_keras_with_remote_configs( - self, - mock_any_serializer_keras, - mock_create_custom_job, - ): - vertexai.init( - project=_TEST_PROJECT, - location=_TEST_LOCATION, - staging_bucket=_TEST_BUCKET_NAME, - ) - 
vertexai.preview.init(remote=True) - - tf.keras.VertexSequential = vertexai.preview.remote(tf.keras.Sequential) - model = tf.keras.VertexSequential( - [tf.keras.layers.Dense(5, input_shape=(4,)), tf.keras.layers.Softmax()] - ) - model.compile(optimizer="adam", loss="mean_squared_error") - - model.fit.vertex.remote_config.enable_cuda = True - model.fit.vertex.remote_config.container_uri = ( - _TEST_TRAINING_CONFIG_CONTAINER_URI - ) - model.fit.vertex.remote_config.machine_type = _TEST_TRAINING_CONFIG_MACHINE_TYPE - model.fit.vertex.remote_config.accelerator_type = ( - _TEST_TRAINING_CONFIG_ACCELERATOR_TYPE - ) - model.fit.vertex.remote_config.accelerator_count = ( - _TEST_TRAINING_CONFIG_ACCELERATOR_COUNT - ) - - model.fit(_X_TRAIN, _Y_TRAIN) - - # check that model is serialized correctly - mock_any_serializer_keras.return_value.serialize.assert_any_call( - to_serialize=model, - gcs_path=os.path.join(_TEST_REMOTE_JOB_BASE_PATH, "input/input_estimator"), - ) - - # check that args are serialized correctly - mock_any_serializer_keras.return_value.serialize.assert_any_call( - to_serialize=_X_TRAIN, - gcs_path=os.path.join(_TEST_REMOTE_JOB_BASE_PATH, "input/x"), - ) - mock_any_serializer_keras.return_value.serialize.assert_any_call( - to_serialize=_Y_TRAIN, - gcs_path=os.path.join(_TEST_REMOTE_JOB_BASE_PATH, "input/y"), - ) - - # ckeck that CustomJob is created correctly - expected_custom_job = _get_custom_job_proto( - cuda_enabled=True, - model=model, - container_uri=_TEST_TRAINING_CONFIG_CONTAINER_URI, - machine_type=_TEST_TRAINING_CONFIG_MACHINE_TYPE, - accelerator_type=_TEST_TRAINING_CONFIG_ACCELERATOR_TYPE, - accelerator_count=_TEST_TRAINING_CONFIG_ACCELERATOR_COUNT, - ) - mock_create_custom_job.assert_called_once_with( - parent=_TEST_PARENT, - custom_job=expected_custom_job, - timeout=None, - ) - - # check that trained model is deserialized correctly - mock_any_serializer_keras.return_value.deserialize.assert_has_calls( - [ - mock.call( - os.path.join(_TEST_REMOTE_JOB_BASE_PATH, "output/output_estimator") - ), - mock.call( - os.path.join(_TEST_REMOTE_JOB_BASE_PATH, "output/output_data"), - model=model, - ), - ] - ) - - @pytest.mark.usefixtures( - "mock_timestamped_unique_name", "mock_get_custom_job", "mock_autolog_disabled" - ) - def test_remote_training_keras_with_worker_pool_specs( - self, - mock_any_serializer_keras, - mock_create_custom_job, - ): - vertexai.init( - project=_TEST_PROJECT, - location=_TEST_LOCATION, - staging_bucket=_TEST_BUCKET_NAME, - ) - vertexai.preview.init(remote=True) - - tf.keras.VertexSequential = vertexai.preview.remote(tf.keras.Sequential) - model = tf.keras.VertexSequential( - [tf.keras.layers.Dense(5, input_shape=(4,)), tf.keras.layers.Softmax()] - ) - model.compile(optimizer="adam", loss="mean_squared_error") - - model.fit.vertex.remote_config.enable_distributed = True - model.fit.vertex.remote_config.enable_cuda = True - model.fit.vertex.remote_config.container_uri = ( - _TEST_TRAINING_CONFIG_CONTAINER_URI - ) - model.fit.vertex.remote_config.worker_pool_specs = ( - _TEST_TRAINING_CONFIG_WORKER_POOL_SPECS_GPU - ) - - model.fit(_X_TRAIN, _Y_TRAIN) - - # check that model is serialized correctly - mock_any_serializer_keras.return_value.serialize.assert_any_call( - to_serialize=model, - gcs_path=os.path.join(_TEST_REMOTE_JOB_BASE_PATH, "input/input_estimator"), - ) - - # check that args are serialized correctly - mock_any_serializer_keras.return_value.serialize.assert_any_call( - to_serialize=_X_TRAIN, - gcs_path=os.path.join(_TEST_REMOTE_JOB_BASE_PATH, 
"input/x"), - ) - mock_any_serializer_keras.return_value.serialize.assert_any_call( - to_serialize=_Y_TRAIN, - gcs_path=os.path.join(_TEST_REMOTE_JOB_BASE_PATH, "input/y"), - ) - - # ckeck that CustomJob is created correctly - expected_custom_job = _get_custom_job_proto( - cuda_enabled=True, - model=model, - container_uri=_TEST_TRAINING_CONFIG_CONTAINER_URI, - machine_type=_TEST_TRAINING_CONFIG_MACHINE_TYPE, - accelerator_type=_TEST_TRAINING_CONFIG_ACCELERATOR_TYPE, - accelerator_count=_TEST_TRAINING_CONFIG_ACCELERATOR_COUNT, - boot_disk_type=_TEST_BOOT_DISK_TYPE, - boot_disk_size_gb=_TEST_BOOT_DISK_SIZE_GB, - distributed_enabled=True, - ) - mock_create_custom_job.assert_called_once_with( - parent=_TEST_PARENT, - custom_job=expected_custom_job, - timeout=None, - ) - - # check that trained model is deserialized correctly - mock_any_serializer_keras.return_value.deserialize.assert_has_calls( - [ - mock.call( - os.path.join(_TEST_REMOTE_JOB_BASE_PATH, "output/output_estimator") - ), - mock.call( - os.path.join(_TEST_REMOTE_JOB_BASE_PATH, "output/output_data"), - model=model, - ), - ] - ) - - @pytest.mark.usefixtures( - "mock_timestamped_unique_name", "mock_get_custom_job", "mock_autolog_disabled" - ) - def test_remote_training_keras_distributed_cuda_no_worker_pool_specs( - self, - mock_any_serializer_keras, - mock_create_custom_job, - ): - vertexai.init( - project=_TEST_PROJECT, - location=_TEST_LOCATION, - staging_bucket=_TEST_BUCKET_NAME, - ) - vertexai.preview.init(remote=True) - - tf.keras.VertexSequential = vertexai.preview.remote(tf.keras.Sequential) - model = tf.keras.VertexSequential( - [tf.keras.layers.Dense(5, input_shape=(4,)), tf.keras.layers.Softmax()] - ) - model.compile(optimizer="adam", loss="mean_squared_error") - - model.fit.vertex.remote_config.enable_distributed = True - model.fit.vertex.remote_config.enable_cuda = True - model.fit.vertex.remote_config.container_uri = ( - _TEST_TRAINING_CONFIG_CONTAINER_URI - ) - - model.fit(_X_TRAIN, _Y_TRAIN) - - # check that model is serialized correctly - mock_any_serializer_keras.return_value.serialize.assert_any_call( - to_serialize=model, - gcs_path=os.path.join(_TEST_REMOTE_JOB_BASE_PATH, "input/input_estimator"), - ) - - # check that args are serialized correctly - mock_any_serializer_keras.return_value.serialize.assert_any_call( - to_serialize=_X_TRAIN, - gcs_path=os.path.join(_TEST_REMOTE_JOB_BASE_PATH, "input/x"), - ) - mock_any_serializer_keras.return_value.serialize.assert_any_call( - to_serialize=_Y_TRAIN, - gcs_path=os.path.join(_TEST_REMOTE_JOB_BASE_PATH, "input/y"), - ) - - # ckeck that CustomJob is created correctly - expected_custom_job = _get_custom_job_proto( - cuda_enabled=True, - model=model, - container_uri=_TEST_TRAINING_CONFIG_CONTAINER_URI, - machine_type="n1-standard-16", - accelerator_type="NVIDIA_TESLA_P100", - accelerator_count=1, - boot_disk_type="pd-ssd", - boot_disk_size_gb=100, - distributed_enabled=True, - ) - - expected_custom_job.job_spec.worker_pool_specs = [ - expected_custom_job.job_spec.worker_pool_specs[0], - expected_custom_job.job_spec.worker_pool_specs[0], - ] - - mock_create_custom_job.assert_called_once_with( - parent=_TEST_PARENT, - custom_job=expected_custom_job, - timeout=None, - ) - - # check that trained model is deserialized correctly - mock_any_serializer_keras.return_value.deserialize.assert_has_calls( - [ - mock.call( - os.path.join(_TEST_REMOTE_JOB_BASE_PATH, "output/output_estimator") - ), - mock.call( - os.path.join(_TEST_REMOTE_JOB_BASE_PATH, "output/output_data"), - 
model=model, - ), - ] - ) - - @pytest.mark.usefixtures( - "mock_timestamped_unique_name", "mock_get_custom_job", "mock_autolog_disabled" - ) - def test_remote_training_keras_distributed_no_cuda_no_worker_pool_specs( - self, - mock_any_serializer_keras, - mock_create_custom_job, - ): - vertexai.init( - project=_TEST_PROJECT, - location=_TEST_LOCATION, - staging_bucket=_TEST_BUCKET_NAME, - ) - vertexai.preview.init(remote=True) - - tf.keras.VertexSequential = vertexai.preview.remote(tf.keras.Sequential) - model = tf.keras.VertexSequential( - [tf.keras.layers.Dense(5, input_shape=(4,)), tf.keras.layers.Softmax()] - ) - model.compile(optimizer="adam", loss="mean_squared_error") - - model.fit.vertex.remote_config.enable_distributed = True - model.fit.vertex.remote_config.enable_cuda = False - model.fit.vertex.remote_config.container_uri = ( - _TEST_TRAINING_CONFIG_CONTAINER_URI - ) - - model.fit(_X_TRAIN, _Y_TRAIN) - - # check that model is serialized correctly - mock_any_serializer_keras.return_value.serialize.assert_any_call( - to_serialize=model, - gcs_path=os.path.join(_TEST_REMOTE_JOB_BASE_PATH, "input/input_estimator"), - ) - - # check that args are serialized correctly - mock_any_serializer_keras.return_value.serialize.assert_any_call( - to_serialize=_X_TRAIN, - gcs_path=os.path.join(_TEST_REMOTE_JOB_BASE_PATH, "input/x"), - ) - mock_any_serializer_keras.return_value.serialize.assert_any_call( - to_serialize=_Y_TRAIN, - gcs_path=os.path.join(_TEST_REMOTE_JOB_BASE_PATH, "input/y"), - ) - - # check that CustomJob is created correctly - expected_custom_job = _get_custom_job_proto( - model=model, - container_uri=_TEST_TRAINING_CONFIG_CONTAINER_URI, - machine_type="n1-standard-4", - boot_disk_type="pd-ssd", - boot_disk_size_gb=100, - distributed_enabled=True, - ) - expected_custom_job.job_spec.worker_pool_specs = [ - expected_custom_job.job_spec.worker_pool_specs[0], - expected_custom_job.job_spec.worker_pool_specs[0], - ] - - mock_create_custom_job.assert_called_once_with( - parent=_TEST_PARENT, - custom_job=expected_custom_job, - timeout=None, - ) - - # check that trained model is deserialized correctly - mock_any_serializer_keras.return_value.deserialize.assert_has_calls( - [ - mock.call( - os.path.join(_TEST_REMOTE_JOB_BASE_PATH, "output/output_estimator") - ), - mock.call( - os.path.join(_TEST_REMOTE_JOB_BASE_PATH, "output/output_data"), - model=model, - ), - ] - ) - - # TODO(b/300116902) Remove this once we find better solution. 
- @pytest.mark.xfail( - sys.version_info.minor >= 8, - raises=ValueError, - reason="Flaky in python >=3.8", - ) - @pytest.mark.usefixtures( - "list_default_tensorboard_mock", - "mock_timestamped_unique_name", - "mock_get_custom_job", - "mock_get_project_number", - "mock_get_experiment_run", - "mock_get_metadata_store", - "get_artifact_not_found_mock", - "update_context_mock", - "mock_autolog_disabled", - "get_tensorboard_mock", - ) - def test_remote_training_sklearn_with_experiment( - self, - mock_any_serializer_sklearn, - mock_create_custom_job, - ): - vertexai.init( - project=_TEST_PROJECT, - location=_TEST_LOCATION, - staging_bucket=_TEST_BUCKET_NAME, - experiment=_TEST_EXPERIMENT, - credentials=_TEST_CREDENTIALS, - ) - vertexai.preview.init(remote=True) - - vertexai.preview.start_run(_TEST_EXPERIMENT_RUN, resume=True) - - LogisticRegression = vertexai.preview.remote(_logistic.LogisticRegression) - model = LogisticRegression() - - model.fit.vertex.remote_config.service_account = "GCE" - - model.fit(_X_TRAIN, _Y_TRAIN) - - # check that model is serialized correctly - mock_any_serializer_sklearn.return_value.serialize.assert_any_call( - to_serialize=model, - gcs_path=os.path.join(_TEST_REMOTE_JOB_BASE_PATH, "input/input_estimator"), - ) - - # check that args are serialized correctly - mock_any_serializer_sklearn.return_value.serialize.assert_any_call( - to_serialize=_X_TRAIN, - gcs_path=os.path.join(_TEST_REMOTE_JOB_BASE_PATH, "input/X"), - ) - mock_any_serializer_sklearn.return_value.serialize.assert_any_call( - to_serialize=_Y_TRAIN, - gcs_path=os.path.join(_TEST_REMOTE_JOB_BASE_PATH, "input/y"), - ) - - # ckeck that CustomJob is created correctly - expected_custom_job = _get_custom_job_proto( - service_account=f"{_TEST_PROJECT_NUMBER}-compute@developer.gserviceaccount.com", - ) - mock_create_custom_job.assert_called_once_with( - parent=_TEST_PARENT, - custom_job=expected_custom_job, - timeout=None, - ) - - # check that trained model is deserialized correctly - mock_any_serializer_sklearn.return_value.deserialize.assert_has_calls( - [ - mock.call( - os.path.join(_TEST_REMOTE_JOB_BASE_PATH, "output/output_estimator") - ), - mock.call( - os.path.join(_TEST_REMOTE_JOB_BASE_PATH, "output/output_data") - ), - ] - ) - - # change to `vertexai.preview.init(remote=False)` to use local prediction - vertexai.preview.init(remote=False) - - # check that local model is updated in place - # `model.score` raises NotFittedError if the model is not updated - model.score(_X_TEST, _Y_TEST) - - # TODO(b/300116902) Remove this once we find better solution - @pytest.mark.xfail( - sys.version_info.minor >= 8, - raises=ValueError, - reason="Flaky in python >=3.8", - ) - @pytest.mark.usefixtures( - "list_default_tensorboard_mock", - "mock_timestamped_unique_name", - "mock_get_custom_job", - "mock_get_experiment_run", - "mock_get_metadata_store", - "get_artifact_not_found_mock", - "update_context_mock", - "aiplatform_autolog_mock", - "mock_autolog_enabled", - "get_tensorboard_mock", - ) - def test_remote_training_sklearn_with_experiment_autolog_enabled( - self, - mock_any_serializer_sklearn, - mock_create_custom_job, - ): - vertexai.init( - project=_TEST_PROJECT, - location=_TEST_LOCATION, - staging_bucket=_TEST_BUCKET_NAME, - experiment=_TEST_EXPERIMENT, - credentials=_TEST_CREDENTIALS, - ) - vertexai.preview.init(remote=True, autolog=True) - - vertexai.preview.start_run(_TEST_EXPERIMENT_RUN, resume=True) - - LogisticRegression = vertexai.preview.remote(_logistic.LogisticRegression) - model = 
LogisticRegression() - - model.fit.vertex.remote_config.service_account = "custom-sa" - - model.fit(_X_TRAIN, _Y_TRAIN) - - # check that model is serialized correctly - mock_any_serializer_sklearn.return_value.serialize.assert_any_call( - to_serialize=model, - gcs_path=os.path.join(_TEST_REMOTE_JOB_BASE_PATH, "input/input_estimator"), - ) - - # check that args are serialized correctly - mock_any_serializer_sklearn.return_value.serialize.assert_any_call( - to_serialize=_X_TRAIN, - gcs_path=os.path.join(_TEST_REMOTE_JOB_BASE_PATH, "input/X"), - ) - mock_any_serializer_sklearn.return_value.serialize.assert_any_call( - to_serialize=_Y_TRAIN, - gcs_path=os.path.join(_TEST_REMOTE_JOB_BASE_PATH, "input/y"), - ) - - # ckeck that CustomJob is created correctly - expected_custom_job = _get_custom_job_proto( - service_account="custom-sa", - experiment=_TEST_EXPERIMENT, - experiment_run=_TEST_EXPERIMENT_RUN, - autolog_enabled=True, - ) - mock_create_custom_job.assert_called_once_with( - parent=_TEST_PARENT, - custom_job=expected_custom_job, - timeout=None, - ) - - # check that trained model is deserialized correctly - mock_any_serializer_sklearn.return_value.deserialize.assert_has_calls( - [ - mock.call( - os.path.join(_TEST_REMOTE_JOB_BASE_PATH, "output/output_estimator") - ), - mock.call( - os.path.join(_TEST_REMOTE_JOB_BASE_PATH, "output/output_data") - ), - ] - ) - - # change to `vertexai.preview.init(remote=False)` to use local prediction - vertexai.preview.init(remote=False) - - # check that local model is updated in place - # `model.score` raises NotFittedError if the model is not updated - model.score(_X_TEST, _Y_TEST) - - def test_get_service_account_custom_service_account(self): - config = configs.RemoteConfig() - config.service_account = "custom-sa" - - service_account = training._get_service_account(config, autolog=True) - - assert service_account == "custom-sa" - - @pytest.mark.usefixtures( - "mock_get_project_number", - ) - def test_get_service_account_gce_service_account(self): - config = configs.RemoteConfig() - config.service_account = "GCE" - - service_account = training._get_service_account(config, autolog=True) - - assert ( - service_account - == f"{_TEST_PROJECT_NUMBER}-compute@developer.gserviceaccount.com" - ) - - def test_get_service_account_empty_sa_autolog_enabled(self): - config = configs.RemoteConfig() - # config.service_account is empty - - with pytest.raises(ValueError): - training._get_service_account(config, autolog=True) - - def test_get_service_account_empty_sa_autolog_disabled(self): - config = configs.RemoteConfig() - # config.service_account is empty - - service_account = training._get_service_account(config, autolog=False) - - assert service_account is None - - @pytest.mark.usefixtures( - "mock_timestamped_unique_name", - "mock_get_custom_job", - "mock_autolog_disabled", - "persistent_resource_running_mock", - ) - def test_remote_training_sklearn_with_persistent_cluster( - self, - mock_any_serializer_sklearn, - mock_create_custom_job, - ): - vertexai.init( - project=_TEST_PROJECT, - location=_TEST_LOCATION, - staging_bucket=_TEST_BUCKET_NAME, - ) - vertexai.preview.init(remote=True, cluster=_TEST_PERSISTENT_RESOURCE_CONFIG) - - LogisticRegression = vertexai.preview.remote(_logistic.LogisticRegression) - model = LogisticRegression() - - model.fit(_X_TRAIN, _Y_TRAIN) - - # check that model is serialized correctly - mock_any_serializer_sklearn.return_value.serialize.assert_any_call( - to_serialize=model, - gcs_path=os.path.join(_TEST_REMOTE_JOB_BASE_PATH, 
"input/input_estimator"), - ) - - # check that args are serialized correctly - mock_any_serializer_sklearn.return_value.serialize.assert_any_call( - to_serialize=_X_TRAIN, - gcs_path=os.path.join(_TEST_REMOTE_JOB_BASE_PATH, "input/X"), - ) - mock_any_serializer_sklearn.return_value.serialize.assert_any_call( - to_serialize=_Y_TRAIN, - gcs_path=os.path.join(_TEST_REMOTE_JOB_BASE_PATH, "input/y"), - ) - - # ckeck that CustomJob is created correctly - expected_custom_job = _get_custom_job_proto( - persistent_resource_id=_TEST_PERSISTENT_RESOURCE_ID, - ) - mock_create_custom_job.assert_called_once_with( - parent=_TEST_PARENT, - custom_job=expected_custom_job, - timeout=None, - ) - - # check that trained model is deserialized correctly - mock_any_serializer_sklearn.return_value.deserialize.assert_has_calls( - [ - mock.call( - os.path.join(_TEST_REMOTE_JOB_BASE_PATH, "output/output_estimator") - ), - mock.call( - os.path.join(_TEST_REMOTE_JOB_BASE_PATH, "output/output_data") - ), - ] - ) - - # change to `vertexai.preview.init(remote=False)` to use local prediction - vertexai.preview.init(remote=False) - - # check that local model is updated in place - # `model.score` raises NotFittedError if the model is not updated - model.score(_X_TEST, _Y_TEST) - - @pytest.mark.usefixtures( - "mock_timestamped_unique_name", - "mock_get_custom_job", - "mock_autolog_disabled", - "persistent_resource_running_mock", - ) - def test_initialize_existing_persistent_resource_service_account_mismatch(self): - vertexai.init( - project=_TEST_PROJECT, - location=_TEST_LOCATION, - staging_bucket=_TEST_BUCKET_NAME, - ) - with pytest.raises(ValueError) as e: - vertexai.preview.init( - cluster=_TEST_PERSISTENT_RESOURCE_CONFIG_SERVICE_ACCOUNT - ) - e.match( - regexp=r"Expect the existing cluster was created with the service account " - ) - - @pytest.mark.usefixtures( - "mock_get_project_number", - "list_default_tensorboard_mock", - "mock_get_experiment_run", - "mock_get_metadata_store", - "get_artifact_not_found_mock", - "update_context_mock", - "aiplatform_autolog_mock", - "mock_autolog_enabled", - "persistent_resource_running_mock", - "get_tensorboard_mock", - ) - def test_remote_training_sklearn_with_persistent_cluster_no_service_account_and_experiment_error( - self, - ): - vertexai.init( - project=_TEST_PROJECT, - location=_TEST_LOCATION, - staging_bucket=_TEST_BUCKET_NAME, - experiment=_TEST_EXPERIMENT, - credentials=_TEST_CREDENTIALS, - ) - vertexai.preview.init( - remote=True, autolog=True, cluster=_TEST_PERSISTENT_RESOURCE_CONFIG - ) - - LogisticRegression = vertexai.preview.remote(_logistic.LogisticRegression) - model = LogisticRegression() - - with pytest.raises(ValueError) as e: - model.fit.vertex.remote_config.service_account = "GCE" - model.fit(_X_TRAIN, _Y_TRAIN) - e.match(regexp=r"The service account for autologging") - - # TODO(b/300116902) Remove this once we find better solution. 
- @pytest.mark.xfail( - sys.version_info.minor >= 8, - raises=ValueError, - reason="Flaky in python >=3.8", - ) - @pytest.mark.usefixtures( - "mock_get_project_number", - "list_default_tensorboard_mock", - "mock_get_experiment_run", - "mock_get_metadata_store", - "get_artifact_not_found_mock", - "update_context_mock", - "aiplatform_autolog_mock", - "mock_autolog_enabled", - "persistent_resource_service_account_running_mock", - "mock_timestamped_unique_name", - "mock_get_custom_job", - "get_tensorboard_mock", - ) - def test_remote_training_sklearn_with_persistent_cluster_and_experiment_autologging( - self, - mock_any_serializer_sklearn, - mock_create_custom_job, - ): - vertexai.init( - project=_TEST_PROJECT, - location=_TEST_LOCATION, - staging_bucket=_TEST_BUCKET_NAME, - experiment=_TEST_EXPERIMENT, - credentials=_TEST_CREDENTIALS, - ) - vertexai.preview.init( - remote=True, - autolog=True, - cluster=_TEST_PERSISTENT_RESOURCE_CONFIG_SERVICE_ACCOUNT, - ) - - vertexai.preview.start_run(_TEST_EXPERIMENT_RUN, resume=True) - - LogisticRegression = vertexai.preview.remote(_logistic.LogisticRegression) - model = LogisticRegression() - - model.fit.vertex.remote_config.service_account = _TEST_SERVICE_ACCOUNT - - model.fit(_X_TRAIN, _Y_TRAIN) - - # check that model is serialized correctly - mock_any_serializer_sklearn.return_value.serialize.assert_any_call( - to_serialize=model, - gcs_path=os.path.join(_TEST_REMOTE_JOB_BASE_PATH, "input/input_estimator"), - ) - - # check that args are serialized correctly - mock_any_serializer_sklearn.return_value.serialize.assert_any_call( - to_serialize=_X_TRAIN, - gcs_path=os.path.join(_TEST_REMOTE_JOB_BASE_PATH, "input/X"), - ) - mock_any_serializer_sklearn.return_value.serialize.assert_any_call( - to_serialize=_Y_TRAIN, - gcs_path=os.path.join(_TEST_REMOTE_JOB_BASE_PATH, "input/y"), - ) - - # ckeck that CustomJob is created correctly - expected_custom_job = _get_custom_job_proto( - service_account=_TEST_SERVICE_ACCOUNT, - experiment=_TEST_EXPERIMENT, - experiment_run=_TEST_EXPERIMENT_RUN, - autolog_enabled=True, - persistent_resource_id=_TEST_PERSISTENT_RESOURCE_ID, - ) - mock_create_custom_job.assert_called_once_with( - parent=_TEST_PARENT, - custom_job=expected_custom_job, - timeout=None, - ) - - # check that trained model is deserialized correctly - mock_any_serializer_sklearn.return_value.deserialize.assert_has_calls( - [ - mock.call( - os.path.join(_TEST_REMOTE_JOB_BASE_PATH, "output/output_estimator") - ), - mock.call( - os.path.join(_TEST_REMOTE_JOB_BASE_PATH, "output/output_data") - ), - ] - ) - - # change to `vertexai.preview.init(remote=False)` to use local prediction - vertexai.preview.init(remote=False) - - # check that local model is updated in place - # `model.score` raises NotFittedError if the model is not updated - model.score(_X_TEST, _Y_TEST) - - @pytest.mark.usefixtures( - "mock_timestamped_unique_name", - "mock_get_custom_job", - "mock_autolog_disabled", - "persistent_resource_running_mock", - ) - def test_remote_training_sklearn_with_persistent_cluster_disabled( - self, - mock_any_serializer_sklearn, - mock_create_custom_job, - ): - vertexai.init( - project=_TEST_PROJECT, - location=_TEST_LOCATION, - staging_bucket=_TEST_BUCKET_NAME, - ) - # Enable persistent resource executor - vertexai.preview.init(remote=True, cluster=_TEST_PERSISTENT_RESOURCE_CONFIG) - # Disable persistent resource executor - vertexai.preview.init( - remote=True, cluster=_TEST_PERSISTENT_RESOURCE_CONFIG_DISABLE - ) - - LogisticRegression = 
vertexai.preview.remote(_logistic.LogisticRegression) - model = LogisticRegression() - - model.fit(_X_TRAIN, _Y_TRAIN) - - # check that model is serialized correctly - mock_any_serializer_sklearn.return_value.serialize.assert_any_call( - to_serialize=model, - gcs_path=os.path.join(_TEST_REMOTE_JOB_BASE_PATH, "input/input_estimator"), - ) - - # check that args are serialized correctly - mock_any_serializer_sklearn.return_value.serialize.assert_any_call( - to_serialize=_X_TRAIN, - gcs_path=os.path.join(_TEST_REMOTE_JOB_BASE_PATH, "input/X"), - ) - mock_any_serializer_sklearn.return_value.serialize.assert_any_call( - to_serialize=_Y_TRAIN, - gcs_path=os.path.join(_TEST_REMOTE_JOB_BASE_PATH, "input/y"), - ) - - # ckeck that CustomJob is created correctly without persistent_resource_id - expected_custom_job = _get_custom_job_proto() - mock_create_custom_job.assert_called_once_with( - parent=_TEST_PARENT, - custom_job=expected_custom_job, - timeout=None, - ) - - # check that trained model is deserialized correctly - mock_any_serializer_sklearn.return_value.deserialize.assert_has_calls( - [ - mock.call( - os.path.join(_TEST_REMOTE_JOB_BASE_PATH, "output/output_estimator") - ), - mock.call( - os.path.join(_TEST_REMOTE_JOB_BASE_PATH, "output/output_data") - ), - ] - ) - - # change to `vertexai.preview.init(remote=False)` to use local prediction - vertexai.preview.init(remote=False) - - # check that local model is updated in place - # `model.score` raises NotFittedError if the model is not updated - model.score(_X_TEST, _Y_TEST) - - def test_resource_pool_return_spec_dict(self): - test_pool = resource_pool_utils._ResourcePool( - replica_count=_TEST_REPLICA_COUNT, - machine_type=_TEST_MACHINE_TYPE, - accelerator_count=_TEST_ACCELERATOR_COUNT, - accelerator_type=_TEST_ACCELERATOR_TYPE, - ) - true_spec_dict = { - "machine_spec": { - "machine_type": _TEST_MACHINE_TYPE, - "accelerator_type": _TEST_ACCELERATOR_TYPE, - "accelerator_count": _TEST_ACCELERATOR_COUNT, - }, - "replica_count": _TEST_REPLICA_COUNT, - "disk_spec": { - "boot_disk_type": "pd-ssd", - "boot_disk_size_gb": 100, - }, - } - - assert test_pool.spec_dict == true_spec_dict diff --git a/tests/unit/vertexai/test_serializers.py b/tests/unit/vertexai/test_serializers.py deleted file mode 100644 index 8d38d43b98..0000000000 --- a/tests/unit/vertexai/test_serializers.py +++ /dev/null @@ -1,1414 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2023 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-# - -from importlib import reload -import json -import os -import pickle -import types -from unittest.mock import ANY - -import cloudpickle -from google.cloud import aiplatform -import vertexai -from google.cloud.aiplatform.utils import gcs_utils -from vertexai.preview._workflow.serialization_engine import ( - any_serializer as any_serializer_lib, -) -from vertexai.preview._workflow.serialization_engine import ( - serializers, -) -from vertexai.preview._workflow.shared import constants -from vertexai.preview._workflow.shared import ( - supported_frameworks, -) - -import mock -import numpy as np -import pandas as pd -from pyfakefs import fake_filesystem_unittest -import pytest -from sklearn.linear_model import _logistic -import tensorflow as tf -from tensorflow import keras -import torch - - -@pytest.fixture -def mock_isvalid_gcs_path(): - """Allow using a local path in test.""" - with mock.patch.object( - serializers, - "_is_valid_gcs_path", - autospec=True, - return_value=True, - ) as always_return_true_mock_path_check: - yield always_return_true_mock_path_check - - -@pytest.fixture -def cloudpickle_serializer(): - return serializers.CloudPickleSerializer() - - -@pytest.fixture -def any_serializer(): - return any_serializer_lib.AnySerializer() - - -@pytest.fixture -def sklearn_estimator_serializer(): - return serializers.SklearnEstimatorSerializer() - - -@pytest.fixture -def keras_model_serializer(): - return serializers.KerasModelSerializer() - - -@pytest.fixture -def keras_history_callback_serializer(): - return serializers.KerasHistoryCallbackSerializer() - - -@pytest.fixture -def torch_model_serializer(): - return serializers.TorchModelSerializer() - - -@pytest.fixture -def pandas_data_serializer(): - return serializers.PandasDataSerializer() - - -@pytest.fixture -def torch_dataloader_serializer(): - return serializers.TorchDataLoaderSerializer() - - -@pytest.fixture -def bigframe_serializer(): - return serializers.BigframeSerializer() - - -@pytest.fixture -def tf_dataset_serializer(): - return serializers.TFDatasetSerializer() - - -@pytest.fixture -def mock_keras_model_deserialize(): - with mock.patch.object( - serializers.KerasModelSerializer, "deserialize", autospec=True - ) as keras_model_deserialize: - yield keras_model_deserialize - - -@pytest.fixture -def mock_sklearn_estimator_serialize(): - def stateful_serialize(self, to_serialize, gcs_path): - del self, to_serialize, gcs_path - serializers.SklearnEstimatorSerializer._metadata.dependencies = [ - "sklearn_dependency1==1.0.0" - ] - - with mock.patch.object( - serializers.SklearnEstimatorSerializer, - "serialize", - new=stateful_serialize, - ) as sklearn_estimator_serialize: - yield sklearn_estimator_serialize - serializers.SklearnEstimatorSerializer._metadata.dependencies = [] - - -@pytest.fixture -def mock_sklearn_estimator_deserialize(): - with mock.patch.object( - serializers.SklearnEstimatorSerializer, "deserialize", autospec=True - ) as sklearn_estimator_deserialize: - yield sklearn_estimator_deserialize - - -@pytest.fixture -def mock_torch_model_serialize(): - def stateful_serialize(self, to_serialize, gcs_path): - del self, to_serialize, gcs_path - serializers.TorchModelSerializer._metadata.dependencies = ["torch==1.0.0"] - - with mock.patch.object( - serializers.TorchModelSerializer, "serialize", new=stateful_serialize - ) as torch_model_serialize: - yield torch_model_serialize - serializers.TorchModelSerializer._metadata.dependencies = [] - - -@pytest.fixture -def mock_torch_model_deserialize(): - with 
mock.patch.object( - serializers.TorchModelSerializer, "deserialize", autospec=True - ) as torch_model_deserialize: - yield torch_model_deserialize - - -@pytest.fixture -def mock_torch_dataloader_serialize(tmp_path): - def stateful_serialize(self, to_serialize, gcs_path): - del self, to_serialize, gcs_path - serializers.TorchDataLoaderSerializer._metadata.dependencies = ["torch==1.0.0"] - - with mock.patch.object( - serializers.TorchDataLoaderSerializer, "serialize", new=stateful_serialize - ) as torch_dataloader_serialize: - yield torch_dataloader_serialize - serializers.TorchDataLoaderSerializer._metadata.dependencies = [] - - -@pytest.fixture -def mock_torch_dataloader_deserialize(): - with mock.patch.object( - serializers.TorchDataLoaderSerializer, "deserialize", autospec=True - ) as torch_dataloader_serializer: - yield torch_dataloader_serializer - - -@pytest.fixture -def mock_download_from_gcs_for_torch_dataloader(tmp_path, torch_dataloader_serializer): - def fake_download_from_gcs(serialized_gcs_path, temp_dir): - dataloader = torch.utils.data.DataLoader( - torch.utils.data.TensorDataset( - torch.tensor([[1, 2, 3] for i in range(100)]), - torch.tensor([1] * 100), - ), - batch_size=10, - shuffle=True, - ) - torch_dataloader_serializer._serialize_to_local( - dataloader, os.fspath(tmp_path / temp_dir) - ) - - with mock.patch.object( - gcs_utils, "download_from_gcs", new=fake_download_from_gcs - ) as download_from_gcs: - yield download_from_gcs - - -@pytest.fixture -def mock_download_from_gcs_for_keras_model(tmp_path): - def fake_download_from_gcs(serialized_gcs_path, temp_dir): - keras_model = keras.models.Sequential( - [keras.layers.Dense(8, input_shape=(2,)), keras.layers.Dense(4)] - ) - keras_model.save(tmp_path / temp_dir, save_format="tf") - - with mock.patch.object( - gcs_utils, "download_from_gcs", new=fake_download_from_gcs - ) as download_from_gcs: - yield download_from_gcs - - -@pytest.fixture -def mock_tf_dataset_serialize(tmp_path): - def stateful_serialize(self, to_serialize, gcs_path): - del gcs_path - serializers.TFDatasetSerializer._metadata.dependencies = ["tensorflow==1.0.0"] - try: - to_serialize.save(str(tmp_path / "tf_dataset")) - except AttributeError: - tf.data.experimental.save(to_serialize, str(tmp_path / "tf_dataset")) - - with mock.patch.object( - serializers.TFDatasetSerializer, "serialize", new=stateful_serialize - ) as tf_dataset_serialize: - yield tf_dataset_serialize - serializers.TFDatasetSerializer._metadata.dependencies = [] - - -@pytest.fixture -def mock_tf_dataset_deserialize(): - with mock.patch.object( - serializers.TFDatasetSerializer, "deserialize", autospec=True - ) as tf_dataset_serializer: - yield tf_dataset_serializer - - -@pytest.fixture -def mock_pandas_data_serialize(): - def stateful_serialize(self, to_serialize, gcs_path): - del self, to_serialize, gcs_path - serializers.PandasDataSerializer._metadata.dependencies = ["pandas==1.0.0"] - - with mock.patch.object( - serializers.PandasDataSerializer, "serialize", new=stateful_serialize - ) as data_serialize: - yield data_serialize - serializers.PandasDataSerializer._metadata.dependencies = [] - - -@pytest.fixture -def mock_pandas_data_deserialize(): - with mock.patch.object( - serializers.PandasDataSerializer, "deserialize", autospec=True - ) as pandas_data_deserialize: - yield pandas_data_deserialize - - -@pytest.fixture -def mock_bigframe_deserialize_sklearn(): - with mock.patch.object( - serializers.BigframeSerializer, "_deserialize_sklearn", autospec=True - ) as 
bigframe_deserialize_sklearn: - yield bigframe_deserialize_sklearn - - -@pytest.fixture -def mock_keras_save_model(): - with mock.patch.object(keras.models.Sequential, "save") as keras_save_model: - yield keras_save_model - - -@pytest.fixture -def mock_keras_load_model(): - with mock.patch("tensorflow.keras.models.load_model") as keras_load_model: - yield keras_load_model - - -@pytest.fixture -def mock_torch_save_model(): - with mock.patch.object(torch, "save", autospec=True) as torch_save_model: - yield torch_save_model - - -@pytest.fixture -def mock_torch_load_model(): - with mock.patch.object(torch, "load", autospec=True) as torch_load_model: - yield torch_load_model - - -@pytest.fixture -def mock_upload_to_gcs(): - with mock.patch.object(gcs_utils, "upload_to_gcs", autospec=True) as upload_to_gcs: - yield upload_to_gcs - - -@pytest.fixture -def mock_json_dump(): - with mock.patch.object(json, "dump", autospec=True) as json_dump: - yield json_dump - - -@pytest.fixture -def mock_cloudpickle_dump(): - with mock.patch.object(cloudpickle, "dump", autospec=True) as cloudpickle_dump: - yield cloudpickle_dump - - -class TestTorchClass(torch.nn.Module): - def __init__(self, input_size=4): - super().__init__() - self.linear_relu_stack = torch.nn.Sequential( - torch.nn.Linear(input_size, 3), torch.nn.ReLU(), torch.nn.Linear(3, 2) - ) - - def forward(self, x): - logits = self.linear_relu_stack(x) - return logits - - -class TestSklearnEstimatorSerializer: - def setup_method(self): - reload(vertexai) - reload(vertexai.preview.initializer) - reload(_logistic) - - def teardown_method(self): - aiplatform.initializer.global_pool.shutdown(wait=True) - - @pytest.mark.usefixtures("mock_storage_blob", "google_auth_mock") - def test_serialize_path_start_with_gs(self, sklearn_estimator_serializer): - # Arrange - fake_gcs_uri = "gs://staging-bucket/fake_gcs_uri" - - train_x = np.array([[1, 1], [1, 2], [2, 2], [2, 3]]) - train_y = np.dot(train_x, np.array([1, 2])) + 3 - sklearn_estimator = _logistic.LogisticRegression() - sklearn_estimator.fit(train_x, train_y) - - # Act - sklearn_estimator_serializer.serialize(sklearn_estimator, fake_gcs_uri) - - # Assert - # The serialized file is written to a local path "fake_gcs_uri" via - # mock_upload_to_gcs for hermicity. - with open(fake_gcs_uri.split("/")[-1], "rb") as f: - restored_estimator = pickle.load(f) - - assert isinstance(restored_estimator, _logistic.LogisticRegression) - assert sklearn_estimator.get_params() == restored_estimator.get_params() - assert (sklearn_estimator.coef_ == restored_estimator.coef_).all() - - def test_serialize_path_start_with_gcs(self, sklearn_estimator_serializer): - # Arrange - fake_gcs_uri = "/gcs/staging-bucket/fake_gcs_uri" - - train_x = np.array([[1, 1], [1, 2], [2, 2], [2, 3]]) - train_y = np.dot(train_x, np.array([1, 2])) + 3 - sklearn_estimator = _logistic.LogisticRegression() - sklearn_estimator.fit(train_x, train_y) - - # Act - with fake_filesystem_unittest.Patcher() as filesystem: - filesystem.fs.create_file(fake_gcs_uri) - sklearn_estimator_serializer.serialize(sklearn_estimator, fake_gcs_uri) - - # Assert - # The serialized file is written to a local path "fake_gcs_uri" via - # mock_upload_to_gcs for hermicity. 
- with open(fake_gcs_uri, "rb") as f: - restored_estimator = pickle.load(f) - - assert isinstance(restored_estimator, _logistic.LogisticRegression) - assert sklearn_estimator.get_params() == restored_estimator.get_params() - assert (sklearn_estimator.coef_ == restored_estimator.coef_).all() - - def test_serialize_invalid_gcs_path(self, sklearn_estimator_serializer): - # Arrange - fake_gcs_uri = "fake_gcs_uri" - - train_x = np.array([[1, 1], [1, 2], [2, 2], [2, 3]]) - train_y = np.dot(train_x, np.array([1, 2])) + 3 - sklearn_estimator = _logistic.LogisticRegression() - sklearn_estimator.fit(train_x, train_y) - - # Act - with pytest.raises(ValueError, match=f"Invalid gcs path: {fake_gcs_uri}"): - sklearn_estimator_serializer.serialize(sklearn_estimator, fake_gcs_uri) - - @pytest.mark.usefixtures("mock_storage_blob", "google_auth_mock") - def test_deserialize_path_start_with_gs( - self, sklearn_estimator_serializer, mock_storage_blob - ): - # Arrange - fake_gcs_uri = "gs://staging-bucket/fake_gcs_uri" - - train_x = np.array([[1, 1], [1, 2], [2, 2], [2, 3]]) - train_y = np.dot(train_x, np.array([1, 2])) + 3 - sklearn_estimator = _logistic.LogisticRegression() - sklearn_estimator.fit(train_x, train_y) - - def fake_download_file_from_gcs(self, filename): - with open(filename, "wb") as f: - pickle.dump(sklearn_estimator, f) - - mock_storage_blob.download_to_filename = types.MethodType( - fake_download_file_from_gcs, mock_storage_blob - ) - - # Act - restored_estimator = sklearn_estimator_serializer.deserialize(fake_gcs_uri) - - # Assert - assert isinstance(restored_estimator, _logistic.LogisticRegression) - assert sklearn_estimator.get_params() == restored_estimator.get_params() - assert (sklearn_estimator.coef_ == restored_estimator.coef_).all() - - def test_deserialize_path_start_with_gcs(self, sklearn_estimator_serializer): - # Arrange - fake_gcs_uri = "/gcs/staging-bucket/fake_gcs_uri" - - train_x = np.array([[1, 1], [1, 2], [2, 2], [2, 3]]) - train_y = np.dot(train_x, np.array([1, 2])) + 3 - sklearn_estimator = _logistic.LogisticRegression() - sklearn_estimator.fit(train_x, train_y) - - with fake_filesystem_unittest.Patcher() as filesystem: - filesystem.fs.create_file(fake_gcs_uri) - with open(fake_gcs_uri, "wb") as f: - pickle.dump(sklearn_estimator, f) - # Act - restored_estimator = sklearn_estimator_serializer.deserialize(fake_gcs_uri) - - # Assert - assert isinstance(restored_estimator, _logistic.LogisticRegression) - assert sklearn_estimator.get_params() == restored_estimator.get_params() - assert (sklearn_estimator.coef_ == restored_estimator.coef_).all() - - def test_deserialize_invalid_gcs_path(self, sklearn_estimator_serializer): - # Arrange - fake_gcs_uri = "fake_gcs_uri" - - # Act - with pytest.raises(ValueError, match=f"Invalid gcs path: {fake_gcs_uri}"): - sklearn_estimator_serializer.deserialize(fake_gcs_uri) - - -class TestKerasModelSerializer: - @pytest.mark.usefixtures("mock_storage_blob_tmp_dir") - def test_serialize_gcs_path_default_save_format( - self, keras_model_serializer, tmp_path - ): - # Arrange - fake_gcs_uri = "gs://staging-bucket/fake_gcs_uri" - - keras_model = keras.Sequential( - [keras.layers.Dense(8, input_shape=(2,)), keras.layers.Dense(4)] - ) - - # Act - keras_model_serializer.serialize(keras_model, fake_gcs_uri) - - # Assert - # We mocked the storage blob, which writes the content to a temp path - # instead of fake_gcs_uri. The same filename will be used, though. 
- saved_keras_model_path = tmp_path / "fake_gcs_uri.keras" - assert os.path.exists(saved_keras_model_path) - saved_keras_model = keras.models.load_model(saved_keras_model_path) - assert isinstance(saved_keras_model, keras.models.Sequential) - - @pytest.mark.parametrize("save_format", ["keras", "h5"], ids=["keras", "h5"]) - @pytest.mark.usefixtures("mock_storage_blob_tmp_dir") - def test_serialize_gcs_path(self, keras_model_serializer, tmp_path, save_format): - # Arrange - fake_gcs_uri = "gs://staging-bucket/fake_gcs_uri" - - keras_model = keras.Sequential( - [keras.layers.Dense(8, input_shape=(2,)), keras.layers.Dense(4)] - ) - - # Act - keras_model_serializer.serialize( - keras_model, fake_gcs_uri, save_format=save_format - ) - - # Assert - # We mocked the storage blob, which writes the content to a temp path - # instead of fake_gcs_uri. The same filename will be used, though. - saved_keras_model_path = tmp_path / ("fake_gcs_uri." + save_format) - assert os.path.exists(saved_keras_model_path) - saved_keras_model = keras.models.load_model(saved_keras_model_path) - assert isinstance(saved_keras_model, keras.models.Sequential) - - @pytest.mark.usefixtures("mock_gcs_upload", "mock_isvalid_gcs_path") - def test_serialize_gcs_path_tf_format(self, keras_model_serializer, tmp_path): - # Arrange - fake_gcs_uri = str(tmp_path / "fake_gcs_uri") - - keras_model = keras.Sequential( - [keras.layers.Dense(8, input_shape=(2,)), keras.layers.Dense(4)] - ) - - # Act - keras_model_serializer.serialize(keras_model, fake_gcs_uri, save_format="tf") - - # Assert - # We mocked the storage blob, which writes the content to a temp path - # instead of fake_gcs_uri. The same filename will be used, though. - saved_keras_model_path = tmp_path / ("fake_gcs_uri") - assert os.path.exists(saved_keras_model_path) - saved_keras_model = keras.models.load_model(saved_keras_model_path) - assert isinstance(saved_keras_model, keras.models.Sequential) - - def test_serialize_invalid_gcs_path(self, keras_model_serializer): - # Arrange - fake_gcs_uri = "fake_gcs_uri" - - keras_model = keras.Sequential( - [keras.layers.Dense(8, input_shape=(2,)), keras.layers.Dense(4)] - ) - - # Act - with pytest.raises(ValueError, match=f"Invalid gcs path: {fake_gcs_uri}"): - keras_model_serializer.serialize(keras_model, fake_gcs_uri) - - @pytest.mark.parametrize("save_format", ["keras", "h5"], ids=["keras", "h5"]) - def test_deserialize_gcs_path( - self, - keras_model_serializer, - mock_storage_blob_tmp_dir, - mock_keras_load_model, - save_format, - ): - # Arrange - fake_gcs_uri = "gs://staging-bucket/fake_gcs_uri" - - # This only mocks the metadata loading. - def fake_download_file_from_gcs(self, filename): - with open(filename, "w") as f: - json.dump({"save_format": save_format}, f) - - mock_storage_blob_tmp_dir.download_to_filename = types.MethodType( - fake_download_file_from_gcs, mock_storage_blob_tmp_dir - ) - - # Act - _ = keras_model_serializer.deserialize(fake_gcs_uri) - - # Assert - # We didn't mock the loading process with concrete data, so we simply - # test that it's called. - mock_keras_load_model.assert_called_once() - - @pytest.mark.usefixtures("mock_download_from_gcs_for_keras_model") - def test_deserialize_tf_format( - self, - keras_model_serializer, - mock_storage_blob_tmp_dir, - ): - # Arrange - fake_gcs_uri = "gs://staging-bucket/fake_gcs_uri" - - # This only mocks the metadata loading. 
- def fake_download_file_from_gcs(self, filename): - with open(filename, "w") as f: - json.dump({"save_format": "tf"}, f) - - mock_storage_blob_tmp_dir.download_to_filename = types.MethodType( - fake_download_file_from_gcs, mock_storage_blob_tmp_dir - ) - - # Act - loaded_keras_model = keras_model_serializer.deserialize(fake_gcs_uri) - - # Assert - assert isinstance(loaded_keras_model, keras.models.Sequential) - - def test_deserialize_invalid_gcs_path(self, keras_model_serializer): - # Arrange - fake_gcs_uri = "fake_gcs_uri" - - # Act - with pytest.raises(ValueError, match=f"Invalid gcs path: {fake_gcs_uri}"): - keras_model_serializer.deserialize(fake_gcs_uri) - - -class TestKerasHistoryCallbackSerializer: - @pytest.mark.usefixtures("mock_isvalid_gcs_path") - def test_serialize_gcs_path(self, keras_history_callback_serializer, tmp_path): - # Arrange - fake_gcs_uri = tmp_path / "fake_gcs_uri" - - keras_model = keras.Sequential( - [keras.layers.Dense(8, input_shape=(2,)), keras.layers.Dense(4)] - ) - history = keras.callbacks.History() - history.history = {"loss": [1.0, 0.5, 0.2]} - history.params = {"verbose": 1, "epochs": 3, "steps": 1} - history.epoch = [0, 1, 2] - history.model = keras_model - - # Act - keras_history_callback_serializer.serialize(history, str(fake_gcs_uri)) - - with open(tmp_path / "fake_gcs_uri", "rb") as f: - deserialized = cloudpickle.load(f) - - assert "model" not in deserialized - assert deserialized["history"]["loss"] == history.history["loss"] - assert deserialized["params"] == history.params - assert deserialized["epoch"] == history.epoch - - def test_serialize_invalid_gcs_path(self, keras_history_callback_serializer): - # Arrange - fake_gcs_uri = "fake_gcs_uri" - - history = keras.callbacks.History() - - # Act - with pytest.raises(ValueError, match=f"Invalid gcs path: {fake_gcs_uri}"): - keras_history_callback_serializer.serialize(history, fake_gcs_uri) - - @pytest.mark.usefixtures("google_auth_mock") - def test_deserialize_gcs_path( - self, - keras_history_callback_serializer, - mock_storage_blob_tmp_dir, - tmp_path, - ): - # Arrange - fake_gcs_uri = "gs://staging-bucket/fake_gcs_uri" - - _ = keras.Sequential( - [keras.layers.Dense(8, input_shape=(2,)), keras.layers.Dense(4)] - ) - history = keras.callbacks.History() - history.history = {"loss": [1.0, 0.5, 0.2]} - history.params = {"verbose": 1, "epochs": 3, "steps": 1} - history.epoch = [0, 1, 2] - - def fake_download_file_from_gcs(self, filename): - with open(tmp_path / filename, "wb") as f: - cloudpickle.dump( - history.__dict__, - f, - ) - - mock_storage_blob_tmp_dir.download_to_filename = types.MethodType( - fake_download_file_from_gcs, mock_storage_blob_tmp_dir - ) - - # Act - restored_history = keras_history_callback_serializer.deserialize(fake_gcs_uri) - - # Assert - assert isinstance(restored_history, keras.callbacks.History) - assert restored_history.model is None - - @pytest.mark.usefixtures("google_auth_mock") - def test_deserialize_gcs_path_with_model( - self, - keras_history_callback_serializer, - mock_storage_blob_tmp_dir, - tmp_path, - ): - # Arrange - fake_gcs_uri = "gs://staging-bucket/fake_gcs_uri" - - keras_model = keras.Sequential( - [keras.layers.Dense(8, input_shape=(2,)), keras.layers.Dense(4)] - ) - history = keras.callbacks.History() - history.history = {"loss": [1.0, 0.5, 0.2]} - history.params = {"verbose": 1, "epochs": 3, "steps": 1} - history.epoch = [0, 1, 2] - - def fake_download_file_from_gcs(self, filename): - with open(tmp_path / filename, "wb") as f: - cloudpickle.dump( - 
history.__dict__, - f, - ) - - mock_storage_blob_tmp_dir.download_to_filename = types.MethodType( - fake_download_file_from_gcs, mock_storage_blob_tmp_dir - ) - - # Act - restored_history = keras_history_callback_serializer.deserialize( - fake_gcs_uri, model=keras_model - ) - - # Assert - assert isinstance(restored_history, keras.callbacks.History) - assert restored_history.model == keras_model - - def test_deserialize_invalid_gcs_path(self, keras_history_callback_serializer): - # Arrange - fake_gcs_uri = "fake_gcs_uri" - - # Act - with pytest.raises(ValueError, match=f"Invalid gcs path: {fake_gcs_uri}"): - keras_history_callback_serializer.deserialize(fake_gcs_uri) - - -class TestTorchModelSerializer: - def test_serialize_path_start_with_gs( - self, torch_model_serializer, mock_torch_save_model, mock_upload_to_gcs - ): - # Arrange - fake_gcs_uri = "gs://staging-bucket/fake_gcs_uri" - - torch_model = TestTorchClass() - - # Act - torch_model_serializer.serialize(torch_model, fake_gcs_uri) - - # Assert - mock_torch_save_model.assert_called_once_with( - torch_model, - ANY, - pickle_module=cloudpickle, - pickle_protocol=constants.PICKLE_PROTOCOL, - ) - - mock_upload_to_gcs.assert_called_once_with(ANY, fake_gcs_uri) - - def test_serialize_path_start_with_gcs( - self, torch_model_serializer, mock_torch_save_model - ): - # Arrange - fake_gcs_uri = "/gcs/staging-bucket/fake_gcs_uri" - - torch_model = TestTorchClass() - - # Act - - torch_model_serializer.serialize(torch_model, fake_gcs_uri) - - # Assert - mock_torch_save_model.assert_called_once_with( - torch_model, - fake_gcs_uri, - pickle_module=cloudpickle, - pickle_protocol=constants.PICKLE_PROTOCOL, - ) - - def test_serialize_invalid_gcs_path(self, torch_model_serializer): - # Arrange - fake_gcs_uri = "fake_gcs_uri" - - torch_model = TestTorchClass() - - # Act - with pytest.raises(ValueError, match=f"Invalid gcs path: {fake_gcs_uri}"): - torch_model_serializer.serialize(torch_model, fake_gcs_uri) - - @pytest.mark.usefixtures("google_auth_mock") - def test_deserialize_path_start_with_gs( - self, torch_model_serializer, mock_storage_blob_tmp_dir, tmp_path - ): - # TorchModelSerializer only supports torch>=2.0, which supports python>=3.8 - # Skip this test for python 3.7 - if supported_frameworks._get_python_minor_version() == "3.7": - return - - # Arrange - fake_gcs_uri = "gs://staging-bucket/fake_gcs_uri" - - torch_model = TestTorchClass() - - def fake_download_file_from_gcs(self, filename): - torch.save( - torch_model, - os.fspath(tmp_path / filename), - pickle_module=cloudpickle, - pickle_protocol=constants.PICKLE_PROTOCOL, - ) - - mock_storage_blob_tmp_dir.download_to_filename = types.MethodType( - fake_download_file_from_gcs, mock_storage_blob_tmp_dir - ) - - # Act - restored_model = torch_model_serializer.deserialize(fake_gcs_uri) - - # Assert - assert isinstance(restored_model, TestTorchClass) - assert str(torch_model.state_dict()) == str(restored_model.state_dict()) - - def test_deserialize_path_start_with_gcs( - self, torch_model_serializer, mock_torch_load_model - ): - # TorchModelSerializer only supports torch>=2.0, which supports python>=3.8 - # Skip this test for python 3.7 - if supported_frameworks._get_python_minor_version() == "3.7": - return - - # Arrange - fake_gcs_uri = "/gcs/staging-bucket/fake_gcs_uri" - - # Act - _ = torch_model_serializer.deserialize(fake_gcs_uri) - - # Assert - mock_torch_load_model.assert_called_once_with( - fake_gcs_uri, - map_location=None, - ) - - def test_deserialize_invalid_gcs_path(self, 
torch_model_serializer): - # Arrange - fake_gcs_uri = "fake_gcs_uri" - - # Act - with pytest.raises(ValueError, match=f"Invalid gcs path: {fake_gcs_uri}"): - torch_model_serializer.deserialize(fake_gcs_uri) - - -class TestTorchDataLoaderSerializer: - def test_serialize_dataloader( - self, - torch_dataloader_serializer, - mock_json_dump, - mock_cloudpickle_dump, - mock_upload_to_gcs, - ): - # Arrange - fake_gcs_uri = "gs://staging-bucket/fake_gcs_uri" - - dataloader = torch.utils.data.DataLoader( - torch.utils.data.TensorDataset( - torch.tensor([[1, 2, 3] for i in range(100)]), - torch.tensor([1] * 100), - ), - batch_size=10, - shuffle=True, - ) - - # Act - torch_dataloader_serializer.serialize(dataloader, fake_gcs_uri) - - # Assert - mock_json_dump.assert_called_once_with( - { - "batch_size": dataloader.batch_size, - "num_workers": dataloader.num_workers, - "pin_memory": dataloader.pin_memory, - "drop_last": dataloader.drop_last, - "timeout": dataloader.timeout, - "prefetch_factor": dataloader.prefetch_factor, - "persistent_workers": dataloader.persistent_workers, - "pin_memory_device": dataloader.pin_memory_device, - "generator_device": None, - }, - ANY, - ) - - assert mock_cloudpickle_dump.call_count == 4 - - mock_upload_to_gcs.assert_called_once_with(ANY, fake_gcs_uri) - - def test_serialize_invalid_gcs_path(self, torch_dataloader_serializer): - # Arrange - fake_gcs_uri = "fake_gcs_uri" - - dataloader = torch.utils.data.DataLoader( - torch.utils.data.TensorDataset( - torch.tensor([[1, 2, 3] for i in range(100)]), - torch.tensor([1] * 100), - ), - batch_size=10, - shuffle=True, - ) - - # Act - with pytest.raises(ValueError, match=f"Invalid gcs path: {fake_gcs_uri}"): - torch_dataloader_serializer.serialize(dataloader, fake_gcs_uri) - - @pytest.mark.usefixtures("google_auth_mock") - def test_deserialize_dataloader( - self, - torch_dataloader_serializer, - mock_download_from_gcs_for_torch_dataloader, - ): - # Arrange - fake_gcs_uri = "gs://staging-bucket/fake_gcs_uri" - - expected_dataloader = torch.utils.data.DataLoader( - torch.utils.data.TensorDataset( - torch.tensor([[1, 2, 3] for i in range(100)]), - torch.tensor([1] * 100), - ), - batch_size=10, - shuffle=True, - ) - - # Act - dataloader = torch_dataloader_serializer.deserialize(fake_gcs_uri) - - # Assert - assert dataloader.batch_size == expected_dataloader.batch_size - assert dataloader.num_workers == expected_dataloader.num_workers - assert dataloader.pin_memory == expected_dataloader.pin_memory - assert dataloader.drop_last == expected_dataloader.drop_last - assert dataloader.timeout == expected_dataloader.timeout - assert dataloader.prefetch_factor == expected_dataloader.prefetch_factor - assert dataloader.persistent_workers == expected_dataloader.persistent_workers - - def test_deserialize_invalid_gcs_path(self, torch_dataloader_serializer): - # Arrange - fake_gcs_uri = "fake_gcs_uri" - - # Act - with pytest.raises(ValueError, match=f"Invalid gcs path: {fake_gcs_uri}"): - torch_dataloader_serializer.deserialize(fake_gcs_uri) - - -class TestCloudPickleSerializer: - @pytest.mark.usefixtures("mock_storage_blob", "google_auth_mock") - def test_serialize_func(self, cloudpickle_serializer): - # Arrange - fake_gcs_uri = "gs://staging-bucket/fake_gcs_uri.cpkl" - - def function_to_be_serialized(): - return "return_str" - - # Act - cloudpickle_serializer.serialize(function_to_be_serialized, fake_gcs_uri) - - # Assert - del function_to_be_serialized - # The serialized file is written to a local path "fake_gcs_uri.cpkl" via - # 
mock_upload_to_gcs for hermicity. - with open(fake_gcs_uri.split("/")[-1], "rb") as f: - restored_fn = cloudpickle.load(f) - assert restored_fn() == "return_str" - - def test_serialize_func_path_start_with_gcs(self, cloudpickle_serializer): - # Arrange - fake_gcs_uri = "/gcs/staging-bucket/fake_gcs_uri.cpkl" - - def function_to_be_serialized(): - return "return_str" - - # Act - with fake_filesystem_unittest.Patcher() as filesystem: - filesystem.fs.create_file(fake_gcs_uri) - cloudpickle_serializer.serialize(function_to_be_serialized, fake_gcs_uri) - - # Assert - del function_to_be_serialized - # The serialized file is written to a local path "fake_gcs_uri.cpkl" via - # mock_upload_to_gcs for hermicity. - with open(fake_gcs_uri, "rb") as f: - restored_fn = cloudpickle.load(f) - assert restored_fn() == "return_str" - - def test_serialize_invalid_gcs_path(self, cloudpickle_serializer): - # Arrange - fake_gcs_uri = "fake_gcs_uri.cpkl" - - def function_to_be_serialized(): - return "return_str" - - # Act - with pytest.raises(ValueError, match=f"Invalid gcs path: {fake_gcs_uri}"): - cloudpickle_serializer.serialize(function_to_be_serialized, fake_gcs_uri) - - @pytest.mark.usefixtures("mock_storage_blob", "google_auth_mock") - def test_serialize_object(self, cloudpickle_serializer): - # Arrange - fake_gcs_uri = "gs://staging-bucket/fake_gcs_uri.cpkl" - - class TestClass: - def test_method(self): - return "return_str" - - test_object = TestClass() - # Act - cloudpickle_serializer.serialize(test_object, fake_gcs_uri) - - # Assert - del test_object - # The serialized file is written to a local path "fake_gcs_uri.cpkl" via - # mock_upload_to_gcs for hermicity. - with open(fake_gcs_uri.split("/")[-1], "rb") as f: - restored_object = cloudpickle.load(f) - assert restored_object.test_method() == "return_str" - - @pytest.mark.usefixtures("mock_storage_blob", "google_auth_mock") - def test_serialize_class(self, cloudpickle_serializer): - # Arrange - fake_gcs_uri = "gs://staging-bucket/fake_gcs_uri.cpkl" - - class TestClass: - def test_method(self): - return "return_str" - - # Act - cloudpickle_serializer.serialize(TestClass, fake_gcs_uri) - - # Assert - del TestClass - # The serialized file is written to a local path "fake_gcs_uri.cpkl" via - # mock_upload_to_gcs for hermicity. 
- with open(fake_gcs_uri.split("/")[-1], "rb") as f: - restored_class = cloudpickle.load(f) - assert restored_class().test_method() == "return_str" - - @pytest.mark.usefixtures("mock_storage_blob", "google_auth_mock") - def test_deserialize_func(self, cloudpickle_serializer, mock_storage_blob): - # Arrange - fake_gcs_uri = "gs://staging-bucket/fake_gcs_uri.cpkl" - - def test_function(): - return "return_str" - - def fake_download_file_from_gcs(self, filename): - with open(filename, "wb") as f: - cloudpickle.dump(test_function, f) - - mock_storage_blob.download_to_filename = types.MethodType( - fake_download_file_from_gcs, mock_storage_blob - ) - - # Act - restored_fn = cloudpickle_serializer.deserialize(fake_gcs_uri) - - # Assert - assert restored_fn() == "return_str" - - def test_deserialize_func_path_start_with_gcs(self, cloudpickle_serializer): - # Arrange - fake_gcs_uri = "/gcs/staging-bucket/fake_gcs_uri.cpkl" - - def test_function(): - return "return_str" - - with fake_filesystem_unittest.Patcher() as filesystem: - filesystem.fs.create_file(fake_gcs_uri) - with open(fake_gcs_uri, "wb") as f: - cloudpickle.dump(test_function, f) - # Act - restored_fn = cloudpickle_serializer.deserialize(fake_gcs_uri) - - # Assert - assert restored_fn() == "return_str" - - def test_deserialize_func_invalid_gcs_path(self, cloudpickle_serializer): - # Arrange - fake_gcs_uri = "fake_gcs_uri.cpkl" - - def test_function(): - return "return_str" - - # Act - with pytest.raises(ValueError, match=f"Invalid gcs path: {fake_gcs_uri}"): - cloudpickle_serializer.serialize(test_function, fake_gcs_uri) - - @pytest.mark.usefixtures("google_auth_mock") - def test_deserialize_object(self, cloudpickle_serializer, mock_storage_blob): - # Arrange - fake_gcs_uri = "gs://staging-bucket/fake_gcs_uri.cpkl" - - class TestClass: - def test_method(self): - return "return_str" - - def fake_download_file_from_gcs(self, filename: str): - with open(filename, "wb") as f: - cloudpickle.dump(TestClass(), f) - - mock_storage_blob.download_to_filename = types.MethodType( - fake_download_file_from_gcs, mock_storage_blob - ) - - # Act - restored_object = cloudpickle_serializer.deserialize(fake_gcs_uri) - - # Assert - assert restored_object.test_method() == "return_str" - - @pytest.mark.usefixtures("google_auth_mock") - def test_deserialize_class(self, cloudpickle_serializer, mock_storage_blob): - # Arrange - fake_gcs_uri = "gs://staging-bucket/fake_gcs_uri.cpkl" - - class TestClass: - def test_method(self): - return "return_str" - - def fake_download_file_from_gcs(self, filename): - with open(filename, "wb") as f: - cloudpickle.dump(TestClass, f) - - mock_storage_blob.download_to_filename = types.MethodType( - fake_download_file_from_gcs, mock_storage_blob - ) - - # Act - restored_class = cloudpickle_serializer.deserialize(fake_gcs_uri) - - # Assert - assert restored_class().test_method() == "return_str" - - -class TestTFDatasetSerializer: - @pytest.mark.usefixtures("mock_tf_dataset_serialize") - def test_serialize_tf_dataset(self, tf_dataset_serializer, tmp_path): - # Arrange - fake_gcs_uri = "gs://staging-bucket/tf_dataset" - tf_dataset = tf.data.Dataset.from_tensor_slices(np.array([1, 2, 3])) - - # Act - tf_dataset_serializer.serialize(tf_dataset, fake_gcs_uri) - - # Assert - try: - loaded_dataset = tf.data.Dataset.load(str(tmp_path / "tf_dataset")) - except AttributeError: - loaded_dataset = tf.data.experimental.load(str(tmp_path / "tf_dataset")) - for original_ele, loaded_ele in zip(tf_dataset, loaded_dataset): - assert original_ele == 
loaded_ele - - def test_deserialize_tf_dataset(self, tf_dataset_serializer, tmp_path): - # Arrange - tf_dataset = tf.data.Dataset.from_tensor_slices(np.array([1, 2, 3])) - try: - tf_dataset.save(str(tmp_path / "tf_dataset")) - except AttributeError: - tf.data.experimental.save(tf_dataset, str(tmp_path / "tf_dataset")) - - # Act - loaded_dataset = tf_dataset_serializer.deserialize(str(tmp_path / "tf_dataset")) - - # Assert - for original_ele, loaded_ele in zip(tf_dataset, loaded_dataset): - assert original_ele == loaded_ele - - -class TestPandasDataSerializer: - @pytest.mark.usefixtures("mock_storage_blob_tmp_dir", "google_auth_mock") - def test_serialize_float_only_default_index_dataframe( - self, pandas_data_serializer, tmp_path - ): - # Arrange - fake_gcs_uri = "gs://staging-bucket/fake_gcs_uri.parquet" - - df = pd.DataFrame(np.zeros(shape=[3, 3]), columns=["col1", "col2", "col3"]) - - # Act - pandas_data_serializer.serialize(df, fake_gcs_uri) - - # Assert - # For hermicity, The serialized file is written to a local path - # "tmp_path/fake_gcs_uri.parquet" via mock_storage_blob_tmp_dir. - parquet_file_path = os.fspath(tmp_path / fake_gcs_uri.split("/")[-1]) - restored_df = pd.read_parquet(parquet_file_path) - - pd.testing.assert_frame_equal(df, restored_df) - - @pytest.mark.usefixtures("mock_storage_blob_tmp_dir", "google_auth_mock") - def test_serialize_float_only_str_index(self, pandas_data_serializer, tmp_path): - # Arrange - fake_gcs_uri = "gs://staging-bucket/fake_gcs_uri.parquet" - - df = pd.DataFrame( - np.zeros(shape=[3, 3]), - columns=["col1", "col2", "col3"], - index=["row1", "row2", "row3"], - ) - - # Act - pandas_data_serializer.serialize(df, fake_gcs_uri) - - # Assert - # For hermicity, The serialized file is written to a local path - # "tmp_path/fake_gcs_uri.parquet" via mock_storage_blob_tmp_dir. - parquet_file_path = os.fspath(tmp_path / fake_gcs_uri.split("/")[-1]) - restored_df = pd.read_parquet(parquet_file_path) - - pd.testing.assert_frame_equal(df, restored_df) - - @pytest.mark.usefixtures("mock_storage_blob_tmp_dir", "google_auth_mock") - def test_serialize_common_typed_columns_with_nan( - self, pandas_data_serializer, tmp_path - ): - # Arrange - fake_gcs_uri = "gs://staging-bucket/fake_gcs_uri.parquet" - - df = pd.DataFrame( - np.zeros(shape=[3, 4]), - columns=["str_col", "float_col", "bool_col", "timestamp_col"], - ) - - # object type - df["str_col"] = ["a", np.nan, "b"] - # float type - df["float_clo"] = [1.0, np.nan, np.nan] - # object type - df["bool_col"] = [True, False, np.nan] - # object type - df["timestamp_col"] = [ - pd.Timestamp("20110101"), - np.nan, - pd.Timestamp("20110101"), - ] - - # Act - pandas_data_serializer.serialize(df, fake_gcs_uri) - - # Assert - # For hermicity, The serialized file is written to a local path - # "tmp_path/fake_gcs_uri.parquet" via mock_storage_blob_tmp_dir. 
- parquet_file_path = os.fspath(tmp_path / fake_gcs_uri.split("/")[-1]) - restored_df = pd.read_parquet(parquet_file_path) - - pd.testing.assert_frame_equal(df, restored_df) - - @pytest.mark.usefixtures("mock_storage_blob_tmp_dir", "google_auth_mock") - def test_serialize_common_typed_columns_with_none( - self, pandas_data_serializer, tmp_path - ): - # Arrange - fake_gcs_uri = "gs://staging-bucket/fake_gcs_uri.parquet" - - df = pd.DataFrame( - np.zeros(shape=[3, 8]), - columns=[ - "str_to_object_col", - "str_col", - "float_col", - "int_to_float_col", - "int_col", - "bool_to_object_col", - "bool_col", - "timestamp_col", - ], - ) - - df["str_to_object_col"] = ["a", None, "b"] - df["str_col"] = ["a", "b", "c"] - - df["float_col"] = [1.0, None, None] # None -> NaN - - df["int_to_float_col"] = [1, 2, None] # None -> NaN - df["int_col"] = [1, 2, 3] - - df["bool_to_object_col"] = [True, False, None] - df["bool_col"] = [True, False, True] - - df["timestamp_col"] = [ - pd.Timestamp("20110101"), - None, - pd.Timestamp("20110101"), - ] # None -> NaT - - # Act - pandas_data_serializer.serialize(df, fake_gcs_uri) - - # Assert - # For hermicity, The serialized file is written to a local path - # "tmp_path/fake_gcs_uri.parquet" via mock_storage_blob_tmp_dir. - parquet_file_path = os.fspath(tmp_path / fake_gcs_uri.split("/")[-1]) - restored_df = pd.read_parquet(parquet_file_path) - - pd.testing.assert_frame_equal(df, restored_df) - - @pytest.mark.usefixtures("mock_storage_blob_tmp_dir", "google_auth_mock") - def test_deserialize_all_floats_cols( - self, pandas_data_serializer, mock_storage_blob_tmp_dir - ): - # Arrange - fake_gcs_uri = "gs://staging-bucket/fake_gcs_uri.parquet" - - df = pd.DataFrame(np.zeros(shape=[3, 3]), columns=["col1", "col2", "col3"]) - - def fake_download_file_from_gcs(self, filename): - df.to_parquet(filename) - - mock_storage_blob_tmp_dir.download_to_filename = types.MethodType( - fake_download_file_from_gcs, mock_storage_blob_tmp_dir - ) - - # Act - restored_df = pandas_data_serializer.deserialize(fake_gcs_uri) - - # Assert - pd.testing.assert_frame_equal(df, restored_df) - - @pytest.mark.usefixtures("mock_storage_blob_tmp_dir", "google_auth_mock") - def test_deserialize_all_floats_cols_str_index( - self, pandas_data_serializer, mock_storage_blob_tmp_dir - ): - # Arrange - fake_gcs_uri = "gs://staging-bucket/fake_gcs_uri.parquet" - - df = pd.DataFrame( - np.zeros(shape=[3, 3]), - columns=["col1", "col2", "col3"], - index=["row1", "row2", "row3"], - ) - - def fake_download_file_from_gcs(self, filename): - df.to_parquet(filename) - - mock_storage_blob_tmp_dir.download_to_filename = types.MethodType( - fake_download_file_from_gcs, mock_storage_blob_tmp_dir - ) - - # Act - restored_df = pandas_data_serializer.deserialize(fake_gcs_uri) - - # Assert - pd.testing.assert_frame_equal(df, restored_df) - - @pytest.mark.usefixtures("mock_storage_blob_tmp_dir", "google_auth_mock") - def test_deserialize_common_types_with_none( - self, pandas_data_serializer, mock_storage_blob_tmp_dir - ): - # Arrange - fake_gcs_uri = "gs://staging-bucket/fake_gcs_uri.parquet" - - df = pd.DataFrame( - np.zeros(shape=[3, 8]), - columns=[ - "str_to_object_col", - "str_col", - "float_col", - "int_to_float_col", - "int_col", - "bool_to_object_col", - "bool_col", - "timestamp_col", - ], - ) - - df["str_to_object_col"] = ["a", None, "b"] - df["str_col"] = ["a", "b", "c"] - - df["float_col"] = [1.0, None, None] # None -> NaN - - df["int_to_float_col"] = [1, 2, None] # None -> NaN - df["int_col"] = [1, 2, 3] - - 
df["bool_to_object_col"] = [True, False, None] - df["bool_col"] = [True, False, True] - - df["timestamp_col"] = [ - pd.Timestamp("20110101"), - None, - pd.Timestamp("20110101"), - ] # None -> NaT - - def fake_download_file_from_gcs(self, filename): - df.to_parquet(filename) - - mock_storage_blob_tmp_dir.download_to_filename = types.MethodType( - fake_download_file_from_gcs, mock_storage_blob_tmp_dir - ) - - # Act - restored_df = pandas_data_serializer.deserialize(fake_gcs_uri) - - # Assert - pd.testing.assert_frame_equal(df, restored_df) - - @pytest.mark.usefixtures("mock_storage_blob_tmp_dir", "google_auth_mock") - def test_deserialize_common_types_with_nan( - self, pandas_data_serializer, mock_storage_blob_tmp_dir - ): - # Arrange - fake_gcs_uri = "gs://staging-bucket/fake_gcs_uri.parquet" - - df = pd.DataFrame( - np.zeros(shape=[3, 4]), - columns=["str_col", "float_col", "bool_col", "timestamp_col"], - ) - - # object type - df["str_col"] = ["a", np.nan, "b"] - # float type - df["float_clo"] = [1.0, np.nan, np.nan] - # object type - df["bool_col"] = [True, False, np.nan] - # object type - df["timestamp_col"] = [ - pd.Timestamp("20110101"), - np.nan, - pd.Timestamp("20110101"), - ] - - def fake_download_file_from_gcs(self, filename): - df.to_parquet(filename) - - mock_storage_blob_tmp_dir.download_to_filename = types.MethodType( - fake_download_file_from_gcs, mock_storage_blob_tmp_dir - ) - - # Act - restored_df = pandas_data_serializer.deserialize(fake_gcs_uri) - - # Assert - pd.testing.assert_frame_equal(df, restored_df) diff --git a/tests/unit/vertexai/test_serializers_base.py b/tests/unit/vertexai/test_serializers_base.py deleted file mode 100644 index 972787982d..0000000000 --- a/tests/unit/vertexai/test_serializers_base.py +++ /dev/null @@ -1,64 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2023 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-# - -from vertexai.preview._workflow.serialization_engine import ( - serializers_base, -) - - -class TestSerializerArgs: - def test_object_id_is_saved(self): - class TestClass: - pass - - test_obj = TestClass() - serializer_args = serializers_base.SerializerArgs({test_obj: {"a": 1, "b": 2}}) - assert id(test_obj) in serializer_args - assert test_obj not in serializer_args - - def test_getitem_support_original_object(self): - class TestClass: - pass - - test_obj = TestClass() - serializer_args = serializers_base.SerializerArgs({test_obj: {"a": 1, "b": 2}}) - assert serializer_args[test_obj] == {"a": 1, "b": 2} - - def test_get_support_original_object(self): - class TestClass: - pass - - test_obj = TestClass() - serializer_args = serializers_base.SerializerArgs({test_obj: {"a": 1, "b": 2}}) - assert serializer_args.get(test_obj) == {"a": 1, "b": 2} - - def test_unhashable_obj_saved_successfully(self): - unhashable = [1, 2, 3] - serializer_args = serializers_base.SerializerArgs() - serializer_args[unhashable] = {"a": 1, "b": 2} - assert id(unhashable) in serializer_args - - def test_getitem_support_original_unhashable(self): - unhashable = [1, 2, 3] - serializer_args = serializers_base.SerializerArgs() - serializer_args[unhashable] = {"a": 1, "b": 2} - assert serializer_args[unhashable] == {"a": 1, "b": 2} - - def test_get_support_original_unhashable(self): - unhashable = [1, 2, 3] - serializers_args = serializers_base.SerializerArgs() - serializers_args[unhashable] = {"a": 1, "b": 2} - assert serializers_args.get(unhashable) == {"a": 1, "b": 2} diff --git a/tests/unit/vertexai/test_tabnet_trainer.py b/tests/unit/vertexai/test_tabnet_trainer.py deleted file mode 100644 index d6efce0d3e..0000000000 --- a/tests/unit/vertexai/test_tabnet_trainer.py +++ /dev/null @@ -1,812 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2023 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-# - -from importlib import reload -import os -import re -from unittest.mock import Mock - -from google.cloud import aiplatform -from google.cloud.aiplatform.compat.types import ( - custom_job as gca_custom_job_compat, -) -from google.cloud.aiplatform.compat.types import io as gca_io_compat -from vertexai.preview._workflow.executor import ( - remote_container_training, -) -from vertexai.preview.tabular_models import ( - tabnet_trainer, -) -from vertexai.preview._workflow.shared import configs -import pandas as pd -import pytest -import tensorflow as tf - -_TEST_STAGING_BUCKET = "gs://test_staging_bucket" -_TEST_JOB_DIR = "gs://test_job_dir" -_TEST_TARGET_COLUMN = "target" -_TEST_MODEL_TYPE_CLASSIFICATION = "classification" -_TEST_MODEL_TYPE_REGRESSION = "regression" -_TEST_LEARNING_RATE = 0.01 -_TEST_DATA = pd.DataFrame(data={"col_0": [0, 1], "col_1": [2, 3]}) -_TEST_PROJECT = "test-project" -_TEST_LOCATION = "us-central1" -_TEST_DISPLAY_NAME = "test" -_TEST_MACHINE_TYPE = "n1-highmem-8" -_TEST_ACCELERATOR_COUNT = 8 -_TEST_ACCELERATOR_TYPE = "NVIDIA_TESLA_K80" -_TEST_BOOT_DISK_TYPE = "test_boot_disk_type" -_TEST_BOOT_DISK_SIZE_GB = 10 - - -class TestTabNetTrainer: - def setup_method(self): - reload(aiplatform.initializer) - reload(aiplatform) - - @pytest.mark.usefixtures( - "google_auth_mock", - "mock_uuid", - "mock_get_custom_job_succeeded", - "mock_blob_upload_from_filename", - ) - def test_tabnet_trainer_default( - self, - mock_create_custom_job, - mock_blob_download_to_filename, - mock_tf_saved_model_load, - ): - test_tabnet_trainer = tabnet_trainer.TabNetTrainer( - model_type=_TEST_MODEL_TYPE_CLASSIFICATION, - target_column=_TEST_TARGET_COLUMN, - learning_rate=_TEST_LEARNING_RATE, - ) - test_tabnet_trainer.fit.vertex.remote_config.staging_bucket = ( - _TEST_STAGING_BUCKET - ) - expected_binding = { - "model_type": _TEST_MODEL_TYPE_CLASSIFICATION, - "target_column": _TEST_TARGET_COLUMN, - "learning_rate": _TEST_LEARNING_RATE, - "enable_profiler": False, - "job_dir": "", - "cache_data": "auto", - "seed": 1, - "large_category_dim": 1, - "large_category_thresh": 300, - "yeo_johnson_transform": False, - "weight_column": "", - "max_steps": -1, - "max_train_secs": -1, - "measurement_selection_type": "BEST_MEASUREMENT", - "optimization_metric": "", - "eval_steps": 0, - "batch_size": 100, - "eval_frequency_secs": 600, - "feature_dim": 64, - "feature_dim_ratio": 0.5, - "num_decision_steps": 6, - "relaxation_factor": 1.5, - "decay_every": 100.0, - "decay_rate": 0.95, - "gradient_thresh": 2000.0, - "sparsity_loss_weight": 0.00001, - "batch_momentum": 0.95, - "batch_size_ratio": 0.25, - "num_transformer_layers": 4, - "num_transformer_layers_ratio": 0.25, - "class_weight": 1.0, - "loss_function_type": "default", - "alpha_focal_loss": 0.25, - "gamma_focal_loss": 2.0, - "is_remote_trainer": True, - } - assert test_tabnet_trainer._binding == expected_binding - - aiplatform.init( - project=_TEST_PROJECT, - location=_TEST_LOCATION, - ) - test_tabnet_trainer.fit(training_data=_TEST_DATA, validation_data=_TEST_DATA) - - expected_display_name = "TabNetTrainer-fit" - expected_job_dir = os.path.join(_TEST_STAGING_BUCKET, "custom_job") - expected_args = [ - f"--model_type={_TEST_MODEL_TYPE_CLASSIFICATION}", - f"--target_column={_TEST_TARGET_COLUMN}", - f"--learning_rate={_TEST_LEARNING_RATE}", - f"--job_dir={expected_job_dir}", - "--enable_profiler=False", - "--cache_data=auto", - "--seed=1", - "--large_category_dim=1", - "--large_category_thresh=300", - "--yeo_johnson_transform=False", - "--weight_column=", 
- "--max_steps=-1", - "--max_train_secs=-1", - "--measurement_selection_type=BEST_MEASUREMENT", - "--optimization_metric=", - "--eval_steps=0", - "--batch_size=100", - "--eval_frequency_secs=600", - "--feature_dim=64", - "--feature_dim_ratio=0.5", - "--num_decision_steps=6", - "--relaxation_factor=1.5", - "--decay_every=100.0", - "--decay_rate=0.95", - "--gradient_thresh=2000.0", - "--sparsity_loss_weight=1e-05", - "--batch_momentum=0.95", - "--batch_size_ratio=0.25", - "--num_transformer_layers=4", - "--num_transformer_layers_ratio=0.25", - "--class_weight=1.0", - "--loss_function_type=default", - "--alpha_focal_loss=0.25", - "--gamma_focal_loss=2.0", - "--is_remote_trainer=True", - f"--training_data_path={_TEST_STAGING_BUCKET}/input/training_data_path", - f"--validation_data_path={_TEST_STAGING_BUCKET}/input/validation_data_path", - f"--output_model_path={_TEST_STAGING_BUCKET}/output/output_model_path", - ] - expected_worker_pool_specs = [ - { - "replica_count": 1, - "machine_spec": { - "machine_type": "c2-standard-16", - "accelerator_type": remote_container_training._DEFAULT_ACCELERATOR_TYPE, - "accelerator_count": remote_container_training._DEFAULT_ACCELERATOR_COUNT, - }, - "disk_spec": { - "boot_disk_type": "pd-ssd", - "boot_disk_size_gb": 100, - }, - "container_spec": { - "image_uri": tabnet_trainer._TABNET_TRAINING_IMAGE, - "args": [], - }, - } - ] - expected_custom_job = gca_custom_job_compat.CustomJob( - display_name=f"{expected_display_name}-0", - job_spec=gca_custom_job_compat.CustomJobSpec( - worker_pool_specs=expected_worker_pool_specs, - base_output_directory=gca_io_compat.GcsDestination( - output_uri_prefix=os.path.join(_TEST_STAGING_BUCKET, "custom_job"), - ), - ), - ) - mock_create_custom_job.assert_called_once() - - assert ( - mock_create_custom_job.call_args[1]["parent"] - == f"projects/{_TEST_PROJECT}/locations/{_TEST_LOCATION}" - ) - assert not mock_create_custom_job.call_args[1]["timeout"] - test_custom_job = mock_create_custom_job.call_args[1]["custom_job"] - - test_args = test_custom_job.job_spec.worker_pool_specs[0].container_spec.args - assert set(test_args) == set(expected_args) - - test_custom_job.job_spec.worker_pool_specs[0].container_spec.args = [] - assert test_custom_job == expected_custom_job - - mock_blob_download_to_filename.assert_called_once() - mock_tf_saved_model_load.assert_called_once() - - @pytest.mark.usefixtures( - "google_auth_mock", - "mock_uuid", - "mock_get_custom_job_succeeded", - "mock_blob_upload_from_filename", - ) - def test_tabnet_trainer_all_args( - self, - mock_create_custom_job, - mock_blob_download_to_filename, - mock_tf_saved_model_load, - ): - test_tabnet_trainer = tabnet_trainer.TabNetTrainer( - model_type=_TEST_MODEL_TYPE_CLASSIFICATION, - target_column=_TEST_TARGET_COLUMN, - learning_rate=_TEST_LEARNING_RATE, - job_dir=_TEST_JOB_DIR, - enable_profiler=True, - cache_data="test", - seed=2, - large_category_dim=2, - large_category_thresh=10, - yeo_johnson_transform=True, - weight_column="weight", - max_steps=5, - max_train_secs=600, - measurement_selection_type="LAST_MEASUREMENT", - optimization_metric="rmse", - eval_steps=1, - batch_size=10, - eval_frequency_secs=60, - feature_dim=8, - feature_dim_ratio=0.1, - num_decision_steps=3, - relaxation_factor=1.2, - decay_every=10.0, - decay_rate=0.9, - gradient_thresh=200.0, - sparsity_loss_weight=0.01, - batch_momentum=0.9, - batch_size_ratio=0.2, - num_transformer_layers=2, - num_transformer_layers_ratio=0.2, - class_weight=1.2, - loss_function_type="rmse", - alpha_focal_loss=0.2, - 
gamma_focal_loss=2.5, - ) - expected_binding = { - "model_type": _TEST_MODEL_TYPE_CLASSIFICATION, - "target_column": _TEST_TARGET_COLUMN, - "learning_rate": _TEST_LEARNING_RATE, - "job_dir": _TEST_JOB_DIR, - "enable_profiler": True, - "cache_data": "test", - "seed": 2, - "large_category_dim": 2, - "large_category_thresh": 10, - "yeo_johnson_transform": True, - "weight_column": "weight", - "max_steps": 5, - "max_train_secs": 600, - "measurement_selection_type": "LAST_MEASUREMENT", - "optimization_metric": "rmse", - "eval_steps": 1, - "batch_size": 10, - "eval_frequency_secs": 60, - "feature_dim": 8, - "feature_dim_ratio": 0.1, - "num_decision_steps": 3, - "relaxation_factor": 1.2, - "decay_every": 10.0, - "decay_rate": 0.9, - "gradient_thresh": 200.0, - "sparsity_loss_weight": 0.01, - "batch_momentum": 0.9, - "batch_size_ratio": 0.2, - "num_transformer_layers": 2, - "num_transformer_layers_ratio": 0.2, - "class_weight": 1.2, - "loss_function_type": "rmse", - "alpha_focal_loss": 0.2, - "gamma_focal_loss": 2.5, - "is_remote_trainer": True, - } - assert test_tabnet_trainer._binding == expected_binding - - aiplatform.init( - project=_TEST_PROJECT, - location=_TEST_LOCATION, - ) - test_tabnet_trainer.fit.vertex.remote_config.staging_bucket = ( - _TEST_STAGING_BUCKET - ) - test_tabnet_trainer.fit.vertex.remote_config.machine_type = _TEST_MACHINE_TYPE - test_tabnet_trainer.fit.vertex.remote_config.display_name = _TEST_DISPLAY_NAME - ( - test_tabnet_trainer.fit.vertex.remote_config.boot_disk_type - ) = _TEST_BOOT_DISK_TYPE - ( - test_tabnet_trainer.fit.vertex.remote_config.boot_disk_size_gb - ) = _TEST_BOOT_DISK_SIZE_GB - ( - test_tabnet_trainer.fit.vertex.remote_config.accelerator_type - ) = _TEST_ACCELERATOR_TYPE - ( - test_tabnet_trainer.fit.vertex.remote_config.accelerator_count - ) = _TEST_ACCELERATOR_COUNT - test_tabnet_trainer.fit(training_data=_TEST_DATA, validation_data=_TEST_DATA) - - expected_display_name = "TabNetTrainer-test" - expected_args = [ - f"--model_type={_TEST_MODEL_TYPE_CLASSIFICATION}", - f"--target_column={_TEST_TARGET_COLUMN}", - f"--learning_rate={_TEST_LEARNING_RATE}", - f"--job_dir={_TEST_JOB_DIR}", - "--enable_profiler=True", - "--cache_data=test", - "--seed=2", - "--large_category_dim=2", - "--large_category_thresh=10", - "--yeo_johnson_transform=True", - "--weight_column=weight", - "--max_steps=5", - "--max_train_secs=600", - "--measurement_selection_type=LAST_MEASUREMENT", - "--optimization_metric=rmse", - "--eval_steps=1", - "--batch_size=10", - "--eval_frequency_secs=60", - "--feature_dim=8", - "--feature_dim_ratio=0.1", - "--num_decision_steps=3", - "--relaxation_factor=1.2", - "--decay_every=10.0", - "--decay_rate=0.9", - "--gradient_thresh=200.0", - "--sparsity_loss_weight=0.01", - "--batch_momentum=0.9", - "--batch_size_ratio=0.2", - "--num_transformer_layers=2", - "--num_transformer_layers_ratio=0.2", - "--class_weight=1.2", - "--loss_function_type=rmse", - "--alpha_focal_loss=0.2", - "--gamma_focal_loss=2.5", - "--is_remote_trainer=True", - f"--training_data_path={_TEST_STAGING_BUCKET}/input/training_data_path", - f"--validation_data_path={_TEST_STAGING_BUCKET}/input/validation_data_path", - f"--output_model_path={_TEST_STAGING_BUCKET}/output/output_model_path", - ] - expected_worker_pool_specs = [ - { - "replica_count": 1, - "machine_spec": { - "machine_type": _TEST_MACHINE_TYPE, - "accelerator_type": _TEST_ACCELERATOR_TYPE, - "accelerator_count": _TEST_ACCELERATOR_COUNT, - }, - "disk_spec": { - "boot_disk_type": _TEST_BOOT_DISK_TYPE, - "boot_disk_size_gb": 
_TEST_BOOT_DISK_SIZE_GB, - }, - "container_spec": { - "image_uri": tabnet_trainer._TABNET_TRAINING_IMAGE, - "args": [], - }, - } - ] - expected_custom_job = gca_custom_job_compat.CustomJob( - display_name=f"{expected_display_name}-0", - job_spec=gca_custom_job_compat.CustomJobSpec( - worker_pool_specs=expected_worker_pool_specs, - base_output_directory=gca_io_compat.GcsDestination( - output_uri_prefix=os.path.join(_TEST_STAGING_BUCKET, "custom_job"), - ), - ), - ) - mock_create_custom_job.assert_called_once() - - assert ( - mock_create_custom_job.call_args[1]["parent"] - == f"projects/{_TEST_PROJECT}/locations/{_TEST_LOCATION}" - ) - assert not mock_create_custom_job.call_args[1]["timeout"] - test_custom_job = mock_create_custom_job.call_args[1]["custom_job"] - - test_args = test_custom_job.job_spec.worker_pool_specs[0].container_spec.args - assert set(test_args) == set(expected_args) - - test_custom_job.job_spec.worker_pool_specs[0].container_spec.args = [] - assert test_custom_job == expected_custom_job - - mock_blob_download_to_filename.assert_called_once() - mock_tf_saved_model_load.assert_called_once() - - @pytest.mark.usefixtures( - "google_auth_mock", - "mock_uuid", - "mock_get_custom_job_succeeded", - "mock_blob_upload_from_filename", - ) - def test_tabnet_trainer_all_args_with_set_config_method( - self, - mock_create_custom_job, - mock_blob_download_to_filename, - mock_tf_saved_model_load, - ): - test_tabnet_trainer = tabnet_trainer.TabNetTrainer( - model_type=_TEST_MODEL_TYPE_CLASSIFICATION, - target_column=_TEST_TARGET_COLUMN, - learning_rate=_TEST_LEARNING_RATE, - job_dir=_TEST_JOB_DIR, - enable_profiler=True, - cache_data="test", - seed=2, - large_category_dim=2, - large_category_thresh=10, - yeo_johnson_transform=True, - weight_column="weight", - max_steps=5, - max_train_secs=600, - measurement_selection_type="LAST_MEASUREMENT", - optimization_metric="rmse", - eval_steps=1, - batch_size=10, - eval_frequency_secs=60, - feature_dim=8, - feature_dim_ratio=0.1, - num_decision_steps=3, - relaxation_factor=1.2, - decay_every=10.0, - decay_rate=0.9, - gradient_thresh=200.0, - sparsity_loss_weight=0.01, - batch_momentum=0.9, - batch_size_ratio=0.2, - num_transformer_layers=2, - num_transformer_layers_ratio=0.2, - class_weight=1.2, - loss_function_type="rmse", - alpha_focal_loss=0.2, - gamma_focal_loss=2.5, - ) - expected_binding = { - "model_type": _TEST_MODEL_TYPE_CLASSIFICATION, - "target_column": _TEST_TARGET_COLUMN, - "learning_rate": _TEST_LEARNING_RATE, - "job_dir": _TEST_JOB_DIR, - "enable_profiler": True, - "cache_data": "test", - "seed": 2, - "large_category_dim": 2, - "large_category_thresh": 10, - "yeo_johnson_transform": True, - "weight_column": "weight", - "max_steps": 5, - "max_train_secs": 600, - "measurement_selection_type": "LAST_MEASUREMENT", - "optimization_metric": "rmse", - "eval_steps": 1, - "batch_size": 10, - "eval_frequency_secs": 60, - "feature_dim": 8, - "feature_dim_ratio": 0.1, - "num_decision_steps": 3, - "relaxation_factor": 1.2, - "decay_every": 10.0, - "decay_rate": 0.9, - "gradient_thresh": 200.0, - "sparsity_loss_weight": 0.01, - "batch_momentum": 0.9, - "batch_size_ratio": 0.2, - "num_transformer_layers": 2, - "num_transformer_layers_ratio": 0.2, - "class_weight": 1.2, - "loss_function_type": "rmse", - "alpha_focal_loss": 0.2, - "gamma_focal_loss": 2.5, - "is_remote_trainer": True, - } - assert test_tabnet_trainer._binding == expected_binding - - aiplatform.init( - project=_TEST_PROJECT, - location=_TEST_LOCATION, - ) - - 
test_tabnet_trainer.fit.vertex.set_config( - staging_bucket=_TEST_STAGING_BUCKET, - machine_type=_TEST_MACHINE_TYPE, - display_name=_TEST_DISPLAY_NAME, - boot_disk_type=_TEST_BOOT_DISK_TYPE, - boot_disk_size_gb=_TEST_BOOT_DISK_SIZE_GB, - accelerator_type=_TEST_ACCELERATOR_TYPE, - accelerator_count=_TEST_ACCELERATOR_COUNT, - ) - - assert isinstance( - test_tabnet_trainer.fit.vertex.remote_config, - configs.DistributedTrainingConfig, - ) - - test_tabnet_trainer.fit(training_data=_TEST_DATA, validation_data=_TEST_DATA) - - expected_display_name = "TabNetTrainer-test" - expected_args = [ - f"--model_type={_TEST_MODEL_TYPE_CLASSIFICATION}", - f"--target_column={_TEST_TARGET_COLUMN}", - f"--learning_rate={_TEST_LEARNING_RATE}", - f"--job_dir={_TEST_JOB_DIR}", - "--enable_profiler=True", - "--cache_data=test", - "--seed=2", - "--large_category_dim=2", - "--large_category_thresh=10", - "--yeo_johnson_transform=True", - "--weight_column=weight", - "--max_steps=5", - "--max_train_secs=600", - "--measurement_selection_type=LAST_MEASUREMENT", - "--optimization_metric=rmse", - "--eval_steps=1", - "--batch_size=10", - "--eval_frequency_secs=60", - "--feature_dim=8", - "--feature_dim_ratio=0.1", - "--num_decision_steps=3", - "--relaxation_factor=1.2", - "--decay_every=10.0", - "--decay_rate=0.9", - "--gradient_thresh=200.0", - "--sparsity_loss_weight=0.01", - "--batch_momentum=0.9", - "--batch_size_ratio=0.2", - "--num_transformer_layers=2", - "--num_transformer_layers_ratio=0.2", - "--class_weight=1.2", - "--loss_function_type=rmse", - "--alpha_focal_loss=0.2", - "--gamma_focal_loss=2.5", - "--is_remote_trainer=True", - f"--training_data_path={_TEST_STAGING_BUCKET}/input/training_data_path", - f"--validation_data_path={_TEST_STAGING_BUCKET}/input/validation_data_path", - f"--output_model_path={_TEST_STAGING_BUCKET}/output/output_model_path", - ] - expected_worker_pool_specs = [ - { - "replica_count": 1, - "machine_spec": { - "machine_type": _TEST_MACHINE_TYPE, - "accelerator_type": _TEST_ACCELERATOR_TYPE, - "accelerator_count": _TEST_ACCELERATOR_COUNT, - }, - "disk_spec": { - "boot_disk_type": _TEST_BOOT_DISK_TYPE, - "boot_disk_size_gb": _TEST_BOOT_DISK_SIZE_GB, - }, - "container_spec": { - "image_uri": tabnet_trainer._TABNET_TRAINING_IMAGE, - "args": [], - }, - } - ] - expected_custom_job = gca_custom_job_compat.CustomJob( - display_name=f"{expected_display_name}-0", - job_spec=gca_custom_job_compat.CustomJobSpec( - worker_pool_specs=expected_worker_pool_specs, - base_output_directory=gca_io_compat.GcsDestination( - output_uri_prefix=os.path.join(_TEST_STAGING_BUCKET, "custom_job"), - ), - ), - ) - mock_create_custom_job.assert_called_once() - - assert ( - mock_create_custom_job.call_args[1]["parent"] - == f"projects/{_TEST_PROJECT}/locations/{_TEST_LOCATION}" - ) - assert not mock_create_custom_job.call_args[1]["timeout"] - test_custom_job = mock_create_custom_job.call_args[1]["custom_job"] - - test_args = test_custom_job.job_spec.worker_pool_specs[0].container_spec.args - assert set(test_args) == set(expected_args) - - test_custom_job.job_spec.worker_pool_specs[0].container_spec.args = [] - assert test_custom_job == expected_custom_job - - mock_blob_download_to_filename.assert_called_once() - mock_tf_saved_model_load.assert_called_once() - - def test_tabnet_trainer_predict_classification(self): - test_col_0 = [1.0, 3.0, 5.0] - test_col_1 = [2, 4, 6] - test_col_cat = [0, 1, 0] - - test_tabnet_trainer = tabnet_trainer.TabNetTrainer( - model_type=_TEST_MODEL_TYPE_CLASSIFICATION, - 
target_column=_TEST_TARGET_COLUMN, - learning_rate=_TEST_LEARNING_RATE, - ) - test_tabnet_trainer.model = Mock() - mock_serving_default = Mock() - mock_serving_default.return_value = { - "scores": tf.constant([[0.1, 0.9], [0.8, 0.2], [0.4, 0.6]]), - "classes": tf.constant([[0, 1], [0, 1], [0, 1]]), - } - expected_predict_results = pd.DataFrame({_TEST_TARGET_COLUMN: [1, 0, 1]}) - test_tabnet_trainer.model.signatures = {"serving_default": mock_serving_default} - test_data = pd.DataFrame( - {"col_0": test_col_0, "col_1": test_col_1, "col_cat": test_col_cat} - ) - test_data["col_cat"] = test_data["col_cat"].astype("category") - test_predict_results = test_tabnet_trainer.predict(test_data) - assert test_predict_results.equals(expected_predict_results) - - mock_serving_default.assert_called_once() - - assert not mock_serving_default.call_args[0] - assert list(mock_serving_default.call_args[1].keys()) == [ - "col_0", - "col_1", - "col_cat", - ] - - expected_input_col_0 = tf.constant(test_col_0, dtype=tf.float64) - assert ( - tf.equal(mock_serving_default.call_args[1]["col_0"], expected_input_col_0) - .numpy() - .all() - ) - expected_input_col_1 = tf.constant(test_col_1, dtype=tf.int64) - assert ( - tf.equal(mock_serving_default.call_args[1]["col_1"], expected_input_col_1) - .numpy() - .all() - ) - expected_input_col_cat = tf.constant(test_col_cat, dtype=tf.int64) - assert ( - tf.equal( - mock_serving_default.call_args[1]["col_cat"], expected_input_col_cat - ) - .numpy() - .all() - ) - - def test_tabnet_trainer_predict_regression(self): - test_col_0 = [1.0, 3.0, 5.0] - test_col_1 = [2, 4, 6] - test_col_cat = [0, 1, 0] - - test_tabnet_trainer = tabnet_trainer.TabNetTrainer( - model_type=_TEST_MODEL_TYPE_REGRESSION, - target_column=_TEST_TARGET_COLUMN, - learning_rate=_TEST_LEARNING_RATE, - ) - test_tabnet_trainer.model = Mock() - mock_serving_default = Mock() - mock_serving_default.return_value = { - "value": tf.constant([[0.1], [0.2], [0.3]], dtype=tf.float64) - } - expected_predict_results = pd.DataFrame({_TEST_TARGET_COLUMN: [0.1, 0.2, 0.3]}) - test_tabnet_trainer.model.signatures = {"serving_default": mock_serving_default} - test_data = pd.DataFrame( - {"col_0": test_col_0, "col_1": test_col_1, "col_cat": test_col_cat} - ) - test_data["col_cat"] = test_data["col_cat"].astype("category") - test_predict_results = test_tabnet_trainer.predict(test_data) - assert test_predict_results.equals(expected_predict_results) - - mock_serving_default.assert_called_once() - - assert not mock_serving_default.call_args[0] - assert list(mock_serving_default.call_args[1].keys()) == [ - "col_0", - "col_1", - "col_cat", - ] - - expected_input_col_0 = tf.constant(test_col_0, dtype=tf.float64) - assert ( - tf.equal(mock_serving_default.call_args[1]["col_0"], expected_input_col_0) - .numpy() - .all() - ) - expected_input_col_1 = tf.constant(test_col_1, dtype=tf.int64) - assert ( - tf.equal(mock_serving_default.call_args[1]["col_1"], expected_input_col_1) - .numpy() - .all() - ) - expected_input_col_cat = tf.constant(test_col_cat, dtype=tf.int64) - assert ( - tf.equal( - mock_serving_default.call_args[1]["col_cat"], expected_input_col_cat - ) - .numpy() - .all() - ) - - def test_tabnet_trainer_predict_load_model(self, mock_tf_saved_model_load): - test_col_0 = [1.0, 3.0, 5.0] - test_col_1 = [2, 4, 6] - test_col_cat = [0, 1, 0] - - test_tabnet_trainer = tabnet_trainer.TabNetTrainer( - model_type=_TEST_MODEL_TYPE_CLASSIFICATION, - target_column=_TEST_TARGET_COLUMN, - learning_rate=_TEST_LEARNING_RATE, - ) - 
test_tabnet_trainer.output_model_path = ( - f"{_TEST_STAGING_BUCKET}/output/output_model_path" - ) - mock_serving_default = Mock() - mock_serving_default.return_value = { - "scores": tf.constant([[0.1, 0.9], [0.8, 0.2], [0.4, 0.6]]), - "classes": tf.constant([[0, 1], [0, 1], [0, 1]]), - } - expected_predict_results = pd.DataFrame({_TEST_TARGET_COLUMN: [1, 0, 1]}) - mock_tf_saved_model_load.return_value.signatures = { - "serving_default": mock_serving_default - } - test_data = pd.DataFrame( - {"col_0": test_col_0, "col_1": test_col_1, "col_cat": test_col_cat} - ) - test_data["col_cat"] = test_data["col_cat"].astype("category") - test_predict_results = test_tabnet_trainer.predict(test_data) - assert test_predict_results.equals(expected_predict_results) - - mock_tf_saved_model_load.assert_called_once_with( - test_tabnet_trainer.output_model_path - ) - mock_serving_default.assert_called_once() - - assert not mock_serving_default.call_args[0] - assert list(mock_serving_default.call_args[1].keys()) == [ - "col_0", - "col_1", - "col_cat", - ] - - expected_input_col_0 = tf.constant(test_col_0, dtype=tf.float64) - assert ( - tf.equal(mock_serving_default.call_args[1]["col_0"], expected_input_col_0) - .numpy() - .all() - ) - expected_input_col_1 = tf.constant(test_col_1, dtype=tf.int64) - assert ( - tf.equal(mock_serving_default.call_args[1]["col_1"], expected_input_col_1) - .numpy() - .all() - ) - expected_input_col_cat = tf.constant(test_col_cat, dtype=tf.int64) - assert ( - tf.equal( - mock_serving_default.call_args[1]["col_cat"], expected_input_col_cat - ) - .numpy() - .all() - ) - - def test_tabnet_trainer_predict_no_trained_model(self): - test_tabnet_trainer = tabnet_trainer.TabNetTrainer( - model_type=_TEST_MODEL_TYPE_CLASSIFICATION, - target_column=_TEST_TARGET_COLUMN, - learning_rate=_TEST_LEARNING_RATE, - ) - err_msg = re.escape("No trained model. Please call .fit first.") - with pytest.raises(ValueError, match=err_msg): - test_tabnet_trainer.predict(pd.DataFrame()) - - self.output_model_path = None - with pytest.raises(ValueError, match=err_msg): - test_tabnet_trainer.predict(pd.DataFrame()) - - def test_tabnet_trainer_predict_invalid_model_type(self): - test_invalid_model_type = "invalid" - test_tabnet_trainer = tabnet_trainer.TabNetTrainer( - model_type=test_invalid_model_type, - target_column=_TEST_TARGET_COLUMN, - learning_rate=_TEST_LEARNING_RATE, - ) - test_tabnet_trainer.model = Mock() - test_tabnet_trainer.model.signatures = {"serving_default": Mock()} - err_msg = f"Unsupported model type: {test_invalid_model_type}" - with pytest.raises(ValueError, match=err_msg): - test_tabnet_trainer.predict(pd.DataFrame()) - - def test_tabnet_trainer_invalid_gcs_path(self): - test_invalid_path = "invalid_gcs_path" - err_msg = re.escape( - f"Invalid GCS path {test_invalid_path}. 
Please provide a valid GCS path starting with 'gs://'" - ) - with pytest.raises(ValueError, match=err_msg): - tabnet_trainer.TabNetTrainer( - model_type=_TEST_MODEL_TYPE_CLASSIFICATION, - target_column=_TEST_TARGET_COLUMN, - learning_rate=_TEST_LEARNING_RATE, - job_dir=test_invalid_path, - ) diff --git a/tests/unit/vertexai/test_vizier_hyperparameter_tuner.py b/tests/unit/vertexai/test_vizier_hyperparameter_tuner.py deleted file mode 100644 index 1fe8b7d03f..0000000000 --- a/tests/unit/vertexai/test_vizier_hyperparameter_tuner.py +++ /dev/null @@ -1,1850 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2023 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -"""Tests for hyperparameter_tuning/vizier_hyperparameter_tuner.py. -""" - -import concurrent -from importlib import reload -from unittest import mock - -from google.cloud import aiplatform -import vertexai -from google.cloud.aiplatform_v1.services.vizier_service import ( - VizierServiceClient, -) -from google.cloud.aiplatform_v1.types.study import Measurement -from google.cloud.aiplatform_v1.types.study import Trial -from google.cloud.aiplatform_v1.types.vizier_service import ( - SuggestTrialsResponse, -) -from vertexai.preview._workflow.driver import remote -from vertexai.preview._workflow.driver import ( - VertexRemoteFunctor, -) -from vertexai.preview._workflow.executor import training -from vertexai.preview._workflow.shared import configs -from vertexai.preview.developer import remote_specs -from vertexai.preview.hyperparameter_tuning import ( - VizierHyperparameterTuner, -) -import numpy as np -import pandas as pd -import pytest -from sklearn.linear_model import _logistic -import sklearn.metrics -import tensorflow as tf - - -_TEST_PARAMETER_SPEC = { - "parameter_id": "x", - "double_value_spec": {"min_value": -10.0, "max_value": 10.0}, -} -_TEST_PROJECT = "test-project" -_TEST_LOCATION = "us-central1" -_TEST_STUDY_NAME_PREFIX = "test_study" - -_TRAIN_COL_0 = np.array([0.1] * 100) -_TRAIN_COL_1 = np.array([0.2] * 100) -_TEST_COL_0 = np.array([0.3] * 100) -_TEST_COL_1 = np.array([0.4] * 100) -_TRAIN_TARGET = np.array([1] * 100) -_TEST_TARGET = np.array([1] * 100) -_TEST_X_TRAIN = pd.DataFrame({"col_0": _TRAIN_COL_0, "col_1": _TRAIN_COL_1}) -_TEST_Y_TRAIN = pd.DataFrame( - { - "target": _TRAIN_TARGET, - } -) -_TEST_TRAINING_DATA = pd.DataFrame( - {"col_0": _TRAIN_COL_0, "col_1": _TRAIN_COL_0, "target": _TRAIN_TARGET} -) -_TEST_X_TEST = pd.DataFrame({"col_0": _TEST_COL_0, "col_1": _TEST_COL_1}) -_TEST_Y_TEST_CLASSIFICATION_BINARY = pd.DataFrame( - { - "target": np.array([0] * 50 + [1] * 50), - } -) -_TEST_Y_PRED_CLASSIFICATION_BINARY = pd.DataFrame( - { - "target": np.array([0] * 30 + [1] * 70), - } -) -_TEST_Y_TEST_CLASSIFICATION_MULTI_CLASS = pd.DataFrame( - { - "target": np.array([1] * 25 + [2] * 25 + [3] * 25 + [4] * 25), - } -) -_TEST_Y_PRED_CLASSIFICATION_MULTI_CLASS = pd.DataFrame( - { - "target": np.array([1] * 25 + [2] * 25 + [4] * 25 + [8] * 25), - } -) -_TEST_Y_TEST_CLASSIFICATION_BINARY_KERAS 
= pd.DataFrame( - {"target": np.array([0, 1, 0, 1, 0])} -) -_TEST_Y_PRED_CLASSIFICATION_BINARY_KERAS = pd.DataFrame( - {"target": np.array([0.01, 0.56, 0.03, 0.65, 0.74])} -) -_TEST_Y_PRED_CLASSIFICATION_BINARY_KERAS_TRANSFORMED = pd.DataFrame( - {"target": np.array([0, 1, 0, 1, 1])} -) -_TEST_Y_TEST_CLASSIFICATION_MULTI_CLASS_KERAS = pd.DataFrame( - {"target": np.array([0, 1, 2, 1, 2])} -) -_TEST_Y_PRED_CLASSIFICATION_MULTI_CLASS_KERAS = pd.DataFrame( - { - "target_0": [0.98, 0.02, 0.01, 0.02, 0.02], - "target_1": [0.01, 0.97, 0.34, 0.96, 0.95], - "target_2": [0.01, 0.01, 0.65, 0.02, 0.03], - } -) -_TEST_Y_PRED_CLASSIFICATION_MULTI_CLASS_KERAS_TRANSFORMED = pd.DataFrame( - {"target": np.array([0, 1, 2, 1, 1])} -) -_TEST_Y_TEST_REGRESSION = pd.DataFrame( - { - "target": np.array([0.6] * 100), - } -) -_TEST_Y_PRED_REGRESSION = pd.DataFrame( - { - "target": np.array([0.8] * 100), - } -) -_TEST_CUSTOM_METRIC_VALUE = 0.5 -_TEST_VALIDATION_DATA = pd.DataFrame( - { - "col_0": _TEST_COL_0, - "col_1": _TEST_COL_1, - "target": _TEST_Y_TEST_CLASSIFICATION_BINARY["target"], - } -) - -_TEST_DISPLAY_NAME = "test_display_name" -_TEST_STAGING_BUCKET = "gs://test-staging-bucket" -_TEST_CONTAINER_URI = "gcr.io/test-image" -_TEST_CONTAINER_URI = "gcr.io/test-image" -_TEST_MACHINE_TYPE = "n1-standard-4" -_TEST_SERVICE_ACCOUNT = "test-service-account" -_TEST_TRAINING_CONFIG = configs.RemoteConfig( - display_name=_TEST_DISPLAY_NAME, - staging_bucket=_TEST_STAGING_BUCKET, - container_uri=_TEST_CONTAINER_URI, - machine_type=_TEST_MACHINE_TYPE, - service_account=_TEST_SERVICE_ACCOUNT, -) -_TEST_REMOTE_CONTAINER_TRAINING_CONFIG = configs.DistributedTrainingConfig( - display_name=_TEST_DISPLAY_NAME, - staging_bucket=_TEST_STAGING_BUCKET, -) -_TEST_TRIAL_NAME = "projects/123/locations/us/central1/studies/123/trials/1" -_TEST_TRIAL_STAGING_BUCKET = ( - _TEST_STAGING_BUCKET + "/projects-123-locations-us-central1-studies-123-trials/1" -) - - -@pytest.fixture -def mock_create_study(): - with mock.patch.object(VizierServiceClient, "create_study") as create_study_mock: - create_study_mock.return_value.name = "test_study" - yield create_study_mock - - -@pytest.fixture -def mock_suggest_trials(): - with mock.patch.object( - VizierServiceClient, "suggest_trials" - ) as suggest_trials_mock: - yield suggest_trials_mock - - -@pytest.fixture -def mock_list_trials(): - with mock.patch.object(VizierServiceClient, "list_trials") as list_trials_mock: - list_trials_mock.return_value.trials = [ - Trial( - name="trial_0", - final_measurement=Measurement( - metrics=[Measurement.Metric(metric_id="accuracy", value=0.5)] - ), - state=Trial.State.SUCCEEDED, - ), - Trial( - name="trial_1", - final_measurement=Measurement( - metrics=[Measurement.Metric(metric_id="accuracy", value=0.34)] - ), - state=Trial.State.SUCCEEDED, - ), - Trial( - name="trial_2", - final_measurement=Measurement( - metrics=[Measurement.Metric(metric_id="accuracy", value=0.99)] - ), - state=Trial.State.SUCCEEDED, - ), - Trial( - name="trial_3", - final_measurement=Measurement( - metrics=[Measurement.Metric(metric_id="accuracy", value=1.0)] - ), - state=Trial.State.STOPPING, - ), - ] - yield list_trials_mock - - -@pytest.fixture -def mock_complete_trial(): - with mock.patch.object( - VizierServiceClient, "complete_trial" - ) as complete_trial_mock: - yield complete_trial_mock - - -@pytest.fixture -def mock_binary_classifier(): - model = mock.Mock() - model.predict.return_value = _TEST_Y_PRED_CLASSIFICATION_BINARY - yield model - - -@pytest.fixture -def 
mock_multi_class_classifier(): - model = mock.Mock() - model.predict.return_value = _TEST_Y_PRED_CLASSIFICATION_MULTI_CLASS - yield model - - -@pytest.fixture -def mock_regressor(): - model = mock.Mock() - model.predict.return_value = _TEST_Y_PRED_REGRESSION - return model - - -@pytest.fixture -def mock_model_custom_metric(): - model = mock.Mock() - model.score.return_value = _TEST_CUSTOM_METRIC_VALUE - yield model - - -@pytest.fixture -def mock_executor_map(): - with mock.patch.object( - concurrent.futures.ThreadPoolExecutor, "map" - ) as executor_map_mock: - yield executor_map_mock - - -@pytest.fixture -def mock_keras_classifier(): - with mock.patch("tensorflow.keras.Sequential", autospec=True) as keras_mock: - yield keras_mock - - -class TestTrainerA(remote.VertexModel): - def predict(self, x_test): - return - - @vertexai.preview.developer.mark.train( - remote_config=_TEST_TRAINING_CONFIG, - ) - def train(self, x, y): - return - - -def get_test_trainer_a(): - model = TestTrainerA() - model.predict = mock.Mock() - model.predict.return_value = _TEST_Y_PRED_CLASSIFICATION_BINARY - return model - - -class TestTrainerB(remote.VertexModel): - def predict(self, x_test): - return - - @vertexai.preview.developer.mark.train( - remote_config=_TEST_TRAINING_CONFIG, - ) - def train(self, x_train, y_train, x_test, y_test): - return - - -def get_test_trainer_b(): - model = TestTrainerB() - model.predict = mock.Mock() - model.predict.return_value = _TEST_Y_PRED_CLASSIFICATION_BINARY - return model - - -class TestRemoteContainerTrainer(remote.VertexModel): - def __init__(self): - super().__init__() - self._binding = {} - - def predict(self, x_test): - return - - # pylint: disable=invalid-name,missing-function-docstring - @vertexai.preview.developer.mark._remote_container_train( - image_uri=_TEST_CONTAINER_URI, - additional_data=[ - remote_specs._InputParameterSpec( - "training_data", - argument_name="training_data_path", - serializer="parquet", - ), - remote_specs._InputParameterSpec( - "validation_data", - argument_name="validation_data_path", - serializer="parquet", - ), - ], - remote_config=_TEST_REMOTE_CONTAINER_TRAINING_CONFIG, - ) - def fit(self, training_data, validation_data): - return - - -def get_test_remote_container_trainer(): - model = TestRemoteContainerTrainer() - model.predict = mock.Mock() - model.predict.return_value = _TEST_Y_PRED_CLASSIFICATION_BINARY - return model - - -class TestVizierHyperparameterTuner: - def setup_method(self): - reload(aiplatform.initializer) - reload(aiplatform) - reload(vertexai.preview.initializer) - reload(vertexai) - - @pytest.mark.usefixtures("google_auth_mock", "mock_uuid") - def test_vizier_hyper_parameter_tuner(self, mock_create_study): - vertexai.init(project=_TEST_PROJECT, location=_TEST_LOCATION) - - test_model_name = "test_model" - test_max_trial_count = 16 - test_parallel_trial_count = 4 - test_hparam_space = [_TEST_PARAMETER_SPEC] - test_metric_id = "rmse" - test_metric_goal = "MINIMIZE" - test_max_failed_trial_count = 12 - test_search_algorithm = "RANDOM_SEARCH" - test_project = "custom-project" - test_location = "custom-location" - - def get_model_func(): - model = mock.Mock() - model.name = test_model_name - return model - - test_tuner = VizierHyperparameterTuner( - get_model_func=get_model_func, - max_trial_count=test_max_trial_count, - parallel_trial_count=test_parallel_trial_count, - hparam_space=test_hparam_space, - metric_id=test_metric_id, - metric_goal=test_metric_goal, - max_failed_trial_count=test_max_failed_trial_count, - 
search_algorithm=test_search_algorithm, - project=test_project, - location=test_location, - study_display_name_prefix=_TEST_STUDY_NAME_PREFIX, - ) - assert test_tuner.get_model_func().name == test_model_name - assert test_tuner.max_trial_count == test_max_trial_count - assert test_tuner.parallel_trial_count == test_parallel_trial_count - assert test_tuner.hparam_space == test_hparam_space - assert test_tuner.metric_id == test_metric_id - assert test_tuner.metric_goal == test_metric_goal - assert test_tuner.max_failed_trial_count == test_max_failed_trial_count - assert test_tuner.search_algorithm == test_search_algorithm - assert test_tuner.vertex == configs.VertexConfig() - - expected_study_name = f"{_TEST_STUDY_NAME_PREFIX}_0" - expected_study_config = { - "display_name": expected_study_name, - "study_spec": { - "algorithm": test_search_algorithm, - "parameters": test_hparam_space, - "metrics": [{"metric_id": test_metric_id, "goal": test_metric_goal}], - }, - } - expected_parent = f"projects/{test_project}/locations/{test_location}" - mock_create_study.assert_called_once_with( - parent=expected_parent, study=expected_study_config - ) - assert isinstance(test_tuner.vizier_client, VizierServiceClient) - assert test_tuner.study == mock_create_study.return_value - - @pytest.mark.usefixtures("google_auth_mock", "mock_uuid") - def test_vizier_hyper_parameter_tuner_default(self, mock_create_study): - test_model_name = "test_model" - test_max_trial_count = 16 - test_parallel_trial_count = 4 - test_hparam_space = [_TEST_PARAMETER_SPEC] - - def get_model_func(): - model = mock.Mock() - model.name = test_model_name - return model - - test_tuner = VizierHyperparameterTuner( - get_model_func=get_model_func, - max_trial_count=test_max_trial_count, - parallel_trial_count=test_parallel_trial_count, - hparam_space=test_hparam_space, - ) - assert test_tuner.get_model_func().name == test_model_name - assert test_tuner.max_trial_count == test_max_trial_count - assert test_tuner.parallel_trial_count == test_parallel_trial_count - assert test_tuner.hparam_space == test_hparam_space - assert test_tuner.metric_id == "accuracy" - assert test_tuner.metric_goal == "MAXIMIZE" - assert test_tuner.max_failed_trial_count == 0 - assert test_tuner.search_algorithm == "ALGORITHM_UNSPECIFIED" - assert test_tuner.vertex == configs.VertexConfig() - - expected_study_name = "vizier_hyperparameter_tuner_study_0" - expected_study_config = { - "display_name": expected_study_name, - "study_spec": { - "algorithm": "ALGORITHM_UNSPECIFIED", - "parameters": test_hparam_space, - "metrics": [{"metric_id": "accuracy", "goal": "MAXIMIZE"}], - }, - } - expected_parent = f"projects/{_TEST_PROJECT}/locations/{_TEST_LOCATION}" - mock_create_study.assert_called_once_with( - parent=expected_parent, study=expected_study_config - ) - assert isinstance(test_tuner.vizier_client, VizierServiceClient) - assert test_tuner.study == mock_create_study.return_value - - def test_vizier_hyper_parameter_tuner_error(self): - def get_model_func(): - return - - test_invalid_metric_id = "invalid_metric_id" - with pytest.raises(ValueError, match="Unsupported metric_id"): - VizierHyperparameterTuner( - get_model_func=get_model_func, - max_trial_count=16, - parallel_trial_count=4, - hparam_space=[_TEST_PARAMETER_SPEC], - metric_id=test_invalid_metric_id, - ) - - @pytest.mark.usefixtures("google_auth_mock", "mock_create_study") - def test_get_best_models(self, mock_list_trials): - vertexai.init(project=_TEST_PROJECT, location=_TEST_LOCATION) - - test_max_trial_count 
= 16 - test_parallel_trial_count = 4 - test_hparam_space = [_TEST_PARAMETER_SPEC] - - def get_model_func(): - return mock.Mock() - - test_tuner = VizierHyperparameterTuner( - get_model_func=get_model_func, - max_trial_count=test_max_trial_count, - parallel_trial_count=test_parallel_trial_count, - hparam_space=test_hparam_space, - ) - test_tuner.models["trial_0"] = get_model_func() - test_tuner.models["trial_1"] = get_model_func() - test_tuner.models["trial_2"] = get_model_func() - test_tuner.models["trial_3"] = get_model_func() - assert test_tuner.get_best_models(2) == [ - test_tuner.models["trial_2"], - test_tuner.models["trial_0"], - ] - mock_list_trials.assert_called_once_with({"parent": "test_study"}) - - @pytest.mark.usefixtures("google_auth_mock", "mock_create_study") - def test_create_train_and_test_split_x_and_y(self): - x = pd.DataFrame( - { - "col_0": np.array([0.1] * 100), - "col_1": np.array([0.2] * 100), - } - ) - y = pd.DataFrame( - { - "target": np.array([0.3] * 100), - } - ) - - def get_model_func(): - return mock.Mock() - - test_tuner = VizierHyperparameterTuner( - get_model_func=get_model_func, - max_trial_count=1, - parallel_trial_count=1, - hparam_space=[], - ) - x_train, x_test, y_train, y_test = test_tuner._create_train_and_test_splits( - x, y - ) - assert x_train.shape == (75, 2) - assert list(x_train.columns) == ["col_0", "col_1"] - assert x_test.shape == (25, 2) - assert list(x_test.columns) == ["col_0", "col_1"] - assert y_train.shape == (75, 1) - assert list(y_train.columns) == ["target"] - assert y_test.shape == (25, 1) - assert list(y_test.columns) == ["target"] - - @pytest.mark.usefixtures("google_auth_mock", "mock_create_study") - def test_create_train_and_test_split_only_x(self): - x = pd.DataFrame( - { - "col_0": np.array([0.1] * 100), - "col_1": np.array([0.2] * 100), - "target": np.array([0.3] * 100), - } - ) - - def get_model_func(): - return mock.Mock() - - test_tuner = VizierHyperparameterTuner( - get_model_func=get_model_func, - max_trial_count=1, - parallel_trial_count=1, - hparam_space=[], - ) - x_train, x_test, y_train, y_test = test_tuner._create_train_and_test_splits( - x, "target", test_fraction=0.2 - ) - assert x_train.shape == (80, 3) - assert list(x_train.columns) == ["col_0", "col_1", "target"] - assert x_test.shape == (20, 2) - assert list(x_test.columns) == ["col_0", "col_1"] - assert not y_train - assert y_test.shape == (20, 1) - assert list(y_test.columns) == ["target"] - - @pytest.mark.parametrize( - "test_fraction", - [-0.2, 0, 1, 1.2], - ) - @pytest.mark.usefixtures("google_auth_mock", "mock_create_study") - def test_create_train_and_test_split_invalid_test_fraction(self, test_fraction): - def get_model_func(): - return mock.Mock() - - test_tuner = VizierHyperparameterTuner( - get_model_func=get_model_func, - max_trial_count=1, - parallel_trial_count=1, - hparam_space=[], - ) - - err_msg = f"test_fraction must be greater than 0 and less than 1 but was {test_fraction}" - with pytest.raises(ValueError, match=err_msg): - test_tuner._create_train_and_test_splits( - pd.DataFrame(), pd.DataFrame(), test_fraction=test_fraction - ) - - @pytest.mark.parametrize( - "metric_id,expected_value", - [ - ( - "roc_auc", - sklearn.metrics.roc_auc_score( - _TEST_Y_TEST_CLASSIFICATION_BINARY, - _TEST_Y_PRED_CLASSIFICATION_BINARY, - ), - ), - ( - "f1", - sklearn.metrics.f1_score( - _TEST_Y_TEST_CLASSIFICATION_BINARY, - _TEST_Y_PRED_CLASSIFICATION_BINARY, - ), - ), - ( - "precision", - sklearn.metrics.precision_score( - _TEST_Y_TEST_CLASSIFICATION_BINARY, 
- _TEST_Y_PRED_CLASSIFICATION_BINARY, - ), - ), - ( - "recall", - sklearn.metrics.recall_score( - _TEST_Y_TEST_CLASSIFICATION_BINARY, - _TEST_Y_PRED_CLASSIFICATION_BINARY, - ), - ), - ( - "accuracy", - sklearn.metrics.accuracy_score( - _TEST_Y_TEST_CLASSIFICATION_BINARY, - _TEST_Y_PRED_CLASSIFICATION_BINARY, - ), - ), - ], - ) - @pytest.mark.usefixtures("google_auth_mock", "mock_create_study") - def test_evaluate_model_binary_classification( - self, - metric_id, - expected_value, - mock_binary_classifier, - ): - def get_model_func(): - return mock.Mock() - - test_tuner = VizierHyperparameterTuner( - get_model_func=get_model_func, - max_trial_count=1, - parallel_trial_count=1, - hparam_space=[], - metric_id=metric_id, - metric_goal="MAXIMIZE", - ) - test_model, test_value = test_tuner._evaluate_model( - mock_binary_classifier, - _TEST_X_TEST, - _TEST_Y_TEST_CLASSIFICATION_BINARY, - ) - assert test_value == expected_value - assert test_model == mock_binary_classifier - - @pytest.mark.parametrize( - "metric_id,expected_value", - [ - ( - "accuracy", - sklearn.metrics.accuracy_score( - _TEST_Y_TEST_CLASSIFICATION_MULTI_CLASS, - _TEST_Y_PRED_CLASSIFICATION_MULTI_CLASS, - ), - ), - ], - ) - @pytest.mark.usefixtures("google_auth_mock", "mock_create_study") - def test_evaluate_model_multi_class_classification( - self, - metric_id, - expected_value, - mock_multi_class_classifier, - ): - def get_model_func(): - return mock.Mock() - - test_tuner = VizierHyperparameterTuner( - get_model_func=get_model_func, - max_trial_count=1, - parallel_trial_count=1, - hparam_space=[], - metric_id=metric_id, - metric_goal="MAXIMIZE", - ) - test_model, test_value = test_tuner._evaluate_model( - mock_multi_class_classifier, - _TEST_X_TEST, - _TEST_Y_TEST_CLASSIFICATION_MULTI_CLASS, - ) - assert test_value == expected_value - assert test_model == mock_multi_class_classifier - - @pytest.mark.parametrize( - "metric_id,metric_goal,expected_value", - [ - ( - "mae", - "MINIMIZE", - sklearn.metrics.mean_absolute_error( - _TEST_Y_TEST_REGRESSION, _TEST_Y_PRED_REGRESSION - ), - ), - ( - "mape", - "MINIMIZE", - sklearn.metrics.mean_absolute_percentage_error( - _TEST_Y_TEST_REGRESSION, _TEST_Y_PRED_REGRESSION - ), - ), - ( - "r2", - "MAXIMIZE", - sklearn.metrics.r2_score( - _TEST_Y_TEST_REGRESSION, _TEST_Y_PRED_REGRESSION - ), - ), - ( - "rmse", - "MINIMIZE", - sklearn.metrics.mean_squared_error( - _TEST_Y_TEST_REGRESSION, _TEST_Y_PRED_REGRESSION, squared=False - ), - ), - ( - "rmsle", - "MINIMIZE", - sklearn.metrics.mean_squared_log_error( - _TEST_Y_TEST_REGRESSION, _TEST_Y_PRED_REGRESSION, squared=False - ), - ), - ( - "mse", - "MINIMIZE", - sklearn.metrics.mean_squared_error( - _TEST_Y_TEST_REGRESSION, _TEST_Y_PRED_REGRESSION - ), - ), - ], - ) - @pytest.mark.usefixtures("google_auth_mock", "mock_create_study") - def test_evaluate_model_regression( - self, - metric_id, - metric_goal, - expected_value, - mock_regressor, - ): - def get_model_func(): - return mock.Mock() - - test_tuner = VizierHyperparameterTuner( - get_model_func=get_model_func, - max_trial_count=1, - parallel_trial_count=1, - hparam_space=[], - metric_id=metric_id, - metric_goal=metric_goal, - ) - test_model, test_value = test_tuner._evaluate_model( - mock_regressor, _TEST_X_TEST, _TEST_Y_TEST_REGRESSION - ) - assert test_value == expected_value - assert test_model == mock_regressor - - @pytest.mark.usefixtures("google_auth_mock", "mock_create_study") - def test_evaluate_model_custom_metric( - self, - mock_model_custom_metric, - ): - def get_model_func(): - 
return mock.Mock() - - test_tuner = VizierHyperparameterTuner( - get_model_func=get_model_func, - max_trial_count=1, - parallel_trial_count=1, - hparam_space=[], - metric_id="custom", - ) - test_model, test_value = test_tuner._evaluate_model( - mock_model_custom_metric, - _TEST_X_TEST, - _TEST_Y_TEST_REGRESSION, - ) - assert test_value == _TEST_CUSTOM_METRIC_VALUE - assert test_model == mock_model_custom_metric - - @pytest.mark.usefixtures("google_auth_mock", "mock_create_study") - def test_evaluate_model_invalid(self): - def get_model_func(): - return mock.Mock() - - test_tuner = VizierHyperparameterTuner( - get_model_func=get_model_func, - max_trial_count=1, - parallel_trial_count=1, - hparam_space=[], - ) - test_tuner.metric_id = "invalid_metric_id" - with pytest.raises(ValueError, match="Unsupported metric_id"): - test_tuner._evaluate_model( - "model", - pd.DataFrame(), - pd.DataFrame(), - ) - - @pytest.mark.parametrize( - "metric_id,expected_value", - [ - ( - "roc_auc", - sklearn.metrics.roc_auc_score( - _TEST_Y_TEST_CLASSIFICATION_BINARY_KERAS, - _TEST_Y_PRED_CLASSIFICATION_BINARY_KERAS_TRANSFORMED, - ), - ), - ( - "f1", - sklearn.metrics.f1_score( - _TEST_Y_TEST_CLASSIFICATION_BINARY_KERAS, - _TEST_Y_PRED_CLASSIFICATION_BINARY_KERAS_TRANSFORMED, - ), - ), - ( - "precision", - sklearn.metrics.precision_score( - _TEST_Y_TEST_CLASSIFICATION_BINARY_KERAS, - _TEST_Y_PRED_CLASSIFICATION_BINARY_KERAS_TRANSFORMED, - ), - ), - ( - "recall", - sklearn.metrics.recall_score( - _TEST_Y_TEST_CLASSIFICATION_BINARY_KERAS, - _TEST_Y_PRED_CLASSIFICATION_BINARY_KERAS_TRANSFORMED, - ), - ), - ( - "accuracy", - sklearn.metrics.accuracy_score( - _TEST_Y_TEST_CLASSIFICATION_BINARY_KERAS, - _TEST_Y_PRED_CLASSIFICATION_BINARY_KERAS_TRANSFORMED, - ), - ), - ], - ) - @pytest.mark.usefixtures("google_auth_mock", "mock_create_study") - def test_evaluate_keras_model_binary_classification( - self, metric_id, expected_value, mock_keras_classifier - ): - def get_model_func(): - return mock.Mock - - test_tuner = VizierHyperparameterTuner( - get_model_func=get_model_func, - max_trial_count=1, - parallel_trial_count=1, - hparam_space=[], - metric_id=metric_id, - metric_goal="MAXIMIZE", - ) - mock_keras_classifier.predict.return_value = ( - _TEST_Y_PRED_CLASSIFICATION_BINARY_KERAS - ) - - test_model, test_value = test_tuner._evaluate_model( - mock_keras_classifier, - _TEST_X_TEST, - _TEST_Y_TEST_CLASSIFICATION_BINARY_KERAS, - ) - assert test_value == expected_value - assert test_model == mock_keras_classifier - - @pytest.mark.parametrize( - "metric_id,expected_value", - [ - ( - "accuracy", - sklearn.metrics.accuracy_score( - _TEST_Y_TEST_CLASSIFICATION_MULTI_CLASS_KERAS, - _TEST_Y_PRED_CLASSIFICATION_MULTI_CLASS_KERAS_TRANSFORMED, - ), - ), - ], - ) - @pytest.mark.usefixtures("google_auth_mock", "mock_create_study") - def test_evaluate_keras_model_multi_class_classification( - self, - metric_id, - expected_value, - mock_keras_classifier, - ): - def get_model_func(): - return mock.Mock() - - test_tuner = VizierHyperparameterTuner( - get_model_func=get_model_func, - max_trial_count=1, - parallel_trial_count=1, - hparam_space=[], - metric_id=metric_id, - metric_goal="MAXIMIZE", - ) - mock_keras_classifier.predict.return_value = ( - _TEST_Y_PRED_CLASSIFICATION_MULTI_CLASS_KERAS - ) - test_model, test_value = test_tuner._evaluate_model( - mock_keras_classifier, - _TEST_X_TEST, - _TEST_Y_TEST_CLASSIFICATION_MULTI_CLASS_KERAS, - ) - assert test_value == expected_value - assert test_model == mock_keras_classifier - - 
@pytest.mark.usefixtures("google_auth_mock", "mock_create_study") - def test_add_model_and_report_trial_metrics_feasible( - self, mock_binary_classifier, mock_complete_trial - ): - def get_model_func(): - return mock.Mock() - - test_tuner = VizierHyperparameterTuner( - get_model_func=get_model_func, - max_trial_count=1, - parallel_trial_count=1, - hparam_space=[], - ) - test_trial_name = "trial_0" - test_model = mock_binary_classifier - test_metric_value = 1.0 - test_tuner._add_model_and_report_trial_metrics( - test_trial_name, - (test_model, test_metric_value), - ) - mock_complete_trial.assert_called_once_with( - { - "name": test_trial_name, - "final_measurement": { - "metrics": [{"metric_id": "accuracy", "value": test_metric_value}] - }, - } - ) - assert test_tuner.models == {test_trial_name: mock_binary_classifier} - - @pytest.mark.usefixtures("google_auth_mock", "mock_create_study") - def test_add_model_and_report_trial_metrics_infeasible(self, mock_complete_trial): - def get_model_func(): - return mock.Mock() - - test_tuner = VizierHyperparameterTuner( - get_model_func=get_model_func, - max_trial_count=1, - parallel_trial_count=1, - hparam_space=[], - ) - test_trial_name = "trial_0" - test_tuner._add_model_and_report_trial_metrics( - test_trial_name, - None, - ) - mock_complete_trial.assert_called_once_with( - {"name": test_trial_name, "trial_infeasible": True} - ) - assert test_tuner.models == {} - - @pytest.mark.usefixtures("google_auth_mock", "mock_create_study") - def test_suggest_trials(self, mock_suggest_trials): - test_parallel_trial_count = 4 - - def get_model_func(): - return - - mock_suggest_trials.return_value.result.return_value.trials = [ - Trial(name="trial_0"), - Trial(name="trial_1"), - Trial(name="trial_2"), - Trial(name="trial_3"), - ] - - test_tuner = VizierHyperparameterTuner( - get_model_func=get_model_func, - max_trial_count=16, - parallel_trial_count=test_parallel_trial_count, - hparam_space=[_TEST_PARAMETER_SPEC], - ) - test_suggested_trials = test_tuner._suggest_trials(test_parallel_trial_count) - - expected_suggest_trials_request = { - "parent": "test_study", - "suggestion_count": test_parallel_trial_count, - "client_id": "client", - } - mock_suggest_trials.assert_called_once_with(expected_suggest_trials_request) - assert test_suggested_trials == mock_suggest_trials().result().trials - - @pytest.mark.usefixtures("google_auth_mock", "mock_create_study") - def test_set_model_parameters(self): - def get_model_func(penalty: str, C: float, dual=True): - return _logistic.LogisticRegression(penalty=penalty, C=C, dual=dual) - - hparam_space = [ - { - "parameter_id": "penalty", - "categorical_value_spec": {"values": ["l1", "l2"]}, - }, - { - "parameter_id": "C", - "discrete_value_spec": {"values": [0.002, 0.01, 0.03]}, - }, - {"parameter_id": "extra_1", "discrete_value_spec": {"values": [1, 2, 3]}}, - ] - test_tuner = VizierHyperparameterTuner( - get_model_func=get_model_func, - max_trial_count=16, - parallel_trial_count=4, - hparam_space=hparam_space, - ) - trial = Trial( - name="trial_1", - parameters=[ - Trial.Parameter(parameter_id="penalty", value="elasticnet"), - Trial.Parameter(parameter_id="C", value=0.05), - Trial.Parameter(parameter_id="extra_1", value=1.0), - ], - ) - model, model_runtime_parameters = test_tuner._set_model_parameters( - trial, fixed_runtime_params={"extra_2": 5} - ) - - assert model.C == 0.05 - assert model.dual - assert model.penalty == "elasticnet" - assert model_runtime_parameters == {"extra_1": 1, "extra_2": 5} - - 
@pytest.mark.usefixtures("google_auth_mock", "mock_create_study") - def test_set_model_parameters_no_runtime_params(self): - def get_model_func(penalty: str, C: float, dual=True): - return _logistic.LogisticRegression(penalty=penalty, C=C, dual=dual) - - hparam_space = [ - { - "parameter_id": "penalty", - "categorical_value_spec": {"values": ["l1", "l2"]}, - }, - { - "parameter_id": "C", - "discrete_value_spec": {"values": [0.002, 0.01, 0.03]}, - }, - {"parameter_id": "extra_1", "discrete_value_spec": {"values": [1, 2, 3]}}, - ] - - test_tuner = VizierHyperparameterTuner( - get_model_func=get_model_func, - max_trial_count=16, - parallel_trial_count=4, - hparam_space=hparam_space, - ) - trial = Trial( - name="trial_1", - parameters=[ - Trial.Parameter(parameter_id="penalty", value="elasticnet"), - Trial.Parameter(parameter_id="C", value=0.05), - Trial.Parameter(parameter_id="dual", value=False), - ], - ) - model, model_runtime_parameters = test_tuner._set_model_parameters(trial) - - assert model.C == 0.05 - assert not model.dual - assert model.penalty == "elasticnet" - assert not model_runtime_parameters - - @pytest.mark.usefixtures("google_auth_mock", "mock_create_study") - def test_get_vertex_model_train_method_and_params(self): - vertexai.init(project=_TEST_PROJECT, location=_TEST_LOCATION) - vertexai.preview.init(remote=False) - - class TestVertexModel(remote.VertexModel): - @vertexai.preview.developer.mark.train( - remote_config=_TEST_TRAINING_CONFIG, - ) - def train( - self, - x, - y, - x_train, - y_train, - x_test, - y_test, - training_data, - validation_data, - X, - X_train, - X_test, - ): - return - - def get_model_func(): - return TestVertexModel() - - test_tuner = VizierHyperparameterTuner( - get_model_func=get_model_func, - max_trial_count=16, - parallel_trial_count=4, - hparam_space=[], - ) - test_model = get_model_func() - ( - test_train_method, - test_data_params, - ) = test_tuner._get_vertex_model_train_method_and_params( - test_model, - _TEST_X_TRAIN, - _TEST_Y_TRAIN, - _TEST_X_TEST, - _TEST_Y_TEST_CLASSIFICATION_BINARY, - _TEST_TRIAL_NAME, - ) - assert test_train_method == test_model.train - assert set(test_data_params.keys()) == set( - [ - "x", - "y", - "x_train", - "y_train", - "x_test", - "y_test", - "training_data", - "validation_data", - "X", - "X_train", - "X_test", - ] - ) - assert test_data_params["x"].equals(_TEST_X_TRAIN) - assert test_data_params["y"].equals(_TEST_Y_TRAIN) - assert test_data_params["x_train"].equals(_TEST_X_TRAIN) - assert test_data_params["y_train"].equals(_TEST_Y_TRAIN) - assert test_data_params["x_test"].equals(_TEST_X_TEST) - assert test_data_params["y_test"].equals(_TEST_Y_TEST_CLASSIFICATION_BINARY) - assert test_data_params["training_data"].equals(_TEST_X_TRAIN) - assert test_data_params["validation_data"].equals(_TEST_VALIDATION_DATA) - assert test_data_params["X"].equals(_TEST_X_TRAIN) - assert test_data_params["X_train"].equals(_TEST_X_TRAIN) - assert test_data_params["X_test"].equals(_TEST_X_TEST) - - # staging_bucket is not overriden in local mode. 
- assert ( - test_train_method.vertex.remote_config.staging_bucket - == _TEST_STAGING_BUCKET - ) - - @pytest.mark.usefixtures("google_auth_mock", "mock_create_study") - def test_get_vertex_model_train_method_and_params_no_y_train(self): - vertexai.init(project=_TEST_PROJECT, location=_TEST_LOCATION) - - class TestVertexModel(remote.VertexModel): - @vertexai.preview.developer.mark.train( - remote_config=_TEST_TRAINING_CONFIG, - ) - def train(self, training_data, validation_data): - return - - def get_model_func(): - return TestVertexModel() - - test_tuner = VizierHyperparameterTuner( - get_model_func=get_model_func, - max_trial_count=16, - parallel_trial_count=4, - hparam_space=[], - ) - test_model = get_model_func() - ( - test_train_method, - test_data_params, - ) = test_tuner._get_vertex_model_train_method_and_params( - test_model, - _TEST_TRAINING_DATA, - None, - _TEST_X_TEST, - _TEST_Y_TEST_CLASSIFICATION_BINARY, - _TEST_TRIAL_NAME, - ) - assert test_train_method == test_model.train - assert set(test_data_params.keys()) == set(["training_data", "validation_data"]) - assert test_data_params["training_data"].equals(_TEST_TRAINING_DATA) - assert test_data_params["validation_data"].equals(_TEST_VALIDATION_DATA) - - @pytest.mark.parametrize( - "get_model_func", [get_test_trainer_a, get_test_remote_container_trainer] - ) - @pytest.mark.usefixtures("google_auth_mock", "mock_create_study") - def test_get_vertex_model_train_method_and_params_remote_staging_bucket( - self, get_model_func - ): - vertexai.init( - project=_TEST_PROJECT, - location=_TEST_LOCATION, - staging_bucket=_TEST_STAGING_BUCKET, - ) - vertexai.preview.init(remote=True) - test_tuner = VizierHyperparameterTuner( - get_model_func=get_model_func, - max_trial_count=16, - parallel_trial_count=4, - hparam_space=[], - ) - test_model = get_model_func() - test_train_method, _ = test_tuner._get_vertex_model_train_method_and_params( - test_model, - _TEST_X_TRAIN, - _TEST_Y_TRAIN, - _TEST_X_TEST, - _TEST_Y_TEST_CLASSIFICATION_BINARY, - _TEST_TRIAL_NAME, - ) - assert ( - test_train_method.vertex.remote_config.staging_bucket - == _TEST_TRIAL_STAGING_BUCKET - ) - - @pytest.mark.usefixtures("google_auth_mock", "mock_create_study") - def test_get_vertex_model_train_method_and_params_no_remote_executable(self): - class TestVertexModel(remote.VertexModel): - def train(self, x, y): - return - - @vertexai.preview.developer.mark.predict() - def predict(self, x): - return - - def get_model_func(): - return TestVertexModel() - - vertexai.init(project=_TEST_PROJECT, location=_TEST_LOCATION) - test_tuner = VizierHyperparameterTuner( - get_model_func=get_model_func, - max_trial_count=16, - parallel_trial_count=4, - hparam_space=[], - ) - test_model = get_model_func() - with pytest.raises(ValueError, match="No remote executable train method"): - test_tuner._get_vertex_model_train_method_and_params( - test_model, - _TEST_X_TRAIN, - _TEST_Y_TRAIN, - _TEST_X_TEST, - _TEST_Y_TEST_CLASSIFICATION_BINARY, - _TEST_TRIAL_NAME, - ) - - @pytest.mark.parametrize( - "get_model_func,x_train,y_train,x_test,y_test", - [ - ( - get_test_trainer_a, - _TEST_X_TRAIN, - None, - _TEST_Y_TRAIN, - _TEST_Y_TEST_CLASSIFICATION_BINARY, - ), - ( - get_test_trainer_b, - _TEST_X_TRAIN, - _TEST_X_TEST, - _TEST_Y_TRAIN, - _TEST_Y_TEST_CLASSIFICATION_BINARY, - ), - ], - ) - @pytest.mark.usefixtures("google_auth_mock", "mock_create_study") - def test_run_trial_vertex_model_train( - self, - get_model_func, - x_train, - y_train, - x_test, - y_test, - ): - # For unit tests only test local 
mode. - vertexai.init(project=_TEST_PROJECT, location=_TEST_LOCATION) - vertexai.preview.init(remote=False) - - test_tuner = VizierHyperparameterTuner( - get_model_func=get_model_func, - max_trial_count=16, - parallel_trial_count=4, - hparam_space=[], - ) - test_trial = Trial(name="trial_0", parameters=[]) - test_model, test_metric_value = test_tuner._run_trial( - x_train=x_train, - y_train=y_train, - x_test=x_test, - y_test=y_test, - trial=test_trial, - ) - assert isinstance(test_model, type(get_model_func())) - test_model.predict.assert_called_once_with(x_test) - assert test_metric_value == sklearn.metrics.accuracy_score( - _TEST_Y_TEST_CLASSIFICATION_BINARY, - _TEST_Y_PRED_CLASSIFICATION_BINARY, - ) - - @pytest.mark.usefixtures( - "google_auth_mock", - "mock_create_study", - "mock_blob_upload_from_filename", - "mock_create_custom_job", - "mock_get_custom_job_succeeded", - ) - def test_run_trial_vertex_model_remote_container_train(self): - vertexai.init(project=_TEST_PROJECT, location=_TEST_LOCATION) - vertexai.preview.init(remote=True) - - test_tuner = VizierHyperparameterTuner( - get_model_func=get_test_remote_container_trainer, - max_trial_count=16, - parallel_trial_count=4, - hparam_space=[], - ) - test_trial = Trial(name="trial_0", parameters=[]) - test_model, test_metric_value = test_tuner._run_trial( - x_train=_TEST_TRAINING_DATA, - y_train=None, - x_test=_TEST_X_TEST, - y_test=_TEST_Y_TEST_CLASSIFICATION_BINARY, - trial=test_trial, - ) - assert isinstance(test_model, TestRemoteContainerTrainer) - test_model.predict.assert_called_once_with(_TEST_X_TEST) - assert test_metric_value == sklearn.metrics.accuracy_score( - _TEST_Y_TEST_CLASSIFICATION_BINARY, - _TEST_Y_PRED_CLASSIFICATION_BINARY, - ) - - @pytest.mark.usefixtures("google_auth_mock", "mock_create_study") - def test_run_trial_infeasible(self): - vertexai.init(project=_TEST_PROJECT, location=_TEST_LOCATION) - vertexai.preview.init(remote=True) - - class TestTrainer(remote.VertexModel): - @vertexai.preview.developer.mark.train( - remote_config=_TEST_TRAINING_CONFIG, - ) - def train(self, x_train, y_train, x_test, y_test): - raise RuntimeError() - - def get_model_func(): - return TestTrainer() - - test_tuner = VizierHyperparameterTuner( - get_model_func=get_model_func, - max_trial_count=16, - parallel_trial_count=4, - hparam_space=[], - ) - test_trial = Trial(name="trial_0", parameters=[]) - trial_output = test_tuner._run_trial( - x_train=_TEST_X_TRAIN, - y_train=_TEST_Y_TRAIN, - x_test=_TEST_X_TEST, - y_test=_TEST_Y_TEST_REGRESSION, - trial=test_trial, - ) - assert not trial_output - - @pytest.mark.usefixtures("google_auth_mock", "mock_create_study") - def test_run_trial_unsupported_model_type(self): - def get_model_func(): - return mock.Mock() - - test_tuner = VizierHyperparameterTuner( - get_model_func=get_model_func, - max_trial_count=16, - parallel_trial_count=4, - hparam_space=[], - ) - test_trial = Trial(name="trial_0", parameters=[]) - with pytest.raises(ValueError, match="Unsupported model type"): - test_tuner._run_trial( - x_train=_TEST_X_TRAIN, - y_train=_TEST_Y_TRAIN, - x_test=_TEST_X_TEST, - y_test=_TEST_Y_TEST_REGRESSION, - trial=test_trial, - ) - - @pytest.mark.usefixtures("google_auth_mock", "mock_uuid", "mock_create_study") - def test_fit( - self, - mock_executor_map, - mock_suggest_trials, - mock_complete_trial, - ): - def get_model_func(): - return - - mock_suggest_trials.return_value.result.side_effect = [ - SuggestTrialsResponse( - trials=[Trial(name="trial_1"), Trial(name="trial_2")] - ), - 
SuggestTrialsResponse( - trials=[ - Trial(name="trial_3"), - Trial(name="trial_4"), - ] - ), - ] - model_1, model_2, model_3, model_4 = (mock.Mock() for _ in range(4)) - mock_executor_map.side_effect = [ - [(model_1, 0.01), (model_2, 0.03)], - [(model_3, 0.02), (model_4, 0.05)], - ] - test_tuner = VizierHyperparameterTuner( - get_model_func=get_model_func, - max_trial_count=4, - parallel_trial_count=2, - hparam_space=[], - ) - test_tuner.fit(x=_TEST_X_TEST, y=_TEST_Y_TEST_CLASSIFICATION_BINARY) - - assert mock_suggest_trials.call_count == 2 - assert mock_executor_map.call_count == 2 - # check fixed_runtime_params in first executor.map call is empty - assert not mock_executor_map.call_args_list[0][0][1][0][6] - assert mock_complete_trial.call_count == 4 - assert test_tuner.models == { - "trial_1": model_1, - "trial_2": model_2, - "trial_3": model_3, - "trial_4": model_4, - } - - @pytest.mark.usefixtures("google_auth_mock", "mock_uuid", "mock_create_study") - def test_fit_varying_parallel_trial_count_and_fixed_runtime_params( - self, - mock_executor_map, - mock_suggest_trials, - mock_complete_trial, - ): - def get_model_func(): - return - - mock_suggest_trials.return_value.result.side_effect = [ - SuggestTrialsResponse( - trials=[Trial(name="trial_1"), Trial(name="trial_2")] - ), - SuggestTrialsResponse( - trials=[ - Trial(name="trial_3"), - Trial(name="trial_4"), - ] - ), - SuggestTrialsResponse( - trials=[ - Trial(name="trial_5"), - ] - ), - ] - model_1, model_2, model_3, model_4, model_5 = (mock.Mock() for _ in range(5)) - mock_executor_map.side_effect = [ - [(model_1, 0.01), (model_2, 0.03)], - [(model_3, 0.02), (model_4, 0.05)], - [(model_5, 0.06)], - ] - test_tuner = VizierHyperparameterTuner( - get_model_func=get_model_func, - max_trial_count=5, - parallel_trial_count=2, - hparam_space=[], - ) - test_tuner.fit( - x=_TEST_X_TEST, - y=_TEST_Y_TEST_CLASSIFICATION_BINARY, - x_test=_TEST_X_TEST, - y_test=_TEST_Y_TEST_CLASSIFICATION_BINARY, - num_epochs=5, - ) - - assert mock_suggest_trials.call_count == 3 - assert mock_executor_map.call_count == 3 - # check fixed_runtime_params in first executor.map call is non-empty - assert mock_executor_map.call_args_list[0][0][1][0][6] == {"num_epochs": 5} - assert mock_complete_trial.call_count == 5 - assert test_tuner.models == { - "trial_1": model_1, - "trial_2": model_2, - "trial_3": model_3, - "trial_4": model_4, - "trial_5": model_5, - } - - @pytest.mark.usefixtures("google_auth_mock", "mock_uuid", "mock_create_study") - def test_fit_max_failed_trial_count( - self, - mock_executor_map, - mock_suggest_trials, - mock_complete_trial, - ): - def get_model_func(): - return - - mock_suggest_trials.return_value.result.return_value = SuggestTrialsResponse( - trials=[Trial(name="trial_1")] - ) - - mock_executor_map.return_value = [None] - - test_tuner = VizierHyperparameterTuner( - get_model_func=get_model_func, - max_trial_count=2, - parallel_trial_count=1, - hparam_space=[], - max_failed_trial_count=1, - ) - - with pytest.raises( - ValueError, match="Maximum number of failed trials reached." 
- ): - test_tuner.fit( - x=_TEST_X_TEST, - y=_TEST_Y_TEST_CLASSIFICATION_BINARY, - num_epochs=5, - ) - - assert mock_suggest_trials.call_count == 1 - assert mock_executor_map.call_count == 1 - # check fixed_runtime_params in first executor.map call is non-empty - assert mock_executor_map.call_args_list[0][0][1][0][6] == {"num_epochs": 5} - assert mock_complete_trial.call_count == 1 - assert not test_tuner.models - - @pytest.mark.usefixtures("google_auth_mock", "mock_uuid", "mock_create_study") - def test_fit_all_trials_failed( - self, - mock_executor_map, - mock_suggest_trials, - mock_complete_trial, - ): - def get_model_func(): - return - - mock_suggest_trials.return_value.result.side_effect = [ - SuggestTrialsResponse(trials=[Trial(name="trial_1")]), - SuggestTrialsResponse(trials=[Trial(name="trial_2")]), - ] - - mock_executor_map.return_value = [None] - - test_tuner = VizierHyperparameterTuner( - get_model_func=get_model_func, - max_trial_count=2, - parallel_trial_count=1, - hparam_space=[], - max_failed_trial_count=0, - ) - - with pytest.raises(ValueError, match="All trials failed."): - test_tuner.fit( - x=_TEST_X_TEST, - y=_TEST_Y_TEST_CLASSIFICATION_BINARY, - ) - - assert mock_suggest_trials.call_count == 2 - assert mock_executor_map.call_count == 2 - assert mock_complete_trial.call_count == 2 - assert not test_tuner.models - - @pytest.mark.usefixtures("google_auth_mock", "mock_uuid", "mock_create_study") - def test_get_model_param_type_mapping(self): - hparam_space = [ - { - "parameter_id": "penalty", - "categorical_value_spec": {"values": ["l1", "l2"]}, - }, - { - "parameter_id": "C", - "discrete_value_spec": {"values": [0.002, 0.01, 0.03]}, - }, - { - "parameter_id": "epochs", - "integer_value_spec": {"min_value": 1, "max_value": 5.0}, - }, - { - "parameter_id": "learning_rate", - "double_value_spec": {"min_value": 1, "max_value": 5}, - }, - ] - test_tuner = VizierHyperparameterTuner( - get_model_func=None, - max_trial_count=16, - parallel_trial_count=4, - hparam_space=hparam_space, - ) - expected_mapping = { - "penalty": str, - "C": float, - "epochs": int, - "learning_rate": float, - } - - assert expected_mapping == test_tuner._get_model_param_type_mapping() - - @pytest.mark.parametrize( - "test_get_model_func,expected_fixed_init_params", - [ - (lambda x, y: None, {"x": _TEST_X_TRAIN, "y": _TEST_Y_TRAIN}), - (lambda X, y: None, {"X": _TEST_X_TRAIN, "y": _TEST_Y_TRAIN}), - ( - lambda x_train, y_train: None, - {"x_train": _TEST_X_TRAIN, "y_train": _TEST_Y_TRAIN}, - ), - ( - lambda X_train, y_train: None, - {"X_train": _TEST_X_TRAIN, "y_train": _TEST_Y_TRAIN}, - ), - ], - ) - @pytest.mark.usefixtures("google_auth_mock", "mock_uuid", "mock_create_study") - def test_fit_get_model_func_params( - self, - test_get_model_func, - expected_fixed_init_params, - mock_executor_map, - mock_suggest_trials, - mock_complete_trial, - ): - mock_suggest_trials.return_value.result.side_effect = [ - SuggestTrialsResponse(trials=[Trial(name="trial_1")]), - SuggestTrialsResponse(trials=[Trial(name="trial_2")]), - SuggestTrialsResponse(trials=[Trial(name="trial_3")]), - SuggestTrialsResponse(trials=[Trial(name="trial_4")]), - ] - model_1, model_2, model_3, model_4 = (mock.Mock() for _ in range(4)) - mock_executor_map.side_effect = [ - [(model_1, 0.01)], - [(model_2, 0.03)], - [(model_3, 0.02)], - [(model_4, 0.05)], - ] - test_tuner = VizierHyperparameterTuner( - get_model_func=test_get_model_func, - max_trial_count=4, - parallel_trial_count=1, - hparam_space=[], - ) - test_tuner.fit( - x=_TEST_X_TRAIN, - 
y=_TEST_Y_TRAIN, - x_test=_TEST_X_TEST, - y_test=_TEST_Y_TEST_CLASSIFICATION_BINARY, - ) - - assert mock_suggest_trials.call_count == 4 - assert mock_executor_map.call_count == 4 - # check fixed_runtime_params in first executor.map call is empty - assert not mock_executor_map.call_args_list[0][0][1][0][6] - assert mock_complete_trial.call_count == 4 - assert test_tuner.models == { - "trial_1": model_1, - "trial_2": model_2, - "trial_3": model_3, - "trial_4": model_4, - } - - test_map_args = [call_args[0] for call_args in mock_executor_map.call_args_list] - test_fixed_init_params = [] - for map_args in test_map_args: - test_fixed_init_params.append( - [trial_inputs[5] for trial_inputs in map_args[1]] - ) - assert test_fixed_init_params == [ - [expected_fixed_init_params], - [expected_fixed_init_params], - [expected_fixed_init_params], - [expected_fixed_init_params], - ] - - @pytest.mark.usefixtures("google_auth_mock", "mock_create_study") - def test_get_lightning_train_method_and_params_local(self): - vertexai.init(project=_TEST_PROJECT, location=_TEST_LOCATION) - - def get_model_func(): - return - - test_tuner = VizierHyperparameterTuner( - get_model_func=get_model_func, - max_trial_count=4, - parallel_trial_count=2, - hparam_space=[], - ) - test_model = { - "model": mock.Mock(), - "trainer": mock.Mock(), - "train_dataloaders": mock.Mock(), - } - ( - test_train_method, - test_params, - ) = test_tuner._get_lightning_train_method_and_params(test_model, "") - assert test_train_method == test_model["trainer"].fit - assert test_params == { - "model": test_model["model"], - "train_dataloaders": test_model["train_dataloaders"], - } - - @pytest.mark.usefixtures("google_auth_mock", "mock_create_study") - def test_get_lightning_train_method_and_params_remote(self): - vertexai.init( - project=_TEST_PROJECT, - location=_TEST_LOCATION, - staging_bucket=_TEST_STAGING_BUCKET, - ) - vertexai.preview.init(remote=True) - - def get_model_func(): - return - - test_tuner = VizierHyperparameterTuner( - get_model_func=get_model_func, - max_trial_count=4, - parallel_trial_count=2, - hparam_space=[], - ) - - class TestTrainer: - def fit(self, model, train_dataloaders): - pass - - test_model = { - "model": mock.Mock(), - "trainer": mock.Mock(), - "train_dataloaders": mock.Mock(), - } - - test_model["trainer"].fit = VertexRemoteFunctor( - TestTrainer().fit, remote_executor=training.remote_training - ) - ( - test_train_method, - test_params, - ) = test_tuner._get_lightning_train_method_and_params( - test_model, _TEST_TRIAL_NAME - ) - assert test_params == { - "model": test_model["model"], - "train_dataloaders": test_model["train_dataloaders"], - } - assert test_train_method == test_model["trainer"].fit - assert ( - test_train_method.vertex.remote_config.staging_bucket - == _TEST_TRIAL_STAGING_BUCKET - ) - - @pytest.mark.usefixtures("google_auth_mock", "mock_create_study") - def test_run_trial_lightning( - self, - ): - # For unit tests only test local mode. 
- vertexai.init(project=_TEST_PROJECT, location=_TEST_LOCATION) - - test_lightning_model = { - "model": mock.Mock(), - "trainer": mock.Mock(), - "train_dataloaders": mock.Mock(), - } - test_lightning_model[ - "model" - ].predict.return_value = _TEST_Y_PRED_CLASSIFICATION_BINARY - - def get_model_func(): - return test_lightning_model - - test_tuner = VizierHyperparameterTuner( - get_model_func=get_model_func, - max_trial_count=16, - parallel_trial_count=4, - hparam_space=[], - ) - test_trial = Trial(name="trial_0", parameters=[]) - test_trained_model, test_metric_value = test_tuner._run_trial( - x_train=_TEST_X_TRAIN, - y_train=_TEST_Y_TRAIN, - x_test=_TEST_X_TEST, - y_test=_TEST_Y_TEST_CLASSIFICATION_BINARY, - trial=test_trial, - fixed_runtime_params={"ckpt_path": "test_ckpt_path"}, - ) - assert test_trained_model == test_lightning_model - test_lightning_model["trainer"].fit.assert_called_once_with( - model=test_lightning_model["model"], - train_dataloaders=test_lightning_model["train_dataloaders"], - ckpt_path="test_ckpt_path", - ) - test_lightning_model["model"].predict.assert_called_once_with(_TEST_X_TEST) - assert test_metric_value == sklearn.metrics.accuracy_score( - _TEST_Y_TEST_CLASSIFICATION_BINARY, - _TEST_Y_PRED_CLASSIFICATION_BINARY, - ) - - @pytest.mark.usefixtures("google_auth_mock", "mock_create_study") - def test_get_keras_train_method_and_params(self): - vertexai.init(project=_TEST_PROJECT, location=_TEST_LOCATION) - vertexai.preview.init(remote=True) - - def get_model_func(): - tf.keras.Sequential = vertexai.preview.remote(tf.keras.Sequential) - model = tf.keras.Sequential( - [tf.keras.layers.Dense(5, input_shape=(4,)), tf.keras.layers.Softmax()] - ) - model.compile(optimizer="adam", loss="mean_squared_error") - model.fit.vertex.remote_config.staging_bucket = _TEST_STAGING_BUCKET - return model - - test_tuner = VizierHyperparameterTuner( - get_model_func=get_model_func, - max_trial_count=16, - parallel_trial_count=4, - hparam_space=[], - ) - test_model = get_model_func() - test_train_method, data_params = test_tuner._get_train_method_and_params( - test_model, - _TEST_X_TRAIN, - _TEST_Y_TRAIN, - _TEST_TRIAL_NAME, - params=["x", "y"], - ) - assert test_train_method._remote_executor == training.remote_training - assert ( - test_train_method.vertex.remote_config.staging_bucket - == _TEST_TRIAL_STAGING_BUCKET - ) - assert data_params == {"x": _TEST_X_TRAIN, "y": _TEST_Y_TRAIN} - - @pytest.mark.usefixtures("google_auth_mock", "mock_create_study") - def test_get_sklearn_train_method_and_params(self): - vertexai.init(project=_TEST_PROJECT, location=_TEST_LOCATION) - vertexai.preview.init(remote=True) - - def get_model_func(): - LogisticRegression = vertexai.preview.remote(_logistic.LogisticRegression) - model = LogisticRegression(penalty="l1") - model.fit.vertex.remote_config.staging_bucket = _TEST_STAGING_BUCKET - return model - - test_tuner = VizierHyperparameterTuner( - get_model_func=get_model_func, - max_trial_count=16, - parallel_trial_count=4, - hparam_space=[], - ) - test_model = get_model_func() - (test_train_method, data_params,) = test_tuner._get_train_method_and_params( - test_model, - _TEST_X_TRAIN, - _TEST_Y_TRAIN, - _TEST_TRIAL_NAME, - params=["X", "y"], - ) - assert test_train_method._remote_executor == training.remote_training - assert ( - test_train_method.vertex.remote_config.staging_bucket - == _TEST_TRIAL_STAGING_BUCKET - ) - assert data_params == {"X": _TEST_X_TRAIN, "y": _TEST_Y_TRAIN} diff --git a/vertexai/preview/__init__.py b/vertexai/preview/__init__.py 
index 1ab64c481f..5f75a2b9b5 100644 --- a/vertexai/preview/__init__.py +++ b/vertexai/preview/__init__.py @@ -17,24 +17,6 @@ from google.cloud.aiplatform.metadata import metadata -from vertexai.preview import developer -from vertexai.preview import hyperparameter_tuning -from vertexai.preview import initializer -from vertexai.preview import tabular_models -from vertexai.preview._workflow.driver import ( - remote as remote_decorator, -) -from vertexai.preview._workflow.shared import ( - model_utils, -) - - -global_config = initializer.global_config -init = global_config.init -remote = remote_decorator.remote -VertexModel = remote_decorator.VertexModel -register = model_utils.register -from_pretrained = model_utils.from_pretrained # For Vertex AI Experiment. @@ -51,11 +33,6 @@ __all__ = ( - "init", - "remote", - "VertexModel", - "register", - "from_pretrained", "start_run", "end_run", "get_experiment_df", @@ -63,7 +40,4 @@ "log_metrics", "log_time_series_metrics", "log_classification_metrics", - "developer", - "hyperparameter_tuning", - "tabular_models", ) diff --git a/vertexai/preview/_workflow/__init__.py b/vertexai/preview/_workflow/__init__.py deleted file mode 100644 index 875d5556f2..0000000000 --- a/vertexai/preview/_workflow/__init__.py +++ /dev/null @@ -1,15 +0,0 @@ -# Copyright 2023 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -"""The vertexai _workflow module.""" diff --git a/vertexai/preview/_workflow/driver/__init__.py b/vertexai/preview/_workflow/driver/__init__.py deleted file mode 100644 index cdc9030ffa..0000000000 --- a/vertexai/preview/_workflow/driver/__init__.py +++ /dev/null @@ -1,276 +0,0 @@ -# -*- coding: utf-8 -*- - -# Copyright 2023 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-# -import functools -import inspect -from typing import Any, Callable, Dict, Iterator, Optional, Tuple, Type, TypeVar - -from google.cloud.aiplatform import jobs -import vertexai -from vertexai.preview._workflow import launcher -from vertexai.preview._workflow import shared -from vertexai.preview._workflow.executor import ( - training, - prediction, -) -from vertexai.preview._workflow.executor import ( - remote_container_training, -) - -ModelBase = TypeVar("ModelBase") -ModelVertexSubclass = TypeVar("ModelVertexSubclass", bound=ModelBase) - -_WRAPPED_CLASS_PREFIX = "_Vertex" - - -class VertexRemoteFunctor: - """Functor to be used to wrap methods for remote execution.""" - - def __init__( - self, - method: Callable[..., Any], - remote_executor: Callable[..., Any], - remote_executor_kwargs: Optional[Dict[str, Any]] = None, - ): - """Wraps a method into VertexRemoteFunctor so that the method is remotely executable. - - Example Usage: - ``` - functor = VertexRemoteFunctor(LogisticRegression.fit, training.remote_training) - setattr(LogisticRegression, "fit", functor) - - model = LogisticRegression() - model.fit.vertex.remote_config.staging_bucket = REMOTE_JOB_BUCKET - model.fit.vertex.remote=True - model.fit(X_train, y_train) - ``` - - Args: - method (Callable[..., Any]): - Required. The method to be wrapped. - remote_executor (Callable[..., Any]): - Required. The remote executor for the method. - remote_executor_kwargs (Dict[str, Any]): - Optional. kwargs used in remote executor. - """ - self._method = method - # TODO(b/278074360) Consider multiple levels of configurations. - if inspect.ismethod(method): - # For instance method, instantiate vertex config directly. - self.vertex = shared.configs.VertexConfig() - else: - # For function, instantiate vertex config later, when the method is - # bounded to an instance. 
- self.vertex = shared.configs.VertexConfig - self._remote_executor = remote_executor - self._remote_executor_kwargs = remote_executor_kwargs or {} - functools.update_wrapper(self, method) - - def __get__(self, instance, owner) -> Any: - # For class and instance method that already instantiate a new functor, - # return self directly - if (instance is None) or isinstance(self.vertex, shared.configs.VertexConfig): - return self - - # Instantiate a new functor for the instance method - functor_with_instance_bound_method = self.__class__( - self._method.__get__(instance, owner), - self._remote_executor, - self._remote_executor_kwargs, - ) - functor_with_instance_bound_method.vertex = self.vertex() - setattr(instance, self._method.__name__, functor_with_instance_bound_method) - return functor_with_instance_bound_method - - def __call__(self, *args, **kwargs) -> Any: - bound_args = inspect.signature(self._method).bind(*args, **kwargs) - - # NOTE: may also need to handle the case of - # bound_args.arguments.get("self"), - - invokable = shared._Invokable( - instance=getattr(self._method, "__self__"), - method=self._method, - bound_arguments=bound_args, - remote_executor=self._remote_executor, - remote_executor_kwargs=self._remote_executor_kwargs, - vertex_config=self.vertex, - ) - - return _workflow_driver.invoke(invokable) - - -def _supported_member_iter(instance: Any) -> Iterator[Tuple[str, Callable[..., Any]]]: - """Iterates through known method names and returns matching methods.""" - for attr_name in shared.supported_frameworks.REMOTE_TRAINING_OVERRIDE_LIST: - attr_value = getattr(instance, attr_name, None) - if attr_value: - yield attr_name, attr_value, training.remote_training, None - - for attr_name in shared.supported_frameworks.REMOTE_PREDICTION_OVERRIDE_LIST: - attr_value = getattr(instance, attr_name, None) - if attr_value: - yield attr_name, attr_value, prediction.remote_prediction, None - - -def _patch_class(cls: Type[ModelBase]) -> Type[ModelVertexSubclass]: - """Creates a new class that inherited from original class and add Vertex remote execution support.""" - - if hasattr(cls, "_wrapped_by_vertex"): - return cls - - new_cls = type( - f"{_WRAPPED_CLASS_PREFIX}{cls.__name__}", (cls,), {"_wrapped_by_vertex": True} - ) - for ( - attr_name, - attr_value, - remote_executor, - remote_executor_kwargs, - ) in _supported_member_iter(cls): - setattr( - new_cls, - attr_name, - VertexRemoteFunctor(attr_value, remote_executor, remote_executor_kwargs), - ) - - return new_cls - - -def _rewrapper( - instance: Any, - wrapped_class: Any, - config_map: Dict[str, shared.configs.VertexConfig], -): - """Rewraps in place instances after remote execution has completed. - - Args: - instance (Any): - Required. Instance to rewrap. - wrapped_class (Any): - Required. The class type that the instance will be wrapped into. - config_map (Dict[str, shared.configs.VertexConfig]): - Required. Instance of config before unwrapping. Maintains - the config after wrapping. 
- """ - instance.__class__ = wrapped_class - for attr_name, ( - vertex_config, - remote_executor, - remote_executor_kwargs, - ) in config_map.items(): - method = getattr(instance, attr_name) - if isinstance(method, VertexRemoteFunctor): - method.vertex = vertex_config - setattr(instance, attr_name, method) - else: - functor = VertexRemoteFunctor( - method, remote_executor, remote_executor_kwargs - ) - functor.vertex = vertex_config - setattr(instance, attr_name, functor) - - -def _unwrapper(instance: Any) -> Callable[..., Any]: - """Unwraps all Vertex functor method. - - This should be done before locally executing or remotely executing. - """ - current_class = instance.__class__ - super_class = current_class.__mro__[1] - wrapped_in_place = ( - current_class.__name__ != f"{_WRAPPED_CLASS_PREFIX}{super_class.__name__}" - ) - - config_map = dict() - - if not wrapped_in_place: - for ( - attr_name, - attr_value, - remote_executor, - remote_executor_kwargs, - ) in _supported_member_iter(instance): - if isinstance(attr_value, VertexRemoteFunctor): - config_map[attr_name] = ( - attr_value.vertex, - remote_executor, - remote_executor_kwargs, - ) - setattr(instance, attr_name, attr_value._method) - - instance.__class__ = super_class - - else: - for attr_name, attr_value in inspect.getmembers(instance): - if isinstance(attr_value, VertexRemoteFunctor): - config_map[attr_name] = ( - attr_value.vertex, - attr_value._remote_executor, - attr_value._remote_executor_kwargs, - ) - setattr(instance, attr_name, attr_value._method) - - return functools.partial( - _rewrapper, wrapped_class=current_class, config_map=config_map - ) - - -class _WorkFlowDriver: - def __init__(self): - self._launcher = launcher._WorkflowLauncher() - - def invoke(self, invokable: shared._Invokable) -> Any: - """ - Wrapper should forward implementation to this method. - - NOTE: Not threadsafe w.r.t the instance. - """ - - rewrapper = None - # unwrap - if ( - invokable.instance is not None - and invokable.remote_executor is not remote_container_training.train - ): - rewrapper = _unwrapper(invokable.instance) - - result = self._launch(invokable, rewrapper) - - # rewrap the original instance - if rewrapper and invokable.instance is not None: - rewrapper(invokable.instance) - # also rewrap the result if the result is an estimator not a dataset - if rewrapper and isinstance(result, type(invokable.instance)): - rewrapper(result) - - if hasattr(result, "state") and result.state in jobs._JOB_ERROR_STATES: - raise RuntimeError("Remote job failed with:\n%s" % result.error) - - return result - - def _launch(self, invokable: shared._Invokable, rewrapper: Any) -> Any: - """ - Launches an invokable. - """ - return self._launcher.launch( - invokable=invokable, - global_remote=vertexai.preview.global_config.remote, - rewrapper=rewrapper, - ) - - -_workflow_driver = _WorkFlowDriver() diff --git a/vertexai/preview/_workflow/driver/remote.py b/vertexai/preview/_workflow/driver/remote.py deleted file mode 100644 index b26a12b575..0000000000 --- a/vertexai/preview/_workflow/driver/remote.py +++ /dev/null @@ -1,115 +0,0 @@ -# -*- coding: utf-8 -*- - -# Copyright 2023 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
-# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# - -import abc -import inspect -from typing import Any, Callable, Dict, Optional, Type -import warnings -from vertexai.preview._workflow import driver -from vertexai.preview._workflow.executor import ( - training, -) -from vertexai.preview._workflow.serialization_engine import ( - any_serializer, -) -from vertexai.preview._workflow.shared import ( - constants, - supported_frameworks, -) -from vertexai.preview.developer import remote_specs - - -def remote_method_decorator( - method: Callable[..., Any], - remote_executor: Callable[..., Any], - remote_executor_kwargs: Optional[Dict[str, Any]] = None, -) -> Callable[..., Any]: - """Wraps methods as Functor object to support configuration on method.""" - return driver.VertexRemoteFunctor(method, remote_executor, remote_executor_kwargs) - - -warnings.warn(constants._V2_0_WARNING_MSG, DeprecationWarning, stacklevel=1) - - -def remote_class_decorator(cls: Type) -> Type: - """Add Vertex attributes to a class object.""" - - if not supported_frameworks._is_oss(cls): - raise ValueError( - f"Class {cls.__name__} not supported. " - "Currently support remote execution on " - f"{supported_frameworks.REMOTE_FRAMEWORKS} classes." - ) - - return driver._patch_class(cls) - - -def remote(cls_or_method: Any) -> Any: - """Takes a class or method and add Vertex remote execution support. - - ex: - ``` - - LogisticRegression = vertexai.preview.remote(LogisticRegression) - model = LogisticRegression() - model.fit.vertex.remote_config.staging_bucket = REMOTE_JOB_BUCKET - model.fit.vertex.remote=True - model.fit(X_train, y_train) - ``` - - Args: - cls_or_method (Any): - Required. A class or method that will be added Vertex remote - execution support. - - Returns: - A class or method that can be executed remotely. - """ - # Make sure AnySerializer has been instantiated before wrapping any classes. - if any_serializer.AnySerializer not in any_serializer.AnySerializer._instances: - any_serializer.AnySerializer() - - if inspect.isclass(cls_or_method): - return remote_class_decorator(cls_or_method) - else: - return remote_method_decorator(cls_or_method, training.remote_training) - - -class VertexModel(metaclass=abc.ABCMeta): - """mixin class that can be used to add Vertex AI remote execution to a custom model.""" - - def __init__(self): - vertex_wrapper = False - for _, attr_value in inspect.getmembers(self): - if isinstance(attr_value, driver.VertexRemoteFunctor): - vertex_wrapper = True - break - # TODO(b/279631878) Remove this check once we support more decorators. - if not vertex_wrapper: - raise ValueError( - "No method is enabled for Vertex remote training. Please decorator " - "your training methods with `@vertexai.preview.developer.mark.train`." 
- ) - self._cluster_spec = None - - @property - def cluster_spec(self): - return self._cluster_spec - - @cluster_spec.setter - def cluster_spec(self, cluster_spec: remote_specs._ClusterSpec): - self._cluster_spec = cluster_spec diff --git a/vertexai/preview/_workflow/executor/__init__.py b/vertexai/preview/_workflow/executor/__init__.py deleted file mode 100644 index 28d01815b1..0000000000 --- a/vertexai/preview/_workflow/executor/__init__.py +++ /dev/null @@ -1,54 +0,0 @@ -# -*- coding: utf-8 -*- - -# Copyright 2023 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# - -from typing import Any - -from vertexai.preview._workflow import shared -from vertexai.preview._workflow.executor import ( - remote_container_training, - training, - prediction, -) - - -class _WorkflowExecutor: - """Executes an invokable either locally or remotely.""" - - def local_execute(self, invokable: shared._Invokable) -> Any: - if invokable.remote_executor is remote_container_training.train: - raise ValueError( - "Remote container train is only supported for remote mode." - ) - return invokable.method( - *invokable.bound_arguments.args, **invokable.bound_arguments.kwargs - ) - - def remote_execute(self, invokable: shared._Invokable, rewrapper: Any) -> Any: - if invokable.remote_executor not in ( - remote_container_training.train, - training.remote_training, - prediction.remote_prediction, - ): - raise ValueError(f"{invokable.remote_executor} is not supported.") - - if invokable.remote_executor == remote_container_training.train: - invokable.remote_executor(invokable) - else: - return invokable.remote_executor(invokable, rewrapper=rewrapper) - - -_workflow_executor = _WorkflowExecutor() diff --git a/vertexai/preview/_workflow/executor/persistent_resource_util.py b/vertexai/preview/_workflow/executor/persistent_resource_util.py deleted file mode 100644 index a3e8ce26ae..0000000000 --- a/vertexai/preview/_workflow/executor/persistent_resource_util.py +++ /dev/null @@ -1,265 +0,0 @@ -# Copyright 2023 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-# - -import datetime -import time -from typing import List, Optional - -from google.api_core import exceptions -from google.api_core import gapic_v1 -from google.api_core.client_options import ClientOptions -from google.cloud import aiplatform -from google.cloud.aiplatform import base -from google.cloud.aiplatform_v1beta1.services.persistent_resource_service import ( - PersistentResourceServiceClient, -) -from google.cloud.aiplatform_v1beta1.types import persistent_resource_service -from google.cloud.aiplatform_v1beta1.types.persistent_resource import ( - PersistentResource, -) -from google.cloud.aiplatform_v1beta1.types.persistent_resource import ( - ResourcePool, - ResourceRuntimeSpec, - ServiceAccountSpec, -) -from google.cloud.aiplatform_v1beta1.types.persistent_resource_service import ( - GetPersistentResourceRequest, -) -from vertexai.preview.developer import remote_specs - - -GAPIC_VERSION = aiplatform.__version__ -_LOGGER = base.Logger(__name__) - -_DEFAULT_REPLICA_COUNT = 1 -_DEFAULT_MACHINE_TYPE = "n1-standard-4" -_DEFAULT_DISK_TYPE = "pd-ssd" -_DEFAULT_DISK_SIZE_GB = 100 - - -def _create_persistent_resource_client(location: Optional[str] = "us-central1"): - - client_info = gapic_v1.client_info.ClientInfo( - gapic_version=GAPIC_VERSION, - ) - - api_endpoint = f"{location}-aiplatform.googleapis.com" - - return PersistentResourceServiceClient( - client_options=ClientOptions(api_endpoint=api_endpoint), - client_info=client_info, - ) - - -def cluster_resource_name(project: str, location: str, name: str) -> str: - """Helper method to get persistent resource name.""" - client = _create_persistent_resource_client(location) - return client.persistent_resource_path(project, location, name) - - -def check_persistent_resource( - cluster_resource_name: str, service_account: Optional[str] = None -) -> bool: - """Helper method to check if a persistent resource exists or not. - - Args: - cluster_resource_name: Persistent Resource name. Has the form: - ``projects/my-project/locations/my-region/persistentResource/cluster-name``. - service_account: Service account. - - Returns: - True if a Persistent Resource exists. - - Raises: - ValueError: if existing cluster is not RUNNING. - ValueError: if service account is specified but mismatch with existing cluster. - """ - # Parse resource name to get the location. - locataion = cluster_resource_name.split("/")[3] - client = _create_persistent_resource_client(locataion) - request = GetPersistentResourceRequest( - name=cluster_resource_name, - ) - try: - response = client.get_persistent_resource(request) - except exceptions.NotFound: - return False - - if response.state != PersistentResource.State.RUNNING: - raise ValueError( - "The existing cluster `", - cluster_resource_name, - "` isn't running, please specify a different cluster_name.", - ) - # Check if service account of this existing persistent resource matches initialized one. 
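    # (a cluster with no service_account_spec yields None below, which fails the comparison
    # whenever a specific service account was requested)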
- existing_cluster_service_account = ( - response.resource_runtime_spec.service_account_spec.service_account - if response.resource_runtime_spec.service_account_spec - else None - ) - - if ( - service_account is not None - and existing_cluster_service_account != service_account - ): - raise ValueError( - "Expect the existing cluster was created with the service account `", - service_account, - "`, but got `", - existing_cluster_service_account, - "` , please ensure service account is consistent with the initialization.", - ) - return True - - -def _default_persistent_resource() -> PersistentResource: - """Default persistent resource.""" - resource_pools = [] - resource_pool = ResourcePool() - resource_pool.replica_count = _DEFAULT_REPLICA_COUNT - resource_pool.machine_spec.machine_type = _DEFAULT_MACHINE_TYPE - resource_pool.disk_spec.boot_disk_type = _DEFAULT_DISK_TYPE - resource_pool.disk_spec.boot_disk_size_gb = _DEFAULT_DISK_SIZE_GB - resource_pools.append(resource_pool) - - return PersistentResource(resource_pools=resource_pools) - - -# TODO(b/294600649) -def _polling_delay(num_attempts: int, time_scale: float) -> datetime.timedelta: - """Computes a delay to the next attempt to poll the Vertex service. - - This does bounded exponential backoff, starting with $time_scale. - If $time_scale == 0, it starts with a small time interval, less than - 1 second. - - Args: - num_attempts: The number of times have we polled and found that the - desired result was not yet available. - time_scale: The shortest polling interval, in seconds, or zero. Zero is - treated as a small interval, less than 1 second. - - Returns: - A recommended delay interval, in seconds. - """ - # The polling schedule is slow initially , and then gets faster until 4 - # attempts (after that the sleeping time remains the same). - small_interval = 30.0 # Seconds - interval = max(time_scale, small_interval) * 0.76 ** min(num_attempts, 4) - return datetime.timedelta(seconds=interval) - - -def _get_persistent_resource(cluster_resource_name: str): - """Get persistent resource. - - Args: - cluster_resource_name: - "projects//locations//persistentResources/". - - Returns: - aiplatform_v1beta1.PersistentResource if state is RUNNING. - - Raises: - ValueError: Invalid cluster resource name. - RuntimeError: Service returns error. - RuntimeError: Cluster resource state is STOPPING. - RuntimeError: Cluster resource state is ERROR. - """ - - # Parse resource name to get the location. 
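    # e.g. "projects/my-project/locations/us-central1/persistentResources/my-cluster" -> "us-central1"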
- locataion = cluster_resource_name.split("/")[3] - client = _create_persistent_resource_client(locataion) - request = GetPersistentResourceRequest( - name=cluster_resource_name, - ) - - num_attempts = 0 - while True: - try: - response = client.get_persistent_resource(request) - except exceptions.NotFound as e: - raise ValueError("Invalid cluster_resource_name (404 not found).") from e - if response.error.message: - raise RuntimeError("Cluster returned an error.", response.error.message) - - print("Cluster State =", response.state) - if response.state == PersistentResource.State.RUNNING: - return response - elif response.state == PersistentResource.State.STOPPING: - raise RuntimeError("The cluster is stopping.") - elif response.state == PersistentResource.State.ERROR: - raise RuntimeError("The cluster encountered an error.") - # Polling decay - sleep_time = _polling_delay(num_attempts=num_attempts, time_scale=90.0) - num_attempts += 1 - print( - "Waiting for cluster provisioning; attempt {}; sleeping for {} seconds".format( - num_attempts, sleep_time - ) - ) - time.sleep(sleep_time.total_seconds()) - - -def create_persistent_resource( - cluster_resource_name: str, - resource_pools: Optional[List[remote_specs.ResourcePool]] = None, - service_account: Optional[str] = None, -): - """Create a persistent resource.""" - locataion = cluster_resource_name.split("/")[3] - parent = "/".join(cluster_resource_name.split("/")[:4]) - cluster_name = cluster_resource_name.split("/")[-1] - - client = _create_persistent_resource_client(locataion) - if resource_pools is None: - persistent_resource = _default_persistent_resource() - else: - # convert remote_specs.ResourcePool to GAPIC ResourcePool - pools = [] - for resource_pool in resource_pools: - pool = ResourcePool() - pool.replica_count = resource_pool.replica_count - pool.machine_spec.machine_type = resource_pool.machine_type - pool.machine_spec.accelerator_type = resource_pool.accelerator_type - pool.machine_spec.accelerator_count = resource_pool.accelerator_count - pool.disk_spec.boot_disk_type = resource_pool.boot_disk_type - pool.disk_spec.boot_disk_size_gb = resource_pool.boot_disk_size_gb - pools.append(pool) - - persistent_resource = PersistentResource(resource_pools=pools) - - enable_custom_service_account = True if service_account is not None else False - - resource_runtime_spec = ResourceRuntimeSpec( - service_account_spec=ServiceAccountSpec( - enable_custom_service_account=enable_custom_service_account, - service_account=service_account, - ), - ) - persistent_resource.resource_runtime_spec = resource_runtime_spec - request = persistent_resource_service.CreatePersistentResourceRequest( - parent=parent, - persistent_resource=persistent_resource, - persistent_resource_id=cluster_name, - ) - - try: - _ = client.create_persistent_resource(request) - except Exception as e: - raise ValueError("Failed in cluster creation due to: ", e) from e - - # Check cluster creation progress - response = _get_persistent_resource(cluster_resource_name) - _LOGGER.info(f"Cluster {response.display_name} was created successfully.") diff --git a/vertexai/preview/_workflow/executor/prediction.py b/vertexai/preview/_workflow/executor/prediction.py deleted file mode 100644 index b64f2221ca..0000000000 --- a/vertexai/preview/_workflow/executor/prediction.py +++ /dev/null @@ -1,38 +0,0 @@ -# Copyright 2023 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
-# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -from typing import Any - -from vertexai.preview._workflow import ( - shared, -) -from vertexai.preview._workflow.executor import ( - training, -) - - -def remote_prediction(invokable: shared._Invokable, rewrapper: Any): - """Wrapper function that makes a method executable by Vertex CustomJob.""" - predictions = training.remote_training(invokable=invokable, rewrapper=rewrapper) - return predictions - - -def _online_prediction(invokable: shared._Invokable): - # TODO(b/283292903) Implement online prediction method - raise ValueError("Online prediction is not currently supported.") - - -def _batch_prediction(invokable: shared._Invokable): - # TODO(b/283289019) Implement batch prediction method - raise ValueError("Batch prediction is not currently supported.") diff --git a/vertexai/preview/_workflow/executor/remote_container_training.py b/vertexai/preview/_workflow/executor/remote_container_training.py deleted file mode 100644 index e89fe0bc3b..0000000000 --- a/vertexai/preview/_workflow/executor/remote_container_training.py +++ /dev/null @@ -1,220 +0,0 @@ -# Copyright 2023 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -"""Remote container training and helper functions. -""" -from typing import Any, Dict, List -import uuid - -from google.cloud import aiplatform -from google.cloud.aiplatform.utils import worker_spec_utils -import vertexai -from vertexai.preview._workflow import shared -from vertexai.preview.developer import remote_specs -from vertexai.preview._workflow.shared import model_utils - -# job_dir container argument name -_JOB_DIR = "job_dir" - -# Worker pool specs default value constants -_DEFAULT_REPLICA_COUNT: int = 1 -_DEFAULT_MACHINE_TYPE: str = "n1-standard-4" -_DEFAULT_ACCELERATOR_COUNT: int = 0 -_DEFAULT_ACCELERATOR_TYPE: str = "ACCELERATOR_TYPE_UNSPECIFIED" -_DEFAULT_BOOT_DISK_TYPE: str = "pd-ssd" -_DEFAULT_BOOT_DISK_SIZE_GB: int = 100 - -# Custom job default name -_DEFAULT_DISPLAY_NAME = "remote-fit" - - -def _generate_worker_pool_specs( - image_uri: str, - inputs: List[str], - replica_count: int = _DEFAULT_REPLICA_COUNT, - machine_type: str = _DEFAULT_MACHINE_TYPE, - accelerator_count: int = _DEFAULT_ACCELERATOR_COUNT, - accelerator_type: str = _DEFAULT_ACCELERATOR_TYPE, - boot_disk_type: str = _DEFAULT_BOOT_DISK_TYPE, - boot_disk_size_gb: int = _DEFAULT_BOOT_DISK_SIZE_GB, -) -> List[Dict[str, Any]]: - """Helper function to generate worker pool specs for CustomJob. - - TODO(b/278786170): Use customized worker_pool_specs to specify - replica_count, machine types, number/type of worker pools, etc. 
for - distributed training. - - Args: - image_uri (str): - Required. The docker image uri for CustomJob. - inputs (List[str]): - Required. A list of inputs for CustomJob. Each item would look like - "--arg_0=value_for_arg_0". - replica_count (int): - Optional. The number of worker replicas. Assigns 1 chief replica and - replica_count - 1 worker replicas. - machine_type (str): - Optional. The type of machine to use for training. - accelerator_count (int): - Optional. The number of accelerators to attach to a worker replica. - accelerator_type (str): - Optional. Hardware accelerator type. One of - ACCELERATOR_TYPE_UNSPECIFIED, NVIDIA_TESLA_K80, NVIDIA_TESLA_P100, - NVIDIA_TESLA_V100, NVIDIA_TESLA_P4, NVIDIA_TESLA_T4 - boot_disk_type (str): - Optional. Type of the boot disk (default is `pd-ssd`). - Valid values: `pd-ssd` (Persistent Disk Solid State Drive) or - `pd-standard` (Persistent Disk Hard Disk Drive). - boot_disk_size_gb (int): - Optional. Size in GB of the boot disk (default is 100GB). - boot disk size must be within the range of [100, 64000]. - - Returns: - A list of worker pool specs in the form of dictionaries. For - replica = 1, there is one worker pool spec. For replica > 1, there are - two worker pool specs. - - Raises: - ValueError if replica_count is less than 1. - """ - if replica_count < 1: - raise ValueError( - "replica_count must be a positive number but is " f"{replica_count}." - ) - - # pylint: disable=protected-access - worker_pool_specs = worker_spec_utils._DistributedTrainingSpec.chief_worker_pool( - replica_count=replica_count, - machine_type=machine_type, - accelerator_count=accelerator_count, - accelerator_type=accelerator_type, - boot_disk_type=boot_disk_type, - boot_disk_size_gb=boot_disk_size_gb, - ).pool_specs - - # Attach a container_spec to each worker pool spec. - for spec in worker_pool_specs: - spec["container_spec"] = { - "image_uri": image_uri, - "args": inputs, - } - - return worker_pool_specs - - -# pylint: disable=protected-access -def train(invokable: shared._Invokable): - """Wrapper function that runs remote container training.""" - training_config = invokable.vertex_config.remote_config - - # user can specify either worker_pool_specs OR machine_type, replica_count etc. - remote_specs._verify_specified_remote_config_values( - training_config.worker_pool_specs, - training_config.machine_type, - training_config.replica_count, - training_config.accelerator_type, - training_config.accelerator_count, - training_config.boot_disk_type, - training_config.boot_disk_size_gb, - ) - - staging_bucket = ( - training_config.staging_bucket or vertexai.preview.global_config.staging_bucket - ) - if not staging_bucket: - raise ValueError( - "No default staging bucket set. " - "Please call `vertexai.init(staging_bucket='gs://my-bucket')." - ) - input_dir = remote_specs._gen_gcs_path(staging_bucket, model_utils._INPUT_DIR) - output_dir = remote_specs._gen_gcs_path(staging_bucket, model_utils._OUTPUT_DIR) - - # Creates a complete set of binding. - instance_binding = invokable.instance._binding - binding = invokable.bound_arguments.arguments - for arg in instance_binding: - binding[arg] = instance_binding[arg] - - # If a container accepts a job_dir argument and the user does not specify - # it, set job_dir based on the staging bucket. - if _JOB_DIR in binding and not binding[_JOB_DIR]: - binding[_JOB_DIR] = remote_specs._gen_gcs_path( - staging_bucket, model_utils._CUSTOM_JOB_DIR - ) - - # Formats arguments. 
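    # Input specs are rendered as "--<argument_name>=<value>" flags resolved against input_dir;
    # output specs get generated GCS paths under output_dir and are collected so their results
    # can be deserialized back onto the instance once the CustomJob finishes.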
- formatted_args = {} - output_specs = [] - for data in invokable.remote_executor_kwargs["additional_data"]: - if isinstance(data, remote_specs._InputParameterSpec): - formatted_args[data.argument_name] = data.format_arg(input_dir, binding) - elif isinstance(data, remote_specs._OutputParameterSpec): - formatted_args[data.argument_name] = remote_specs._gen_gcs_path( - output_dir, data.argument_name - ) - output_specs.append(data) - else: - raise ValueError(f"Invalid data type {type(data)}.") - inputs = [f"--{key}={val}" for key, val in formatted_args.items()] - - # Launches a custom job. - display_name = training_config.display_name or _DEFAULT_DISPLAY_NAME - if training_config.worker_pool_specs: - worker_pool_specs = remote_specs._prepare_worker_pool_specs( - worker_pool_specs=training_config.worker_pool_specs, - image_uri=invokable.remote_executor_kwargs["image_uri"], - args=inputs, - ) - else: - worker_pool_specs = _generate_worker_pool_specs( - image_uri=invokable.remote_executor_kwargs["image_uri"], - inputs=inputs, - replica_count=(training_config.replica_count or _DEFAULT_REPLICA_COUNT), - machine_type=(training_config.machine_type or _DEFAULT_MACHINE_TYPE), - accelerator_count=( - training_config.accelerator_count or _DEFAULT_ACCELERATOR_COUNT - ), - accelerator_type=( - training_config.accelerator_type or _DEFAULT_ACCELERATOR_TYPE - ), - boot_disk_type=(training_config.boot_disk_type or _DEFAULT_BOOT_DISK_TYPE), - boot_disk_size_gb=( - training_config.boot_disk_size_gb or _DEFAULT_BOOT_DISK_SIZE_GB - ), - ) - - job = aiplatform.CustomJob( - display_name=f"{invokable.instance.__class__.__name__}-{display_name}" - f"-{uuid.uuid4()}", - worker_pool_specs=worker_pool_specs, - base_output_dir=remote_specs._gen_gcs_path( - staging_bucket, model_utils._CUSTOM_JOB_DIR - ), - staging_bucket=remote_specs._gen_gcs_path( - staging_bucket, model_utils._CUSTOM_JOB_DIR - ), - ) - job.run() - - # Sets output values from the custom job. - for data in output_specs: - deserialized_output = data.deserialize_output( - formatted_args[data.argument_name] - ) - invokable.instance.__setattr__(data.name, deserialized_output) - - # Calls the decorated function for post-processing. - return invokable.method( - *invokable.bound_arguments.args, **invokable.bound_arguments.kwargs - ) diff --git a/vertexai/preview/_workflow/executor/training.py b/vertexai/preview/_workflow/executor/training.py deleted file mode 100644 index 189c7371c1..0000000000 --- a/vertexai/preview/_workflow/executor/training.py +++ /dev/null @@ -1,839 +0,0 @@ -# Copyright 2023 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-# - -import collections -import datetime -import inspect -import logging -import os -import re -import sys -import time -from typing import Any, Dict, List, Optional, Set, Tuple, Union -import warnings - -from google.api_core import exceptions as api_exceptions -from google.cloud import aiplatform -import vertexai -from google.cloud.aiplatform import base -from google.cloud.aiplatform.preview import jobs -from google.cloud.aiplatform import utils -from google.cloud.aiplatform.metadata import metadata -from google.cloud.aiplatform.utils import resource_manager_utils -from vertexai.preview._workflow import shared -from vertexai.preview._workflow.serialization_engine import ( - any_serializer, -) -from vertexai.preview._workflow.serialization_engine import ( - serializers_base, -) -from vertexai.preview._workflow.shared import constants -from vertexai.preview._workflow.shared import ( - supported_frameworks, -) -from vertexai.preview._workflow.shared import model_utils -from vertexai.preview.developer import remote_specs -from packaging import version - - -try: - from importlib import metadata as importlib_metadata -except ImportError: - import importlib_metadata - -try: - import bigframes as bf - from bigframes.dataframe import DataFrame - - BigframesData = DataFrame -except ImportError: - bf = None - BigframesData = Any - - -try: - from google.cloud import logging as cloud_logging -except ImportError: - cloud_logging = None - - -_LOGGER = base.Logger("vertexai.remote_execution") -_LOG_POLL_INTERVAL = 5 -_LOG_WAIT_INTERVAL = 30 - - -# TODO(b/271855597) Serialize all input args -PASS_THROUGH_ARG_TYPES = [str, int, float, bool] - -VERTEX_AI_DEPENDENCY_PATH = ( - f"google-cloud-aiplatform[preview]=={aiplatform.__version__}" -) -VERTEX_AI_DEPENDENCY_PATH_AUTOLOGGING = ( - f"google-cloud-aiplatform[preview,autologging]=={aiplatform.__version__}" -) - -_DEFAULT_GPU_WORKER_POOL_SPECS = remote_specs.WorkerPoolSpecs( - remote_specs.WorkerPoolSpec(1, "n1-standard-16", 1, "NVIDIA_TESLA_P100"), - remote_specs.WorkerPoolSpec(1, "n1-standard-16", 1, "NVIDIA_TESLA_P100"), -) -_DEFAULT_CPU_WORKER_POOL_SPECS = remote_specs.WorkerPoolSpecs( - remote_specs.WorkerPoolSpec(1, "n1-standard-4"), - remote_specs.WorkerPoolSpec(1, "n1-standard-4"), -) - - -def _get_package_name(requirement: str) -> str: - """Given a requirement specification, returns the package name.""" - return re.match("[a-zA-Z-_]+", requirement).group() - - -def _get_package_extras(requirement: str) -> Set: - """Given a requirement specification, returns the extra component in it.""" - # searching for patterns like [extra1,extra2,...] 
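    # e.g. "google-cloud-aiplatform[preview,autologging]==1.52.0" -> {"preview", "autologging"}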
- extras = re.search(r"\[.*\]", requirement) - if extras: - return set([extra.strip() for extra in extras.group()[1:-1].split(",")]) - return set() - - -def _add_indirect_dependency_versions(direct_requirements: List[str]) -> List[str]: - """Helper method to get versions of libraries in the dep tree.""" - versions = {} - dependencies_and_extras = collections.deque([]) - direct_deps_packages = set() - for direct_requirement in direct_requirements: - package_name = _get_package_name(direct_requirement) - extras = _get_package_extras(direct_requirement) - direct_deps_packages.add(package_name) - try: - versions[package_name] = importlib_metadata.version(package_name) - dependencies_and_extras.append((package_name, extras)) - except importlib_metadata.PackageNotFoundError: - pass - - while dependencies_and_extras: - dependency, extras = dependencies_and_extras.popleft() - child_requirements = importlib_metadata.requires(dependency) - if not child_requirements: - continue - for child_requirement in child_requirements: - child_dependency = _get_package_name(child_requirement) - child_dependency_extras = _get_package_extras(child_requirement) - if child_dependency not in versions: - if "extra" in child_requirement: - # Matching patter "extra == 'extra_component'" in a requirement - # specification like - # "dependency_name (>=1.0.0) ; extra == 'extra_component'" - extra_component = ( - re.search(r"extra == .*", child_requirement) - .group()[len("extra == ") :] - .strip("'") - ) - # If the corresponding extra_component is not in the needed - # extras set of the parent dependency, skip this package - if extra_component not in extras: - continue - try: - versions[child_dependency] = importlib_metadata.version( - child_dependency - ) - dependencies_and_extras.append( - (child_dependency, child_dependency_extras) - ) - except importlib_metadata.PackageNotFoundError: - pass - - return [ - "==".join([package_name, package_version]) if package_version else package_name - for package_name, package_version in versions.items() - if package_name not in direct_deps_packages - ] + direct_requirements - - -def _create_worker_pool_specs( - machine_type: str, - command: str, - image_uri: str, - replica_count: int = 1, - accelerator_type: Optional[str] = None, - accelerator_count: Optional[int] = None, -) -> List[Dict[str, Any]]: - """Helper method to create worker pool specs for CustomJob.""" - worker_pool_specs = [ - { - "machine_spec": { - "machine_type": machine_type, - "accelerator_type": accelerator_type, - "accelerator_count": accelerator_count, - }, - "replica_count": replica_count, - "container_spec": { - "image_uri": image_uri, - "command": command, - "args": [], - }, - } - ] - return worker_pool_specs - - -def _get_worker_pool_specs( - config: shared.configs.RemoteConfig, image_uri: str, command: List[str] -) -> List[Dict[str, Any]]: - """Helper method to return worker_pool_specs based on user specification in training config.""" - if config.enable_distributed: - if config.worker_pool_specs: - # validate user-specified worker_pool_specs support distributed training. - # must be single worker, multi-GPU OR multi-worker, single/multi-GPU - if ( - config.worker_pool_specs.chief.accelerator_count < 2 - and not config.worker_pool_specs.worker - ): - raise ValueError( - "`enable_distributed=True` in Vertex config, but `worker_pool_specs` do not support distributed training." 
- ) - return remote_specs._prepare_worker_pool_specs( - config.worker_pool_specs, image_uri, command, args=[] - ) - else: - default_worker_pool_specs = ( - _DEFAULT_GPU_WORKER_POOL_SPECS - if config.enable_cuda - else _DEFAULT_CPU_WORKER_POOL_SPECS - ) - return remote_specs._prepare_worker_pool_specs( - default_worker_pool_specs, image_uri, command, args=[] - ) - - if config.worker_pool_specs: - warnings.warn( - "config.worker_pool_specs will not take effect since `enable_distributed=False`." - ) - - if config.enable_cuda: - default_machine_type = "n1-standard-16" - default_accelerator_type = "NVIDIA_TESLA_P100" - default_accelerator_count = 1 - else: - default_machine_type = "n1-standard-4" - default_accelerator_type = None - default_accelerator_count = None - - machine_type = config.machine_type or default_machine_type - accelerator_type = config.accelerator_type or default_accelerator_type - accelerator_count = config.accelerator_count or default_accelerator_count - - return _create_worker_pool_specs( - machine_type=machine_type, - command=command, - image_uri=image_uri, - accelerator_type=accelerator_type, - accelerator_count=accelerator_count, - ) - - -def _common_update_model_inplace(old_estimator, new_estimator): - for attr_name, attr_value in new_estimator.__dict__.items(): - if not attr_name.startswith("__") and not inspect.ismethod( - getattr(old_estimator, attr_name, None) - ): - setattr(old_estimator, attr_name, attr_value) - - -def _update_sklearn_model_inplace(old_estimator, new_estimator): - _common_update_model_inplace(old_estimator, new_estimator) - - -def _update_torch_model_inplace(old_estimator, new_estimator): - # make sure estimators are on the same device - device = next(old_estimator.parameters()).device - new_estimator.to(device) - _common_update_model_inplace(old_estimator, new_estimator) - - -def _update_lightning_trainer_inplace(old_estimator, new_estimator): - _common_update_model_inplace(old_estimator, new_estimator) - - -def _update_keras_model_inplace(old_estimator, new_estimator): - import tensorflow as tf - - @tf.__internal__.tracking.no_automatic_dependency_tracking - def _no_tracking_setattr(instance, name, value): - setattr(instance, name, value) - - for attr_name, attr_value in new_estimator.__dict__.items(): - if not attr_name.startswith("__") and not inspect.ismethod( - getattr(old_estimator, attr_name, None) - ): - # for Keras model, we update self's attributes with a decorated - # setattr. See b/277939758 for the details. - _no_tracking_setattr(old_estimator, attr_name, attr_value) - - -def _get_service_account( - config: shared.configs.RemoteConfig, - autolog: bool, -) -> Optional[str]: - """Helper method to get service account from RemoteConfig.""" - service_account = ( - config.service_account or vertexai.preview.global_config.service_account - ) - if service_account: - if service_account.lower() == "gce": - project = vertexai.preview.global_config.project - project_number = resource_manager_utils.get_project_number(project) - return f"{project_number}-compute@developer.gserviceaccount.com" - else: - return service_account - else: - if autolog: - raise ValueError( - "Service account has to be provided for autologging. You can " - "either use your own service account by setting " - "`model..vertex.remote_config.service_account = `, " - "or use the GCE service account by setting " - "`model..vertex.remote_config.service_account = 'GCE'`." 
- ) - else: - return None - - -def _dedupe_requirements(requirements: List[str]) -> List[str]: - """Helper method to deduplicate requirements by the package name. - - Args: - requirements (List[str]): - Required. A list of python packages. Can be either "my-package" or - "my-package==1.0.0". - - Returns: - A list of unique python packages. if duplicate in the original list, will - keep the first one. - """ - res = [] - req_names = set() - for req in requirements: - req_name = req.split("==")[0] - if req_name not in req_names: - req_names.add(req_name) - res.append(req) - - return res - - -def _get_remote_logs( - job_id: str, - logger: "google.cloud.logging.Logger", # noqa: F821 - log_time: datetime.datetime, - log_level: str = "INFO", - is_training_log: bool = False, -) -> Tuple[datetime.datetime, bool]: - """Helper method to get CustomJob logs from Cloud Logging. - - Args: - job_id (str): - Required. The resource id of the CustomJob. - logger (cloud_logging.Logger): - Required. A google-cloud-logging Logger object corresponding to the - CustomJob. - log_time (datetime.datetime): - Required. Logs generated after this time will get pulled. - log_level (str): - Optional. Logs greater than or equal to this level will get pulled. - Default is `INFO` level. - is_training_log (bool): - Optional. Indicates if logs after the `log_time` are training logs. - - Returns: - A tuple indicates the end time of logs and whether the training log has - started. - """ - filter_msg = [ - f"resource.labels.job_id={job_id}", - f"severity>={log_level}", - f'timestamp>"{log_time.isoformat()}"', - ] - filter_msg = " AND ".join(filter_msg) - try: - entries = logger.list_entries( - filter_=filter_msg, order_by=cloud_logging.ASCENDING - ) - for entry in entries: - log_time = entry.timestamp - message = entry.payload["message"] - if constants._START_EXECUTION_MSG in message: - is_training_log = True - if is_training_log: - _LOGGER.log(getattr(logging, entry.severity), message) - if constants._END_EXECUTION_MSG in message: - is_training_log = False - - return log_time, is_training_log - - except api_exceptions.ResourceExhausted: - _LOGGER.warning( - "Reach the limit for reading cloud logs per minute. " - f"Will try again in {_LOG_WAIT_INTERVAL} seconds." - ) - time.sleep(_LOG_WAIT_INTERVAL - _LOG_POLL_INTERVAL) - - return log_time, is_training_log - - except api_exceptions.PermissionDenied as e: - _LOGGER.warning( - f"Failed to get logs due to: {e}. " - "Remote execution logging is disabled. " - "Please add 'Logging Admin' role to your principal." - ) - - return None, None - - -def _get_remote_logs_until_complete( - job: Union[str, aiplatform.CustomJob], - start_time: Optional[datetime.datetime] = None, - system_logs: bool = False, -): - """Helper method to get CustomJob logs in real time until the job is complete. - - Args: - job (Union[str, aiplatform.CustomJob]): - Required. A CustomJob ID or `aiplatform.CustomJob` object. - start_time (datetime.datetime): - Optional. Get logs generated after this start time. Default is the - start time of the CustomJob or the current time. - system_logs (bool): - Optional. If set to True, all the logs from remote job will be logged - locally. Otherwise, only training logs will be shown. - - """ - if isinstance(job, str): - job = aiplatform.CustomJob.get(job) - - if not cloud_logging: - _LOGGER.warning( - "google-cloud-logging is not installed, remote execution logging is disabled. " - "To enable logs, call `pip install google-cloud-aiplatform[preview]`." 
- ) - while job.state not in jobs._JOB_COMPLETE_STATES: - time.sleep(_LOG_POLL_INTERVAL) - - return - - logging_client = cloud_logging.Client(project=job.project) - # TODO(b/295375379): support remote distributed training logs - logger = logging_client.logger("workerpool0-0") - - previous_time = ( - start_time or job.start_time or datetime.datetime.now(tz=datetime.timezone.utc) - ) - is_training_log = system_logs - - while job.state not in jobs._JOB_COMPLETE_STATES: - if previous_time: - previous_time, is_training_log = _get_remote_logs( - job_id=job.name, - logger=logger, - log_time=previous_time, - log_level="INFO", - is_training_log=is_training_log, - ) - time.sleep(_LOG_POLL_INTERVAL) - - if previous_time: - _get_remote_logs( - job_id=job.name, - logger=logger, - log_time=previous_time, - log_level="INFO", - is_training_log=is_training_log, - ) - - -def _set_job_labels(method_name: str) -> Dict[str, str]: - """Helper method to set the label for the CustomJob. - - Remote training, feature transform, and prediction jobs should each have - different labels. - - Args: - method_Name (str): - Required. The method name used to invoke the remote job. - - Returns: - A dictionary of the label key/value to use for the CustomJob. - """ - - if method_name in supported_frameworks.REMOTE_TRAINING_STATEFUL_OVERRIDE_LIST: - return {"trained_by_vertex_ai": "true"} - - if method_name in supported_frameworks.REMOTE_TRAINING_FUNCTIONAL_OVERRIDE_LIST: - return {"feature_transformed_by_vertex_ai": "true"} - - if method_name in supported_frameworks.REMOTE_PREDICTION_OVERRIDE_LIST: - return {"predicted_by_vertex_ai": "true"} - - -def remote_training(invokable: shared._Invokable, rewrapper: Any): - """Wrapper function that makes a method executable by Vertex CustomJob.""" - - self = invokable.instance - method = invokable.method - method_name = method.__name__ - bound_args = invokable.bound_arguments - config = invokable.vertex_config.remote_config - serializer_args = invokable.vertex_config.remote_config.serializer_args - if not isinstance(serializer_args, serializers_base.SerializerArgs): - raise ValueError("serializer_args must be an instance of SerializerArgs.") - - autolog = vertexai.preview.global_config.autolog - service_account = _get_service_account(config, autolog=autolog) - if ( - autolog - and vertexai.preview.global_config.cluster is not None - and (service_account != vertexai.preview.global_config.cluster.service_account) - ): - raise ValueError( - f"The service account for autologging ({service_account}) is mismatched with the cluster's service account ({vertexai.preview.global_config.service_account}). 
" - ) - if autolog: - vertex_requirements = [ - VERTEX_AI_DEPENDENCY_PATH_AUTOLOGGING, - "absl-py==1.4.0", - ] - else: - vertex_requirements = [ - VERTEX_AI_DEPENDENCY_PATH, - "absl-py==1.4.0", - ] - - requirements = [] - custom_commands = [] - - enable_cuda = config.enable_cuda - - # TODO(b/274979556): consider other approaches to pass around the primitives - pass_through_int_args = {} - pass_through_float_args = {} - pass_through_str_args = {} - pass_through_bool_args = {} - serialized_args = {} - - for arg_name, arg_value in bound_args.arguments.items(): - if arg_name == "self": - pass - elif isinstance(arg_value, int): - pass_through_int_args[arg_name] = arg_value - elif isinstance(arg_value, float): - pass_through_float_args[arg_name] = arg_value - elif isinstance(arg_value, str): - pass_through_str_args[arg_name] = arg_value - elif isinstance(arg_value, bool): - pass_through_bool_args[arg_name] = arg_value - else: - serialized_args[arg_name] = arg_value - - # set base gcs path for the remote job - staging_bucket = ( - config.staging_bucket or vertexai.preview.global_config.staging_bucket - ) - if not staging_bucket: - raise ValueError( - "No default staging bucket set. " - "Please call `vertexai.init(staging_bucket='gs://my-bucket')." - ) - remote_job = f"remote-job-{utils.timestamped_unique_name()}" - remote_job_base_path = os.path.join(staging_bucket, remote_job) - remote_job_input_path = os.path.join(remote_job_base_path, "input") - remote_job_output_path = model_utils._generate_remote_job_output_path( - remote_job_base_path - ) - - detected_framework = None - if supported_frameworks._is_sklearn(self): - detected_framework = "sklearn" - elif supported_frameworks._is_keras(self): - detected_framework = "tensorflow" - # TODO(b/295580335): Investigate Tensorflow 2.13 GPU Hanging - import tensorflow as tf - - accelerator_count = config.accelerator_count if config.accelerator_count else 0 - if ( - version.Version(tf.__version__).base_version >= "2.13.0" - and accelerator_count > 1 - ): - raise ValueError( - f"Currently Tensorflow {tf.__version__} doesn't support multi-gpu training." - ) - elif supported_frameworks._is_torch(self): - detected_framework = "torch" - # TODO(b/296944997): Support remote training on torch<2 - import torch - - if version.Version(torch.__version__).base_version < "2.0.0": - raise ValueError( - f"Currently Vertex remote training doesn't support torch {torch.__version__}. " - "Please use torch>=2.0.0" - ) - - # serialize the estimator - serializer = any_serializer.AnySerializer() - serialization_metadata = serializer.serialize( - to_serialize=self, - gcs_path=os.path.join(remote_job_input_path, "input_estimator"), - **serializer_args.get(self, {}), - ) - requirements += serialization_metadata[ - serializers_base.SERIALIZATION_METADATA_DEPENDENCIES_KEY - ] - # serialize args - for arg_name, arg_value in serialized_args.items(): - if supported_frameworks._is_bigframe(arg_value): - # Throw error for Python 3.11+ and Bigframes Torch - if detected_framework == "torch" and sys.version_info[1] >= 11: - raise ValueError( - "Currently Bigframes Torch serializer does not support" - "Python 3.11+ since torcharrow is not supported on Python 3.11+." 
- ) - serialization_metadata = serializer.serialize( - to_serialize=arg_value, - gcs_path=os.path.join(remote_job_input_path, f"{arg_name}"), - framework=detected_framework, - **serializer_args.get(arg_value, {}), - ) - else: - serialization_metadata = serializer.serialize( - to_serialize=arg_value, - gcs_path=os.path.join(remote_job_input_path, f"{arg_name}"), - **serializer_args.get(arg_value, {}), - ) - # serializer.get_dependencies() must be run after serializer.serialize() - requirements += serialization_metadata[ - serializers_base.SERIALIZATION_METADATA_DEPENDENCIES_KEY - ] - - # execute the method in CustomJob - # set training configuration - display_name = config.display_name or remote_job - - # get or generate worker_pool_specs - # user can specify either worker_pool_specs OR machine_type etc. - remote_specs._verify_specified_remote_config_values( - config.worker_pool_specs, - config.machine_type, - config.accelerator_type, - config.accelerator_count, - ) - - if not config.container_uri: - container_uri = ( - supported_frameworks._get_cpu_container_uri() - if not enable_cuda - else supported_frameworks._get_gpu_container_uri(self) - ) - requirements = _dedupe_requirements( - vertex_requirements + config.requirements + requirements - ) - else: - container_uri = config.container_uri - requirements = _dedupe_requirements(vertex_requirements + config.requirements) - - requirements = _add_indirect_dependency_versions(requirements) - command = ["export PIP_ROOT_USER_ACTION=ignore &&"] - - # Combine user custom_commands and serializer custom_commands - custom_commands += serialization_metadata[ - serializers_base.SERIALIZATION_METADATA_CUSTOM_COMMANDS_KEY - ] - custom_commands += config.custom_commands - custom_commands = list(dict.fromkeys(custom_commands)) - - if custom_commands: - custom_commands = [f"{command} &&" for command in custom_commands] - command.extend(custom_commands) - if requirements: - command.append("pip install --upgrade pip &&") - requirements = [f"'{requirement}'" for requirement in requirements] - command.append(f"pip install {' '.join(requirements)} &&") - - pass_through_bool_args_flag_value = ",".join( - f"{key}={value}" for key, value in pass_through_bool_args.items() - ) - pass_through_int_args_flag_value = ",".join( - f"{key}={value}" for key, value in pass_through_int_args.items() - ) - pass_through_float_args_flag_value = ",".join( - f"{key}={value}" for key, value in pass_through_float_args.items() - ) - pass_through_str_args_flag_value = ",".join( - f"{key}={value}" for key, value in pass_through_str_args.items() - ) - - autolog_command = " --enable_autolog" if autolog else "" - - training_command = ( - "python3 -m " - "vertexai.preview._workflow.executor.training_script " - f"--pass_through_int_args={pass_through_int_args_flag_value} " - f"--pass_through_float_args={pass_through_float_args_flag_value} " - f"--pass_through_str_args={pass_through_str_args_flag_value} " - f"--pass_through_bool_args={pass_through_bool_args_flag_value} " - f"--input_path={remote_job_input_path.replace('gs://', '/gcs/', 1)} " - f"--output_path={remote_job_output_path.replace('gs://', '/gcs/', 1)} " - f"--method_name={method_name} " - + f"--arg_names={','.join(list(serialized_args.keys()))} " - + f"--enable_cuda={enable_cuda} " - + f"--enable_distributed={config.enable_distributed} " - # For distributed training. Use this to infer tf.distribute strategy for Keras training. - # Keras single worker, multi-gpu needs to be compiled with tf.distribute.MirroredStrategy. 
- # Keras multi-worker needs to be compiled with tf.distribute.MultiWorkerMirroredStrategy. - + f"--accelerator_count={0 if not config.accelerator_count else config.accelerator_count}" - + autolog_command - ) - command.append(training_command) - # Temporary fix for git not installed in pytorch cuda image - # Remove it once SDK 2.0 is release and don't need to be installed from git - if container_uri == "pytorch/pytorch:2.0.0-cuda11.7-cudnn8-runtime": - command = ["apt-get update && apt-get install -y git &&"] + command - - command = ["sh", "-c", " ".join(command)] - - labels = _set_job_labels(method_name) - - # serialize rewrapper, this is needed to load a model from a CustomJob - filepath = os.path.join( - remote_job_output_path, - model_utils._REWRAPPER_NAME, - ) - serializer.serialize(rewrapper, filepath, **serializer_args.get(rewrapper, {})) - - # Right before making the job, we save the serialization global metadata - input_global_metadata_gcs_uri = os.path.join( - remote_job_input_path, any_serializer.GLOBAL_SERIALIZATION_METADATA - ) - serializer.save_global_metadata(input_global_metadata_gcs_uri) - # create & run the CustomJob - - # disable CustomJob logs - logging.getLogger("google.cloud.aiplatform.jobs").disabled = True - logging.getLogger("google.cloud.aiplatform.preview.jobs").disabled = True - cluster_name = ( - vertexai.preview.global_config.cluster.name - if vertexai.preview.global_config.cluster is not None - else None - ) - try: - job = jobs.CustomJob( - display_name=display_name, - project=vertexai.preview.global_config.project, - location=vertexai.preview.global_config.location, - worker_pool_specs=_get_worker_pool_specs(config, container_uri, command), - base_output_dir=remote_job_base_path, - staging_bucket=remote_job_base_path, - labels=labels, - persistent_resource_id=cluster_name, - ) - - job.submit( - service_account=service_account, - # TODO(jayceeli) Remove this check when manual logging is supported. - experiment=metadata._experiment_tracker.experiment if autolog else None, - experiment_run=metadata._experiment_tracker.experiment_run - if autolog - else None, - ) - job.wait_for_resource_creation() - - _LOGGER.info(f"Remote job created. 
View the job: {job._dashboard_uri()}") - - _get_remote_logs_until_complete( - job=job, - system_logs=config.enable_full_logs, - ) - except Exception as e: - raise e - finally: - # enable CustomJob logs after remote training job is done - logging.getLogger("google.cloud.aiplatform.jobs").disabled = False - logging.getLogger("google.cloud.aiplatform.preview.jobs").disabled = False - - if job.state in jobs._JOB_ERROR_STATES: - return job - - add_model_to_history_obj = False - - # retrieve the result from gcs to local - # First, load the global metadata - output_global_metadata_gcs_uri = os.path.join( - remote_job_output_path, any_serializer.GLOBAL_SERIALIZATION_METADATA - ) - serializer.load_global_metadata(output_global_metadata_gcs_uri) - if method_name in supported_frameworks.REMOTE_TRAINING_STATEFUL_OVERRIDE_LIST: - estimator = serializer.deserialize( - os.path.join(remote_job_output_path, model_utils._OUTPUT_ESTIMATOR_DIR), - ) - - if supported_frameworks._is_sklearn(self): - _update_sklearn_model_inplace(self, estimator) - - elif supported_frameworks._is_keras(self): - add_model_to_history_obj = True - _update_keras_model_inplace(self, estimator) - - elif supported_frameworks._is_torch(self): - _update_torch_model_inplace(self, estimator) - - elif supported_frameworks._is_lightning(self): - _update_lightning_trainer_inplace(self, estimator) - # deserialize and update the trained model as well - trained_model = serializer.deserialize( - os.path.join( - remote_job_output_path, model_utils._OUTPUT_ESTIMATOR_DIR, "model" - ) - ) - _update_torch_model_inplace(serialized_args["model"], trained_model) - else: - # if it's a custom model, update the model object by iterating its - # attributes. A custom model is any class that has a method - # decorated by @vertexai.preview.developer.mark.train (and optionally - # another method decorated by @vertexai.preview.developer.mark.predict). - _common_update_model_inplace(self, estimator) - - if method_name in supported_frameworks.REMOTE_PREDICTION_OVERRIDE_LIST: - predictions = serializer.deserialize( - os.path.join(remote_job_output_path, model_utils._OUTPUT_PREDICTIONS_DIR) - ) - return predictions - - # Note: "output_data" refers to general output from the executed method, not - # just a transformed data. - try: - # TODO b/296584472: figure out a general mechanism to populate - # inter-object references. - if add_model_to_history_obj: - output_data = serializer.deserialize( - os.path.join(remote_job_output_path, "output_data"), model=self - ) - else: - output_data = serializer.deserialize( - os.path.join(remote_job_output_path, "output_data") - ) - return output_data - except Exception as e: - _LOGGER.warning( - f"Fail to deserialize the output due to error {e}, " "returning None." - ) - return None diff --git a/vertexai/preview/_workflow/executor/training_script.py b/vertexai/preview/_workflow/executor/training_script.py deleted file mode 100644 index e0add25a8a..0000000000 --- a/vertexai/preview/_workflow/executor/training_script.py +++ /dev/null @@ -1,234 +0,0 @@ -# Copyright 2023 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
-# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -"""Training script to be run in Vertex CustomJob. -""" - -# import modules -import os - -from absl import app -from absl import flags -import vertexai -from vertexai.preview._workflow.serialization_engine import ( - any_serializer, - serializers_base, -) -from vertexai.preview._workflow.shared import ( - constants, - supported_frameworks, - model_utils, -) -from vertexai.preview.developer import remote_specs - - -os.environ["_IS_VERTEX_REMOTE_TRAINING"] = "True" - -print(constants._START_EXECUTION_MSG) - -_ARGS = flags.DEFINE_list( - "arg_names", [], "Argument names of those to be deserialized." -) -# TODO(b/274979556): consider other approaches to pass around the primitives -_PASS_THROUGH_INT_ARGS = flags.DEFINE_list( - "pass_through_int_args", [], "Pass-through integer arguments." -) -_PASS_THROUGH_FLOAT_ARGS = flags.DEFINE_list( - "pass_through_float_args", [], "Pass-through float arguments." -) -_PASS_THROUGH_BOOL_ARGS = flags.DEFINE_list( - "pass_through_bool_args", [], "Pass-through bool arguments." -) -_PASS_THROUGH_STR_ARGS = flags.DEFINE_list( - "pass_through_str_args", [], "Pass-through string arguments." -) -_METHOD_NAME = flags.DEFINE_string("method_name", None, "Method being called") - -_INPUT_PATH = flags.DEFINE_string("input_path", None, "input path.") -_OUTPUT_PATH = flags.DEFINE_string("output_path", None, "output path.") -_ENABLE_AUTOLOG = flags.DEFINE_bool("enable_autolog", False, "enable autolog.") -_ENABLE_CUDA = flags.DEFINE_bool("enable_cuda", False, "enable cuda.") -_ENABLE_DISTRIBUTED = flags.DEFINE_bool( - "enable_distributed", False, "enable distributed training." -) -_ACCELERATOR_COUNT = flags.DEFINE_integer( - "accelerator_count", - 0, - "accelerator count for single worker, multi-gpu training.", -) - - -# pylint: disable=protected-access -def main(argv): - del argv - - # set cuda for tensorflow & pytorch - try: - import tensorflow - - if not _ENABLE_CUDA.value: - tensorflow.config.set_visible_devices([], "GPU") - except ImportError: - pass - - try: - import torch - - torch.set_default_device("cuda" if _ENABLE_CUDA.value else "cpu") - except ImportError: - torch = None - - strategy = None - try: - from tensorflow import keras # noqa: F401 - - # distribute strategy must be initialized at the beginning of the program - # to avoid RuntimeError: "Collective ops must be configured at program startup" - strategy = remote_specs._get_keras_distributed_strategy( - _ENABLE_DISTRIBUTED.value, _ACCELERATOR_COUNT.value - ) - - except ImportError: - pass - - if _ENABLE_AUTOLOG.value: - vertexai.preview.init(autolog=True) - - # retrieve the estimator - serializer = any_serializer.AnySerializer() - # load the global metadata - serializer.load_global_metadata( - os.path.join(_INPUT_PATH.value, any_serializer.GLOBAL_SERIALIZATION_METADATA) - ) - - estimator = serializer.deserialize( - os.path.join(_INPUT_PATH.value, "input_estimator") - ) - - if strategy and supported_frameworks._is_keras(estimator): - # Single worker, multi-gpu will be compiled with tf.distribute.MirroredStrategy. 
- # Multi-worker will be compiled with tf.distribute.MultiWorkerMirroredStrategy. - # Single worker CPU/GPU will be returned as is. - estimator = remote_specs._set_keras_distributed_strategy(estimator, strategy) - - if supported_frameworks._is_lightning(estimator): - from lightning.pytorch.trainer.connectors.accelerator_connector import ( - _AcceleratorConnector, - ) - - # Re-instantiate accelerator connecotor in remote environment. Most of configs - # like strategy, devices will be automatically handled by - # the _AcceleratorConnector class. - # accelerator and num_nodes need to be manually set. - accelerator = "gpu" if _ENABLE_CUDA.value else "cpu" - num_nodes = ( - remote_specs._get_cluster_spec().get_world_size() - if _ENABLE_DISTRIBUTED.value - else 1 - ) - estimator._accelerator_connector = _AcceleratorConnector( - accelerator=accelerator, - num_nodes=num_nodes, - ) - - # retrieve seriliazed_args - kwargs = {} - for arg_name in _ARGS.value: - arg_value = serializer.deserialize(os.path.join(_INPUT_PATH.value, arg_name)) - - if supported_frameworks._is_torch_dataloader(arg_value): - # update gpu setting in dataloader for pytorch model gpu training - # lightning will automatically handle the data so no need to update - if supported_frameworks._is_torch(estimator) and _ENABLE_CUDA.value: - arg_value.pin_memory = True - arg_value.pin_memory_device = "cuda" - arg_value.generator = torch.Generator("cuda") - if hasattr(arg_value.sampler, "generator"): - setattr(arg_value.sampler, "generator", arg_value.generator) - # make sure the torch default device is the same as - # dataloader generator's device - torch.set_default_device( - arg_value.generator.device.type if arg_value.generator else "cpu" - ) - - kwargs[arg_name] = arg_value - - for arg_name_and_arg_value in _PASS_THROUGH_INT_ARGS.value: - arg_name, arg_value = arg_name_and_arg_value.split("=") - kwargs[arg_name] = int(arg_value) - for arg_name_and_arg_value in _PASS_THROUGH_FLOAT_ARGS.value: - arg_name, arg_value = arg_name_and_arg_value.split("=") - kwargs[arg_name] = float(arg_value) - for arg_name_and_arg_value in _PASS_THROUGH_BOOL_ARGS.value: - arg_name, arg_value = arg_name_and_arg_value.split("=") - kwargs[arg_name] = bool(arg_value) - for arg_name_and_arg_value in _PASS_THROUGH_STR_ARGS.value: - arg_name, arg_value = arg_name_and_arg_value.split("=") - kwargs[arg_name] = arg_value - - # for all custom trainers, set cluster_spec if available - if ( - isinstance(estimator, vertexai.preview.VertexModel) - and _ENABLE_DISTRIBUTED.value - ): - setattr(estimator, "cluster_spec", remote_specs._get_cluster_spec()) - if supported_frameworks._is_torch(estimator): - # need to know if GPU training is enabled for the - # optional remote_specs.setup_pytorch_distributed_training() - # function that a user can call in train() - setattr(estimator, "_enable_cuda", _ENABLE_CUDA.value) - - output = getattr(estimator, _METHOD_NAME.value)(**kwargs) - - # serialize the output - os.makedirs(_OUTPUT_PATH.value, exist_ok=True) - - if ( - _METHOD_NAME.value - in supported_frameworks.REMOTE_TRAINING_STATEFUL_OVERRIDE_LIST - ): - # for distributed training, chief saves output to specified output - # directory while non-chief workers save output to temp directory. 
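The `--pass_through_*_args` flags parsed above are just packed and unpacked strings: the launcher joins each primitive keyword argument into a comma-separated `name=value` flag, and this script splits the flag back apart before calling the method. A minimal round-trip sketch with hypothetical values:

```
# Hypothetical primitive kwargs captured on the launcher side.
pass_through_int_args = {"epochs": 5, "batch_size": 32}

# Packing, as done when the training command is assembled.
flag_value = ",".join(f"{key}={value}" for key, value in pass_through_int_args.items())
# flag_value == "epochs=5,batch_size=32"

# Unpacking, as done above before invoking the method.
recovered = {}
for name_and_value in flag_value.split(","):
    name, value = name_and_value.split("=")
    recovered[name] = int(value)

assert recovered == pass_through_int_args
```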
- output_path = remote_specs._get_output_path_for_distributed_training( - _OUTPUT_PATH.value, model_utils._OUTPUT_ESTIMATOR_DIR - ) - serializer.serialize(estimator, output_path) - - # for pytorch lightning trainer, we want to serialize the trained model as well - if "model" in _ARGS.value: - serializer.serialize(kwargs["model"], os.path.join(output_path, "model")) - - # for remote prediction - if _METHOD_NAME.value in supported_frameworks.REMOTE_PREDICTION_OVERRIDE_LIST: - serializer.serialize( - output, - os.path.join(_OUTPUT_PATH.value, model_utils._OUTPUT_PREDICTIONS_DIR), - ) - - output_path = remote_specs._get_output_path_for_distributed_training( - _OUTPUT_PATH.value, "output_data" - ) - try: - serializer.serialize(output, output_path) - except serializers_base.SerializationError as e: - print(f"failed to serialize the output due to {e}") - serializer.save_global_metadata( - os.path.join(_OUTPUT_PATH.value, any_serializer.GLOBAL_SERIALIZATION_METADATA) - ) - - print(constants._END_EXECUTION_MSG) - - -if __name__ == "__main__": - app.run(main) diff --git a/vertexai/preview/_workflow/launcher/__init__.py b/vertexai/preview/_workflow/launcher/__init__.py deleted file mode 100644 index 4ddf9980d6..0000000000 --- a/vertexai/preview/_workflow/launcher/__init__.py +++ /dev/null @@ -1,56 +0,0 @@ -# -*- coding: utf-8 -*- - -# Copyright 2023 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-# - -from typing import Any - -from vertexai.preview._workflow import executor -from vertexai.preview._workflow import shared - - -class _WorkflowLauncher: - """Launches workflows either locally or remotely.""" - - def launch(self, invokable: shared._Invokable, global_remote: bool, rewrapper: Any): - - local_remote = invokable.vertex_config.remote - - if local_remote or (local_remote is None and global_remote): - result = self._remote_launch(invokable, rewrapper) - else: - for _, arg in invokable.bound_arguments.arguments.items(): - if "bigframes" in repr(type(arg)): - raise ValueError( - "Bigframes not supported if vertexai.preview.init(remote=False)" - ) - result = self._local_launch(invokable) - return result - - def _remote_launch(self, invokable: shared._Invokable, rewrapper: Any) -> Any: - result = executor._workflow_executor.remote_execute( - invokable, rewrapper=rewrapper - ) - # TODO(b/277343861) workflow tracking goes here - # E.g., initializer.global_config.workflow.add_remote_step(invokable, result) - - return result - - def _local_launch(self, invokable: shared._Invokable) -> Any: - result = executor._workflow_executor.local_execute(invokable) - # TODO(b/277343861) workflow tracking goes here - # E.g., initializer.global_config.workflow.add_local_step(invokable, result) - - return result diff --git a/vertexai/preview/_workflow/serialization_engine/__init__.py b/vertexai/preview/_workflow/serialization_engine/__init__.py deleted file mode 100644 index b24e67a831..0000000000 --- a/vertexai/preview/_workflow/serialization_engine/__init__.py +++ /dev/null @@ -1,16 +0,0 @@ -# -*- coding: utf-8 -*- - -# Copyright 2023 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# diff --git a/vertexai/preview/_workflow/serialization_engine/any_serializer.py b/vertexai/preview/_workflow/serialization_engine/any_serializer.py deleted file mode 100644 index 64ed527e75..0000000000 --- a/vertexai/preview/_workflow/serialization_engine/any_serializer.py +++ /dev/null @@ -1,578 +0,0 @@ -# -*- coding: utf-8 -*- - -# Copyright 2023 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
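Stepping back to `_WorkflowLauncher.launch` above: the dispatch rule is that a per-invocation `remote` setting wins, and `None` defers to the global flag set via `vertexai.preview.init(remote=...)`. A sketch of just that predicate (the helper name is illustrative, not part of the SDK):

```
def _use_remote(local_remote, global_remote):
    # Mirrors `local_remote or (local_remote is None and global_remote)` above.
    return bool(local_remote or (local_remote is None and global_remote))

assert _use_remote(None, True) is True    # no per-call setting: follow the global flag
assert _use_remote(False, True) is False  # explicit local override
assert _use_remote(True, False) is True   # explicit remote override
```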
-# -# pylint: disable=line-too-long, bad-continuation,protected-access -"""Defines the Serializer classes.""" -import collections -import dataclasses -import importlib -import json -import os -import sys -import tempfile -from typing import Any, Dict, Union, List, TypeVar, Type, Optional - -from google.cloud.aiplatform import base -from google.cloud.aiplatform.utils import gcs_utils -from vertexai.preview._workflow.serialization_engine import ( - serializers, - serializers_base, -) -from vertexai.preview._workflow.shared import ( - supported_frameworks, -) - -from packaging import requirements - - -T = TypeVar("T") - -_LOGGER = base.Logger("vertexai.serialization_engine") - -SERIALIZATION_METADATA_SERIALIZER_KEY = "serializer" -SERIALIZATION_METADATA_DEPENDENCIES_KEY = "dependencies" -SERIALIZATION_ARGS_DIRNAME = "serialization_args" -GLOBAL_SERIALIZATION_METADATA = "global_serialization_metadata.json" - -_LIGHTNING_ROOT_DIR = "/vertex_lightning_root_dir/" -_JSONABLE_TYPES = Union[int, float, bytes, bool, str, None] - -# This is a collection of all the predefined serializers and the fully qualified -# class names that these serializers are intended to be used on. -_PREDEFINED_SERIALIZERS = frozenset( - [ - ("sklearn.base.BaseEstimator", serializers.SklearnEstimatorSerializer), - ("tensorflow.keras.models.Model", serializers.KerasModelSerializer), - ( - "tensorflow.keras.callbacks.History", - serializers.KerasHistoryCallbackSerializer, - ), - ("tensorflow.data.Dataset", serializers.TFDatasetSerializer), - ("torch.nn.Module", serializers.TorchModelSerializer), - ("torch.utils.data.DataLoader", serializers.TorchDataLoaderSerializer), - ("lightning.pytorch.Trainer", serializers.LightningTrainerSerializer), - ("bigframes.dataframe.DataFrame", serializers.BigframeSerializer), - ("pandas.DataFrame", serializers.PandasDataSerializer), - ] -) - - -def get_arg_path_from_file_gcs_uri(gcs_uri: str, arg_name: str) -> str: - """Gets the argument gcs path from the to-be-serialized object's gcs uri.""" - # TODO(b/306392189): add an intermediate directory to differentiate - # arguments for different objects. - prefix = serializers.get_uri_prefix(gcs_uri=gcs_uri) - return os.path.join( - prefix, - SERIALIZATION_ARGS_DIRNAME, - arg_name, - ) - - -def _is_the_same_gcs_path(gcs_path_form1, gcs_path_form2) -> bool: - if gcs_path_form1 in ( - gcs_path_form2, - gcs_path_form2.replace("gs://", "/gcs/"), - gcs_path_form2.replace("/gcs/", "gs://"), - ): - return True - return False - - -@dataclasses.dataclass -class SerializerArg: - value: _JSONABLE_TYPES = None - gcs_path: Optional[str] = None - - @classmethod - def from_dict(cls, d: Dict[str, Any]): - if d.get("value", None) is not None and d.get("gcs_path", None) is not None: - raise ValueError("Only one of value or gcs_path should be provided.") - value = d.get("value", None) - if sys.version_info < (3, 10): - # in Python <=3.9, we couldn't use subscriptable generics for instance - # checks. - if value is not None and type(value) not in (int, float, bytes, bool, str): - raise ValueError( - "Only string, int, float, bool, bytes and None are supported " - f"while a {type(value)} {value} is provided." - ) - else: - if value is not None and not isinstance(value, _JSONABLE_TYPES): - raise ValueError( - "Only string, int, float, bool, bytes and None are supported " - f"while a {type(value)} {value} is provided." 
- ) - return cls(value, d.get("gcs_path", None)) - - def to_dict(self): - return {"value": self.value, "gcs_path": self.gcs_path} - - def to_jsonable_dict(self): - return self.to_dict() - - -@dataclasses.dataclass -class SerializedEntryMetadata: - # TODO(b/307272556): consider deprecate either serialization_id or obj. - serialization_id: str - serializer_args: Dict[str, SerializerArg] = dataclasses.field( - default_factory=collections.defaultdict - ) - obj: Any = None - - @classmethod - def from_dict(cls, d: Dict[str, Any]): - return cls( - d.get("serialization_id", None), - { - key: SerializerArg.from_dict(value) - for key, value in d["serializer_args"].items() - }, - d.get("obj", None), - ) - - def to_dict(self): - return { - "serialization_id": self.serialization_id, - "serializer_args": { - key: value.to_dict() for key, value in self.serializer_args.items() - }, - "obj": self.obj, - } - - def to_jsonable_dict(self): - # We'll not save the object to jsonized data - return { - "serialization_id": self.serialization_id, - "serializer_args": { - key: value.to_jsonable_dict() - for key, value in self.serializer_args.items() - }, - } - - -class SerializedDict(dict): - """A dict that ensures all the gcs_path keys are starting with gs://""" - - def __getitem__(self, __key, /): - if __key.startswith("/gcs/") and __key in self.keys(): - value = super().__getitem__(__key) - new_key = __key.replace("/gcs/", "gs://") - super().__setitem__(new_key, value) - super().__delitem__(__key) - return super().__getitem__(new_key) - elif __key.startswith("/gcs/"): - value = super().__getitem__(__key.replace("/gcs/", "gs://")) - return value - return super().__getitem__(__key) - - def __setitem__(self, __key, __value, /): - if __key.startswith("/gcs/"): - super().__setitem__(__key.replace("/gcs/", "gs://"), __value) - super().__setitem__(__key, __value) - - def __delitem__(self, __key, /): - if __key.startswith("/gcs/") and __key not in self.keys(): - super().__delitem__(__key.replace("/gcs/", "gs://")) - super().__delitem__(__key) - - def get(self, key, default=None): - new_key = key.replace("/gcs/", "gs://") - return super().get(new_key, default) - - -@dataclasses.dataclass -class AnySerializationMetadata(serializers_base.SerializationMetadata): - """Metadata of AnySerializer class.""" - - # serialized is a dict from the gcs path of the serialized to its serialization metadata - serialized: SerializedDict = dataclasses.field(default_factory=SerializedDict) - - @classmethod - def from_dict(cls, d: Dict[str, Any]): - return cls( - serializer=d.get("serializer", None), - dependencies=d.get("dependencies", None), - serialized=SerializedDict( - { - key: SerializedEntryMetadata.from_dict(value) - for key, value in d["serialized"].items() - } - ), - ) - - def to_dict(self): - dct = super().to_dict() - dct.update( - { - "serialized": { - key: value.to_dict() for key, value in self.serialized.items() - } - } - ) - return dct - - def to_jsonable_dict(self): - dct = super().to_jsonable_dict() - dct.update( - { - "serialized": { - key: value.to_jsonable_dict() - for key, value in self.serialized.items() - } - } - ) - return dct - - -def _check_dependency_versions(required_packages: List[str]): - for package in required_packages: - requirement = requirements.Requirement(package) - package_name = requirement.name - current_version = supported_frameworks._get_version_for_package(package_name) - if not requirement.specifier.contains(current_version): - _LOGGER.warning( - "%s's version is %s, while the required version is 
%s", - package_name, - current_version, - requirement.specifier, - ) - - -def _get_custom_serializer_path_from_file_gcs_uri( - gcs_uri: str, serializer_name: str -) -> str: - prefix = serializers.get_uri_prefix(gcs_uri=gcs_uri) - return os.path.join(prefix, f"{serializer_name}") - - -class AnySerializer(serializers_base.Serializer): - """A serializer that can routes any object to their own serializer.""" - - _metadata: AnySerializationMetadata = AnySerializationMetadata( - serializer="AnySerializer" - ) - - def __init__(self): - super().__init__() - # Register with default serializers - AnySerializer._register(object, serializers.CloudPickleSerializer) - - for args in _PREDEFINED_SERIALIZERS: - AnySerializer._register_predefined_serializer(*args) - - @classmethod - def _get_custom_serializer(cls, type_cls): - return cls._custom_serialization_scheme.get(type_cls) - - @classmethod - def _get_predefined_serializer(cls, type_cls): - return cls._serialization_scheme.get(type_cls) - - @classmethod - def _register_predefined_serializer( - cls, - full_class_name: str, - serializer: serializers_base.Serializer, - ): - """Registers a predefined serializer to AnySerializer.""" - try: - module_name, class_name = full_class_name.rsplit(".", 1) - module = importlib.import_module(module_name) - to_serialize_class = getattr(module, class_name) - - AnySerializer._register(to_serialize_class, serializer) - _LOGGER.debug(f"Successfully registered {serializer}") - - except Exception as e: - _LOGGER.debug(f"Failed to register {serializer} due to: {e}") - - def _gcs_path_in_metadata(self, obj) -> Optional[str]: - """Checks if an object has been (de-)serialized before.""" - for key, value in self._metadata.serialized.items(): - if obj is value.obj: - return key - - def _update_metadata_for_obj( - self, - to_serialize: T, - new_gcs_path: str, - serializer_args: Optional[Dict[str, SerializerArg]] = None, - ): - for key, value in self._metadata.serialized.items(): - if to_serialize is value.obj and not _is_the_same_gcs_path( - key, new_gcs_path - ): - self._metadata.serialized[new_gcs_path] = value - del self._metadata.serialized[key] - return - - new_value = SerializedEntryMetadata( - serialization_id=id(to_serialize), - serializer_args=serializer_args, - obj=to_serialize, - ) - - self._metadata.serialized[new_gcs_path] = new_value - - def save_global_metadata(self, gcs_path: str): - """Saves the current global metadata to the specified gcs_path.""" - if gcs_path.startswith("gs://"): - with tempfile.NamedTemporaryFile(mode="wt") as temp_file: - json.dump(self._metadata.to_jsonable_dict(), temp_file) - temp_file.flush() - temp_file.seek(0) - - gcs_utils.upload_to_gcs(temp_file.name, gcs_path) - else: - # In distributed training, one worker could have written this global - # dataset and keep opening it will raise FileExistsError. - # TODO(b/306434083): Find the right error type to catch and put the - # `with open` in a try clause. This is because, even with the - # os.path.exists() check, it can still happen that during the check, - # the file doesn't exist but it exists while we are writing. 
- if os.path.exists(gcs_path): - _LOGGER.info("%s already exists, returning", gcs_path) - return - try: - with open(gcs_path, "w") as f: - json.dump(self._metadata.to_jsonable_dict(), f) - except Exception as e: - _LOGGER.warning( - "Failed to save global metadata to %s due to error %s", gcs_path, e - ) - - def load_global_metadata(self, gcs_path: str) -> Dict[str, Any]: - """Loads the current global metadata from the specified gcs_path.""" - if gcs_path.startswith("gs://"): - with tempfile.NamedTemporaryFile() as temp_file: - gcs_utils.download_file_from_gcs(gcs_path, temp_file.name) - with open(temp_file.name, mode="rb") as f: - metadata = json.load(f) - else: - with open(gcs_path, "rb") as f: - metadata = json.load(f) - - self._metadata = AnySerializationMetadata.from_dict(metadata) - - def serialize(self, to_serialize: T, gcs_path: str, **kwargs) -> Dict[str, Any]: - """Simplified version of serialize().""" - metadata_path = serializers.get_metadata_path_from_file_gcs_uri(gcs_path) - gcs_path_in_metadata = self._gcs_path_in_metadata(to_serialize) - # The object has been serialized, this likely happens when this code - # is run on the remote side (CustomJob) - if gcs_path_in_metadata and not kwargs: - serializer_args = self._metadata.serialized[ - gcs_path_in_metadata - ].serializer_args - else: - serializer_args = kwargs.copy() - _LOGGER.debug("serializer_args is %s", serializer_args) - - for i, step_type in enumerate( - to_serialize.__class__.__mro__ + to_serialize.__class__.__mro__ - ): - # Iterate through the custom serialization scheme first. - if ( - i < len(to_serialize.__class__.__mro__) - and step_type not in AnySerializer._custom_serialization_scheme - ) or ( - i >= len(to_serialize.__class__.__mro__) - and step_type not in AnySerializer._serialization_scheme - ): - continue - elif i < len(to_serialize.__class__.__mro__): - serializer = AnySerializer._get_custom_serializer( - step_type - ).get_instance() # pytype: disable=attribute-error - # If the Serializer is a custom Serializer, serialize the - # Custom Serializer first. - serializer_path = _get_custom_serializer_path_from_file_gcs_uri( - gcs_path, serializer.__class__.__name__ - ) - serializers.CloudPickleSerializer().serialize( - serializer, serializer_path - ) - else: - serializer = AnySerializer._get_predefined_serializer( - step_type - ).get_instance() - - try: - # Sometimes the returned gcs_path can be different from the - # passed-in gcs_path. The serialize() could add a suffix, for - # example. - gcs_path_returned = serializer.serialize( - to_serialize=to_serialize, gcs_path=gcs_path, **serializer_args - ) - # Don't fail if the gcs_path_returned is None, we'll keep using - # the original gcs_path. - gcs_path = gcs_path_returned or gcs_path - except Exception as e: # pylint: disable=broad-exception-caught - if serializer.__class__.__name__ != "CloudPickleSerializer": - _LOGGER.warning( - "Failed to serialize %s with %s due to error %s", - to_serialize.__class__.__name__, - serializer.__class__.__name__, - e, - ) - # Falling back to Serializers of super classes - continue - else: - raise serializers_base.SerializationError from e - - local_metadata = serializer._metadata.to_dict() - serializers_base.write_and_upload_data( - json.dumps(local_metadata).encode(), metadata_path - ) - - # Serialize the parameters if needed. - # TODO(b/296584472): remove the iteration once the serialization of - # nested objects can be automatically detected. 
- for arg_name, arg_value in kwargs.items(): - if type(arg_value) not in (int, float, bool, bytes, str, list, dict): - arg_serialized_gcs_path = get_arg_path_from_file_gcs_uri( - gcs_path, arg_name - ) - self.serialize(arg_value, arg_serialized_gcs_path) - serializer_args[arg_name] = SerializerArg( - gcs_path=arg_serialized_gcs_path - ) - else: - serializer_args[arg_name] = SerializerArg(value=arg_value) - - self._update_metadata_for_obj( - to_serialize, gcs_path, serializer_args=serializer_args - ) - - return local_metadata - - def deserialize(self, serialized_gcs_path: str, **kwargs) -> T: - """Routes the corresponding Serializer based on the metadata.""" - _LOGGER.debug("deserializing from %s.", serialized_gcs_path) - # Note: do not use "in" to check the key. Use "get()". - # This is because the "serialized" field is not of the built-in dict - # type. - if self._metadata.serialized.get(serialized_gcs_path, None) is None: - _LOGGER.warning( - "gcs_path %s not found in the metadata. " - "Make sure global serialization metadata is loaded.", - serialized_gcs_path, - ) - serializer_args = {} - else: - serializer_args = self._metadata.serialized[ - serialized_gcs_path - ].serializer_args - - for arg_name, serializer_arg in serializer_args.items(): - if serializer_arg.value is not None: - kwargs[arg_name] = serializer_arg.value - else: - kwargs[arg_name] = self.deserialize( - serialized_gcs_path=serializer_arg.gcs_path - ) - - local_metadata = serializers._get_metadata(serialized_gcs_path) - - _LOGGER.debug( - "deserializing from %s, metadata is %s", serialized_gcs_path, local_metadata - ) - - serializer_cls_name = local_metadata[SERIALIZATION_METADATA_SERIALIZER_KEY] - packages = local_metadata[SERIALIZATION_METADATA_DEPENDENCIES_KEY] - _check_dependency_versions(packages) - serializer_class = getattr( - serializers, serializer_cls_name, None - ) or globals().get(serializer_cls_name) - if not serializer_class: - # Serializer is an unregistered custom Serializer. - # Deserialize serializer. - serializer_path = _get_custom_serializer_path_from_file_gcs_uri( - serialized_gcs_path, serializer_cls_name - ) - serializer = serializers.CloudPickleSerializer().deserialize( - serialized_gcs_path=serializer_path - ) - else: - serializer = serializer_class.get_instance() - - for key, value in local_metadata.items(): - setattr(serializer.__class__._metadata, key, value) - - _LOGGER.debug( - "using serializer %s to deserialize from path %s, w/ kwargs %s", - serializer.__class__.__name__, - serialized_gcs_path, - kwargs, - ) - obj = serializer.deserialize(serialized_gcs_path=serialized_gcs_path, **kwargs) - if not serializer_class: - # Register the serializer - AnySerializer.register_custom(obj.__class__, serializer.__class__) - AnySerializer._instances[serializer.__class__] = serializer - if ( - self._metadata.serialized.get(serialized_gcs_path, None) is not None - ): # don't use "in" - self._metadata.serialized[serialized_gcs_path].obj = obj - else: - _LOGGER.warning( - "the gcs_path %s doesn't exist in the metadata." - " Please make sure the global metadata is loaded.", - serialized_gcs_path, - ) - self._metadata.serialized[serialized_gcs_path] = SerializedEntryMetadata( - serialization_id=id(obj), obj=obj - ) - return obj - - -def register_serializer( - to_serialize_type: Type[Any], serializer_cls: Type[serializers_base.Serializer] -): - """Registers a Serializer for a specific type. 
- - Example Usage: - - ``` - import vertexai - - # define a custom Serializer - class KerasCustomSerializer( - vertexai.preview.developer.Serializer): - _metadata = vertexai.preview.developer.SerializationMetadata() - - def serialize(self, to_serialize, gcs_path): - ... - def deserialize(self, gcs_path): - ... - - KerasCustomSerializer.register_requirements( - ['library1==1.0.0', 'library2<2.0']) - vertexai.preview.developer.register_serializer( - keras.models.Model, KerasCustomSerializer) - ``` - - Args: - to_serialize_type: The class that is supposed to be serialized with - the to-be-registered custom Serializer. - serializer_cls: The custom Serializer to be registered. - """ - any_serializer = AnySerializer() - any_serializer.register_custom( - to_serialize_type=to_serialize_type, serializer_cls=serializer_cls - ) diff --git a/vertexai/preview/_workflow/serialization_engine/serializers.py b/vertexai/preview/_workflow/serialization_engine/serializers.py deleted file mode 100644 index cf49f1f2b8..0000000000 --- a/vertexai/preview/_workflow/serialization_engine/serializers.py +++ /dev/null @@ -1,1422 +0,0 @@ -# -*- coding: utf-8 -*- - -# Copyright 2023 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -# pylint: disable=line-too-long, bad-continuation,protected-access -"""Defines the Serializer classes.""" - -import dataclasses -import functools -import json -import os -import pathlib -import pickle -import shutil -import tempfile -from typing import Any, Dict, Optional, Union, TYPE_CHECKING -import uuid - -from google.cloud.aiplatform.utils import gcs_utils -from vertexai.preview._workflow.shared import constants -from vertexai.preview._workflow.shared import ( - data_serializer_utils, - supported_frameworks, -) -from vertexai.preview._workflow.serialization_engine import ( - serializers_base, -) - -from packaging import version - -try: - # pylint: disable=g-import-not-at-top - import cloudpickle -except ImportError: - cloudpickle = None - -SERIALIZATION_METADATA_FRAMEWORK_KEY = "framework" - -if TYPE_CHECKING: - try: - from tensorflow import keras - import tensorflow as tf - - KerasModel = keras.models.Model - TFDataset = tf.data.Dataset - except ImportError: - keras = None - tf = None - KerasModel = Any - TFDataset = Any - import torch - - -_LIGHTNING_ROOT_DIR = "/vertex_lightning_root_dir/" -SERIALIZATION_METADATA_FILENAME = "serialization_metadata" - -# Map tf major.minor version to tfio version from https://pypi.org/project/tensorflow-io/ -_TFIO_VERSION_DICT = { - "2.3": "0.16.0", # Align with testing_extra_require: tensorflow >= 2.3.0 - "2.4": "0.17.1", - "2.5": "0.19.1", - "2.6": "0.21.0", - "2.7": "0.23.1", - "2.8": "0.25.0", - "2.9": "0.26.0", - "2.10": "0.27.0", - "2.11": "0.31.0", - "2.12": "0.32.0", - "2.13": "0.34.0", # TODO(b/295580335): Support TF 2.13 -} -DEFAULT_TENSORFLOW_BATCHSIZE = 32 - - -def get_uri_prefix(gcs_uri: str) -> str: - """Gets the directory of the gcs_uri. 
- - Example: - 1) file uri: - _get_uri_prefix("gs:///directory/file.extension") == "gs:// - /directory/" - 2) folder uri: - _get_uri_prefix("gs:///parent_dir/dir") == "gs:/// - parent_dir/" - Args: - gcs_uri: A string starting with "gs://" that represent a gcs uri. - Returns: - The parent gcs directory in string format. - """ - # For tensorflow, the uri may be "gs://my-bucket/saved_model/" - if gcs_uri.endswith("/"): - gcs_uri = gcs_uri[:-1] - gcs_pathlibpath = pathlib.Path(gcs_uri) - file_name = gcs_pathlibpath.name - return gcs_uri[: -len(file_name)] - - -def get_metadata_path_from_file_gcs_uri(gcs_uri: str) -> str: - gcs_pathlibpath = pathlib.Path(gcs_uri) - prefix = get_uri_prefix(gcs_uri=gcs_uri) - return os.path.join( - prefix, - f"{SERIALIZATION_METADATA_FILENAME}_{gcs_pathlibpath.stem}.json", - ) - - -def _get_metadata(gcs_uri: str) -> Dict[str, Any]: - metadata_file = get_metadata_path_from_file_gcs_uri(gcs_uri) - if metadata_file.startswith("gs://"): - with tempfile.NamedTemporaryFile() as temp_file: - gcs_utils.download_file_from_gcs(metadata_file, temp_file.name) - with open(temp_file.name, mode="rb") as f: - metadata = json.load(f) - else: - with open(metadata_file, "rb") as f: - metadata = json.load(f) - - return metadata - - -def _is_valid_gcs_path(path: str) -> bool: - """checks if a path is a valid gcs path. - - Args: - path (str): - Required. A file path. - - Returns: - A boolean that indicates whether the path is a valid gcs path. - """ - return path.startswith(("gs://", "/gcs/", "gcs/")) - - -def _load_torch_model(path: str, map_location: "torch.device") -> "torch.nn.Module": - import torch - - try: - return torch.load(path, map_location=map_location) - except Exception: - return torch.load(path, map_location=torch.device("cpu")) - - -class KerasModelSerializationMetadata(serializers_base.SerializationMetadata): - save_format: str = "keras" - - def to_dict(self): - dct = super().to_dict() - dct.update({"save_format": self.save_format}) - return dct - - -def _get_temp_file_or_dir(is_file: bool = True, file_suffix: Optional[str] = None): - return ( - tempfile.NamedTemporaryFile(suffix=file_suffix) - if is_file - else tempfile.TemporaryDirectory() - ) - - -class KerasModelSerializer(serializers_base.Serializer): - """A serializer for tensorflow.keras.models.Model objects.""" - - _metadata: KerasModelSerializationMetadata = KerasModelSerializationMetadata( - serializer="KerasModelSerializer" - ) - - def serialize( - self, to_serialize: "keras.models.Model", gcs_path: str, **kwargs # noqa: F821 - ) -> str: # pytype: disable=invalid-annotation - """Serializes a tensorflow.keras.models.Model to a gcs path. - - Args: - to_serialize (keras.models.Model): - Required. A Keras Model object. - gcs_path (str): - Required. A GCS uri that the model will be saved to. - - Returns: - The GCS uri. - - Raises: - ValueError: if `gcs_path` is not a valid GCS uri. 
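The path helpers above place a metadata JSON file next to each serialized artifact, named after the artifact's stem. A runnable sketch of the same naming scheme (the bucket and object names are made up):

```python
import os
import pathlib

METADATA_FILENAME = "serialization_metadata"


def uri_prefix(gcs_uri: str) -> str:
    """Return the parent 'directory' of a gs:// file or folder URI."""
    gcs_uri = gcs_uri.rstrip("/")
    name = pathlib.Path(gcs_uri).name
    return gcs_uri[: -len(name)]


def metadata_path(gcs_uri: str) -> str:
    """The metadata JSON sits next to the artifact, keyed by the artifact's stem."""
    stem = pathlib.Path(gcs_uri).stem
    return os.path.join(uri_prefix(gcs_uri), f"{METADATA_FILENAME}_{stem}.json")


print(uri_prefix("gs://my-bucket/models/model.keras"))   # gs://my-bucket/models/
print(metadata_path("gs://my-bucket/models/model.keras"))
# gs://my-bucket/models/serialization_metadata_model.json
```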
- """ - save_format = kwargs.get("save_format", "keras") - if not _is_valid_gcs_path(gcs_path): - raise ValueError(f"Invalid gcs path: {gcs_path}") - - KerasModelSerializer._metadata.dependencies = ( - supported_frameworks._get_deps_if_tensorflow_model(to_serialize) - ) - KerasModelSerializer._metadata.save_format = save_format - - if not gcs_path.endswith(".keras") and save_format == "keras": - gcs_path = gcs_path + ".keras" - if not gcs_path.endswith(".h5") and save_format == "h5": - gcs_path = gcs_path + ".h5" - - is_file = save_format != "tf" - if gcs_path.startswith("gs://"): - # For tf (saved_model) format, the serialized data is a directory, - # while for keras and h5 formats, the serialized data is a file. - with _get_temp_file_or_dir( - is_file=is_file, file_suffix=f".{save_format}" - ) as temp_file_or_dir: - to_serialize.save( - temp_file_or_dir.name if is_file else temp_file_or_dir, - save_format=save_format, - ) - gcs_utils.upload_to_gcs( - temp_file_or_dir.name if is_file else temp_file_or_dir, gcs_path - ) - else: - to_serialize.save(gcs_path, save_format=save_format) - return gcs_path - - def deserialize( - self, serialized_gcs_path: str, **kwargs - ) -> "keras.models.Model": # noqa: F821 - """Deserialize a tensorflow.keras.models.Model given the gcs file name. - - Args: - serialized_gcs_path (str): - Required. A GCS path to the serialized file. - - Returns: - A Keras Model. - - Raises: - ValueError: if `serialized_gcs_path` is not a valid GCS uri. - ImportError: if tensorflow is not installed. - """ - del kwargs - if not _is_valid_gcs_path(serialized_gcs_path): - raise ValueError(f"Invalid gcs path: {serialized_gcs_path}") - - metadata = _get_metadata(serialized_gcs_path) - # For backward compatibility, if the metadata doesn't contain - # save_format, we assume the model was saved as saved_model format. - save_format = metadata.get("save_format", "tf") - - try: - from tensorflow import keras - - if save_format == "keras" and not serialized_gcs_path.endswith(".keras"): - serialized_gcs_path = serialized_gcs_path + ".keras" - if save_format == "h5" and not serialized_gcs_path.endswith(".h5"): - serialized_gcs_path = serialized_gcs_path + ".h5" - # For tf (saved_model) format, the serialized data is a directory, - # while for keras and h5 formats, the serialized data is a file. - is_file = save_format != "tf" - if serialized_gcs_path.startswith("gs://"): - with _get_temp_file_or_dir( - is_file=is_file, file_suffix=f".{save_format}" - ) as temp_file_or_dir: - if is_file: - gcs_utils.download_file_from_gcs( - serialized_gcs_path, temp_file_or_dir.name - ) - return keras.models.load_model(temp_file_or_dir.name) - else: - gcs_utils.download_from_gcs( - serialized_gcs_path, temp_file_or_dir - ) - return keras.models.load_model(temp_file_or_dir) - else: - return keras.models.load_model(serialized_gcs_path) - except ImportError as e: - raise ImportError("tensorflow is not installed.") from e - - -class KerasHistoryCallbackSerializer(serializers_base.Serializer): - """A serializer for tensorflow.keras.callbacks.History objects.""" - - _metadata: serializers_base.SerializationMetadata = ( - serializers_base.SerializationMetadata( - serializer="KerasHistoryCallbackSerializer" - ) - ) - - def serialize(self, to_serialize, gcs_path: str, **kwargs): - """Serializes a keras History callback to a gcs path. - - Args: - to_serialize (keras.callbacks.History): - Required. A History object. - gcs_path (str): - Required. A GCS uri that History object will be saved to. - - Returns: - The GCS uri. 
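The Keras serializer above normalizes the target path by `save_format`: `keras` and `h5` are single files and get the matching suffix, while the `tf` SavedModel format is a directory. A pure-Python sketch of that decision, with no Keras dependency:

```python
from typing import Tuple


def resolve_save_target(gcs_path: str, save_format: str = "keras") -> Tuple[str, bool]:
    """Return (adjusted_path, is_single_file) for a given Keras save format."""
    if save_format == "keras" and not gcs_path.endswith(".keras"):
        gcs_path += ".keras"
    elif save_format == "h5" and not gcs_path.endswith(".h5"):
        gcs_path += ".h5"
    # "tf" (SavedModel) writes a directory rather than a single file.
    return gcs_path, save_format != "tf"


print(resolve_save_target("gs://my-bucket/model"))        # ('gs://my-bucket/model.keras', True)
print(resolve_save_target("gs://my-bucket/model", "tf"))  # ('gs://my-bucket/model', False)
```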
- - Raises: - ValueError: if `gcs_path` is not a valid GCS uri. - """ - del kwargs - if not _is_valid_gcs_path(gcs_path): - raise ValueError(f"Invalid gcs path: {gcs_path}") - - KerasHistoryCallbackSerializer._metadata.dependencies = ["cloudpickle"] - - to_serialize_dict = to_serialize.__dict__ - del to_serialize_dict["model"] - with open(gcs_path, "wb") as f: - cloudpickle.dump(to_serialize_dict, f) - - return gcs_path - - def deserialize(self, serialized_gcs_path: str, **kwargs): - """Deserialize a keras.callbacks.History given the gcs file name. - - Args: - serialized_gcs_path (str): - Required. A GCS path to the serialized file. - - Returns: - A keras.callbacks.History object. - - Raises: - ValueError: if `serialized_gcs_path` is not a valid GCS uri. - """ - from tensorflow import keras - - if not _is_valid_gcs_path(serialized_gcs_path): - raise ValueError(f"Invalid gcs path: {serialized_gcs_path}") - model = kwargs.get("model", None) - # Only "model" is needed. - del kwargs - - history_dict = {} - if serialized_gcs_path.startswith("gs://"): - with tempfile.NamedTemporaryFile() as temp_file: - gcs_utils.download_file_from_gcs(serialized_gcs_path, temp_file.name) - with open(temp_file.name, mode="rb") as f: - history_dict = cloudpickle.load(f) - else: - with open(serialized_gcs_path, mode="rb") as f: - history_dict = cloudpickle.load(f) - - history_obj = keras.callbacks.History() - - for attr_name, attr_value in history_dict.items(): - setattr(history_obj, attr_name, attr_value) - - if model: - history_obj.set_model(model) - - return history_obj - - -class SklearnEstimatorSerializer(serializers_base.Serializer): - """A serializer that uses pickle to save/load sklearn estimators.""" - - _metadata: serializers_base.SerializationMetadata = ( - serializers_base.SerializationMetadata(serializer="SklearnEstimatorSerializer") - ) - - def serialize( - self, - to_serialize: "sklearn.base.BaseEstimator", # noqa: F821 - gcs_path: str, - **kwargs, - ) -> str: - """Serializes a sklearn estimator to a gcs path. - - Args: - to_serialize (sklearn.base.BaseEstimator): - Required. A sklearn estimator. - gcs_path (str): - Required. A GCS uri that the estimator will be saved to. - - Returns: - The GCS uri. - - Raises: - ValueError: if `gcs_path` is not a valid GCS uri. - """ - del kwargs - if not _is_valid_gcs_path(gcs_path): - raise ValueError(f"Invalid gcs path: {gcs_path}") - - SklearnEstimatorSerializer._metadata.dependencies = ( - supported_frameworks._get_deps_if_sklearn_model(to_serialize) - ) - serialized = pickle.dumps(to_serialize, protocol=constants.PICKLE_PROTOCOL) - serializers_base.write_and_upload_data(data=serialized, gcs_filename=gcs_path) - - return gcs_path - - def deserialize( - self, serialized_gcs_path: str, **kwargs - ) -> "sklearn.base.BaseEstimator": # noqa: F821 - """Deserialize a sklearn estimator given the gcs file name. - - Args: - serialized_gcs_path (str): - Required. A GCS path to the serialized file. - - Returns: - A sklearn estimator. - - Raises: - ValueError: if `serialized_gcs_path` is not a valid GCS uri. 
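The History-callback serializer above works around the unpicklable `model` reference by dumping the callback's `__dict__` without it and reattaching the model after load. The same pattern in miniature, using a stand-in class instead of a real Keras callback (all names here are illustrative):

```python
import pickle


class FakeHistory:
    """Stand-in for keras.callbacks.History: holds metrics plus a model reference."""

    def __init__(self, model=None):
        self.model = model  # not picklable in the real Keras case
        self.history = {"loss": [0.9, 0.4], "accuracy": [0.6, 0.8]}
        self.epoch = [0, 1]


original = FakeHistory(model=object())

# Serialize everything except the model reference.
state = dict(original.__dict__)
state.pop("model", None)
blob = pickle.dumps(state)

# Deserialize into a fresh object, then reattach the model out of band.
restored = FakeHistory()
for name, value in pickle.loads(blob).items():
    setattr(restored, name, value)
restored.model = original.model

print(restored.history)  # {'loss': [0.9, 0.4], 'accuracy': [0.6, 0.8]}
```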
- """ - del kwargs - if not _is_valid_gcs_path(serialized_gcs_path): - raise ValueError(f"Invalid gcs path: {serialized_gcs_path}") - - if serialized_gcs_path.startswith("gs://"): - with tempfile.NamedTemporaryFile() as temp_file: - gcs_utils.download_file_from_gcs(serialized_gcs_path, temp_file.name) - with open(temp_file.name, mode="rb") as f: - obj = pickle.load(f) - else: - with open(serialized_gcs_path, mode="rb") as f: - obj = pickle.load(f) - - return obj - - -class TorchModelSerializer(serializers_base.Serializer): - """A serializer for torch.nn.Module objects.""" - - _metadata: serializers_base.SerializationMetadata = ( - serializers_base.SerializationMetadata(serializer="TorchModelSerializer") - ) - - def serialize( - self, to_serialize: "torch.nn.Module", gcs_path: str, **kwargs - ) -> str: - """Serializes a torch.nn.Module to a gcs path. - - Args: - to_serialize (torch.nn.Module): - Required. A PyTorch model object. - gcs_path (str): - Required. A GCS uri that the model will be saved to. - - Returns: - The GCS uri. - - Raises: - ValueError: if `gcs_path` is not a valid GCS uri. - """ - import torch - - del kwargs - if not _is_valid_gcs_path(gcs_path): - raise ValueError(f"Invalid gcs path: {gcs_path}") - - TorchModelSerializer._metadata.dependencies = ( - supported_frameworks._get_deps_if_torch_model(to_serialize) - ) - - if gcs_path.startswith("gs://"): - with tempfile.NamedTemporaryFile() as temp_file: - torch.save( - to_serialize, - temp_file.name, - pickle_module=cloudpickle, - pickle_protocol=constants.PICKLE_PROTOCOL, - ) - gcs_utils.upload_to_gcs(temp_file.name, gcs_path) - else: - torch.save( - to_serialize, - gcs_path, - pickle_module=cloudpickle, - pickle_protocol=constants.PICKLE_PROTOCOL, - ) - - return gcs_path - - def deserialize(self, serialized_gcs_path: str, **kwargs) -> "torch.nn.Module": - """Deserialize a torch.nn.Module given the gcs file name. - - Args: - serialized_gcs_path (str): - Required. A GCS path to the serialized file. - - Returns: - A torch.nn.Module model. - - Raises: - ValueError: if `serialized_gcs_path` is not a valid GCS uri. - ImportError: if torch is not installed. - """ - del kwargs - if not _is_valid_gcs_path(serialized_gcs_path): - raise ValueError(f"Invalid gcs path: {serialized_gcs_path}") - - try: - import torch - except ImportError as e: - raise ImportError("torch is not installed.") from e - - # Get the default device in the local torch environment. - # If `set_default_device` hasn't been called, _GLOBAL_DEVICE_CONTEXT - # should be None, then we set map_location to None as well. 
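A hedged round-trip sketch of the `torch.save`/`torch.load` calls used by the serializer above; it assumes `torch` and `cloudpickle` are installed, and the model and file name are made up:

```python
import tempfile

import cloudpickle
import torch
from torch import nn

model = nn.Linear(4, 2)

with tempfile.NamedTemporaryFile(suffix=".pt") as tmp:
    # Serialize the whole module with cloudpickle, as the serializer above does,
    # so that locally defined model classes can be pickled as well.
    torch.save(model, tmp.name, pickle_module=cloudpickle, pickle_protocol=4)

    # An explicit map_location keeps the load working on CPU-only machines;
    # weights_only=False is needed on newer torch releases that default it to True.
    restored = torch.load(tmp.name, map_location=torch.device("cpu"), weights_only=False)

print(type(restored).__name__)  # Linear
```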
- map_location = None - # In torch 2.3.0, get_default_device is introduced - if hasattr(torch._GLOBAL_DEVICE_CONTEXT, "device_context") and hasattr( - torch, "get_default_device" - ): - map_location = torch.get_default_device() - # For older versions, we get default device from _GLOBAL_DEVICE_CONTEXT - elif hasattr(torch._GLOBAL_DEVICE_CONTEXT, "device"): - map_location = torch._GLOBAL_DEVICE_CONTEXT.device - - if serialized_gcs_path.startswith("gs://"): - with tempfile.NamedTemporaryFile() as temp_file: - gcs_utils.download_file_from_gcs(serialized_gcs_path, temp_file.name) - model = _load_torch_model(temp_file.name, map_location=map_location) - else: - model = _load_torch_model(serialized_gcs_path, map_location=map_location) - - return model - - -# TODO(b/289386023) Add unit tests for LightningTrainerSerialzier -class LightningTrainerSerializer(serializers_base.Serializer): - """A serializer for lightning.pytorch.Trainer objects.""" - - _metadata: serializers_base.SerializationMetadata = ( - serializers_base.SerializationMetadata(serializer="LightningTrainerSerializer") - ) - - def _serialize_to_local( - self, to_serialize: "lightning.pytorch.Trainer", path: str # noqa: F821 - ): - """Serializes a lightning.pytorch.Trainer to a local path. - - Args: - to_serialize (lightning.pytorch.Trainer): - Required. A lightning trainer object. - path (str): - Required. A local_path that the trainer will be saved to. - """ - # In remote environment, we store local accelerator connector and default root - # dir as attributes when we deserialize the trainer. And we need to serialize - # them in order to retrieve in local environment. - if getattr(to_serialize, "_vertex_local_accelerator_connector", None): - with open(f"{path}/local_accelerator_connector", "wb") as f: - cloudpickle.dump( - to_serialize._vertex_local_accelerator_connector, - f, - protocol=constants.PICKLE_PROTOCOL, - ) - delattr(to_serialize, "_vertex_local_accelerator_connector") - else: - with open(f"{path}/local_accelerator_connector", "wb") as f: - cloudpickle.dump( - to_serialize._accelerator_connector, - f, - protocol=constants.PICKLE_PROTOCOL, - ) - - if getattr(to_serialize, "_vertex_local_default_root_dir", None): - with open(f"{path}/local_default_root_dir", "wb") as f: - cloudpickle.dump( - to_serialize._vertex_local_default_root_dir, - f, - protocol=constants.PICKLE_PROTOCOL, - ) - delattr(to_serialize, "_vertex_local_default_root_dir") - else: - with open(f"{path}/local_default_root_dir", "wb") as f: - cloudpickle.dump( - to_serialize._default_root_dir, - f, - protocol=constants.PICKLE_PROTOCOL, - ) - - with open(f"{path}/trainer", "wb") as f: - cloudpickle.dump(to_serialize, f, protocol=constants.PICKLE_PROTOCOL) - - if os.path.exists(to_serialize.logger.root_dir): - shutil.copytree( - to_serialize.logger.root_dir, - f"{path}/{to_serialize.logger.name}", - dirs_exist_ok=True, - ) - - def serialize( - self, - to_serialize: "lightning.pytorch.Trainer", # noqa: F821 - gcs_path: str, - **kwargs, - ) -> str: - """Serializes a lightning.pytorch.Trainer to a gcs path. - - Args: - to_serialize (lightning.pytorch.Trainer): - Required. A lightning trainer object. - gcs_path (str): - Required. A GCS uri that the trainer will be saved to. - - Returns: - The GCS uri. - - Raises: - ValueError: if `gcs_path` is not a valid GCS uri. 
- """ - del kwargs - if not _is_valid_gcs_path(gcs_path): - raise ValueError(f"Invalid gcs path: {gcs_path}") - - LightningTrainerSerializer._metadata.dependencies = ( - supported_frameworks._get_deps_if_lightning_model(to_serialize) - + supported_frameworks._get_cloudpickle_deps() - ) - - if gcs_path.startswith("gs://"): - with tempfile.TemporaryDirectory() as temp_dir: - self._serialize_to_local(to_serialize, temp_dir) - gcs_utils.upload_to_gcs(temp_dir, gcs_path) - else: - os.makedirs(gcs_path) - self._serialize_to_local(to_serialize, gcs_path) - - return gcs_path - - def _deserialize_from_local( - self, path: str - ) -> "lightning.pytorch.Trainer": # noqa: F821 - """Deserialize a lightning.pytorch.Trainer given a local path. - - Args: - path (str): - Required. A local path to the serialized trainer. - - Returns: - A lightning.pytorch.Trainer object. - """ - with open(f"{path}/trainer", "rb") as f: - trainer = cloudpickle.load(f) - - if os.getenv("_IS_VERTEX_REMOTE_TRAINING") == "True": - # Store the logs in the cwd of remote environment. - trainer._default_root_dir = _LIGHTNING_ROOT_DIR - for logger in trainer.loggers: - # for TensorBoardLogger - if getattr(logger, "_root_dir", None): - logger._root_dir = trainer.default_root_dir - # for CSVLogger - if getattr(logger, "_save_dir", None): - logger._save_dir = trainer.default_root_dir - - # Store local accelerator connector and root dir as attributes, so that - # we can retrieve them in local environment. - with open(f"{path}/local_accelerator_connector", "rb") as f: - trainer._vertex_local_accelerator_connector = cloudpickle.load(f) - - with open(f"{path}/local_default_root_dir", "rb") as f: - trainer._vertex_local_default_root_dir = cloudpickle.load(f) - else: - with open(f"{path}/local_accelerator_connector", "rb") as f: - trainer._accelerator_connector = cloudpickle.load(f) - - with open(f"{path}/local_default_root_dir", "rb") as f: - trainer._default_root_dir = cloudpickle.load(f) - - for logger in trainer.loggers: - if getattr(logger, "_root_dir", None): - logger._root_dir = trainer.default_root_dir - if getattr(logger, "_save_dir", None): - logger._save_dir = trainer.default_root_dir - - for callback in trainer.checkpoint_callbacks: - callback.dirpath = os.path.join( - trainer.default_root_dir, - callback.dirpath.replace(_LIGHTNING_ROOT_DIR, ""), - ) - if callback.best_model_path: - callback.best_model_path = os.path.join( - trainer.default_root_dir, - callback.best_model_path.replace(_LIGHTNING_ROOT_DIR, ""), - ) - if callback.kth_best_model_path: - callback.kth_best_model_path = os.path.join( - trainer.default_root_dir, - callback.kth_best_model_path.replace(_LIGHTNING_ROOT_DIR, ""), - ) - if callback.last_model_path: - callback.last_model_path = os.path.join( - trainer.default_root_dir, - callback.last_model_path.replace(_LIGHTNING_ROOT_DIR, ""), - ) - - if os.path.exists(f"{path}/{trainer.logger.name}"): - shutil.copytree( - f"{path}/{trainer.logger.name}", - trainer.logger.root_dir, - dirs_exist_ok=True, - ) - - return trainer - - def deserialize( - self, serialized_gcs_path: str, **kwargs - ) -> "lightning.pytorch.Trainer": # noqa: F821 - """Deserialize a lightning.pytorch.Trainer given the gcs path. - - Args: - serialized_gcs_path (str): - Required. A GCS path to the serialized file. - - Returns: - A lightning.pytorch.Trainer object. - - Raises: - ValueError: if `serialized_gcs_path` is not a valid GCS uri. 
- """ - del kwargs - if not _is_valid_gcs_path(serialized_gcs_path): - raise ValueError(f"Invalid gcs path: {serialized_gcs_path}") - - if serialized_gcs_path.startswith("gs://"): - with tempfile.TemporaryDirectory() as temp_dir: - gcs_utils.download_from_gcs(serialized_gcs_path, temp_dir) - trainer = self._deserialize_from_local(temp_dir) - else: - trainer = self._deserialize_from_local(serialized_gcs_path) - - return trainer - - -class TorchDataLoaderSerializer(serializers_base.Serializer): - """A serializer for torch.utils.data.DataLoader objects.""" - - _metadata: serializers_base.SerializationMetadata = ( - serializers_base.SerializationMetadata(serializer="TorchDataLoaderSerializer") - ) - - def _serialize_to_local( - self, to_serialize: "torch.utils.data.DataLoader", path: str - ): - """Serializes a torch.utils.data.DataLoader to a local path. - - Args: - to_serialize (torch.utils.data.DataLoader): - Required. A pytorch dataloader object. - path (str): - Required. A local_path that the dataloader will be saved to. - """ - # save objects by cloudpickle - with open(f"{path}/dataset.cpkl", "wb") as f: - cloudpickle.dump( - to_serialize.dataset, f, protocol=constants.PICKLE_PROTOCOL - ) - - with open(f"{path}/collate_fn.cpkl", "wb") as f: - cloudpickle.dump( - to_serialize.collate_fn, f, protocol=constants.PICKLE_PROTOCOL - ) - - with open(f"{path}/worker_init_fn.cpkl", "wb") as f: - cloudpickle.dump( - to_serialize.worker_init_fn, f, protocol=constants.PICKLE_PROTOCOL - ) - - # save (str, int, float, bool) values into a json file - pass_through_args = { - "num_workers": to_serialize.num_workers, - "pin_memory": to_serialize.pin_memory, - "timeout": to_serialize.timeout, - "prefetch_factor": to_serialize.prefetch_factor, - "persistent_workers": to_serialize.persistent_workers, - "pin_memory_device": to_serialize.pin_memory_device, - } - - # dataloader.generator is a torch.Generator object that defined in c++ - # it cannot be serialized by cloudpickle, so we store its device information - # and re-instaintiate a new Generator object with this device when deserializing - pass_through_args["generator_device"] = ( - to_serialize.generator.device.type if to_serialize.generator else None - ) - - # batch_sampler option is mutually exclusive with batch_size, shuffle, - # sampler, and drop_last. - # for default batch sampler we store batch_size, drop_last, and sampler object - # but not batch sampler object. - import torch - - if isinstance(to_serialize.batch_sampler, torch.utils.data.BatchSampler): - pass_through_args["batch_size"] = to_serialize.batch_size - pass_through_args["drop_last"] = to_serialize.drop_last - - with open(f"{path}/sampler.cpkl", "wb") as f: - cloudpickle.dump( - to_serialize.sampler, f, protocol=constants.PICKLE_PROTOCOL - ) - # otherwise we only serialize batch sampler and skip batch_size, drop_last, - # and sampler object. - else: - with open(f"{path}/batch_sampler.cpkl", "wb") as f: - cloudpickle.dump( - to_serialize.batch_sampler, f, protocol=constants.PICKLE_PROTOCOL - ) - - with open(f"{path}/pass_through_args.json", "w") as f: - json.dump(pass_through_args, f) - - def serialize( - self, to_serialize: "torch.utils.data.DataLoader", gcs_path: str, **kwargs - ) -> str: - """Serializes a torch.utils.data.DataLoader to a gcs path. - - Args: - to_serialize (torch.utils.data.DataLoader): - Required. A pytorch dataloader object. - gcs_path (str): - Required. A GCS uri that the dataloader will be saved to. - - Returns: - The GCS uri. 
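The dataloader serializer above splits a `DataLoader` into cloudpickled components plus a JSON file of plain settings, then rebuilds it from those pieces. A simplified sketch of that split; it assumes `torch` and `cloudpickle` are installed and records only a few of the settings the real code captures.

```python
import json

import cloudpickle
import torch
from torch.utils.data import DataLoader, TensorDataset

loader = DataLoader(TensorDataset(torch.arange(10.0).unsqueeze(1)), batch_size=4)

# Components that may be arbitrary user objects or callables travel via cloudpickle.
pickled_dataset = cloudpickle.dumps(loader.dataset)

# Plain settings travel as JSON, like pass_through_args.json above (only a subset here).
settings = json.dumps({
    "batch_size": loader.batch_size,
    "num_workers": loader.num_workers,
    "drop_last": loader.drop_last,
    "pin_memory": loader.pin_memory,
})

# Rebuild the loader from the two pieces.
rebuilt = DataLoader(cloudpickle.loads(pickled_dataset), **json.loads(settings))
print(sum(len(batch[0]) for batch in rebuilt))  # 10
```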
- - Raises: - ValueError: if `gcs_path` is not a valid GCS uri. - """ - del kwargs - if not _is_valid_gcs_path(gcs_path): - raise ValueError(f"Invalid gcs path: {gcs_path}") - - TorchDataLoaderSerializer._metadata.dependencies = ( - supported_frameworks._get_deps_if_torch_dataloader(to_serialize) - ) - - if gcs_path.startswith("gs://"): - with tempfile.TemporaryDirectory() as temp_dir: - self._serialize_to_local(to_serialize, temp_dir) - gcs_utils.upload_to_gcs(temp_dir, gcs_path) - else: - os.makedirs(gcs_path) - self._serialize_to_local(to_serialize, gcs_path) - - return gcs_path - - def _deserialize_from_local(self, path: str) -> "torch.utils.data.DataLoader": - """Deserialize a torch.utils.data.DataLoader given a local path. - - Args: - path (str): - Required. A local path to the serialized dataloader. - - Returns: - A torch.utils.data.DataLoader object. - - Raises: - ImportError: if torch is not installed. - """ - try: - import torch - except ImportError as e: - raise ImportError( - f"torch is not installed and required to deserialize the file from {path}." - ) from e - - with open(f"{path}/pass_through_args.json", "r") as f: - kwargs = json.load(f) - - # re-instantiate Generator - if kwargs["generator_device"] is not None: - kwargs["generator"] = torch.Generator( - kwargs["generator_device"] if torch.cuda.is_available() else "cpu" - ) - kwargs.pop("generator_device") - - with open(f"{path}/dataset.cpkl", "rb") as f: - kwargs["dataset"] = cloudpickle.load(f) - - with open(f"{path}/collate_fn.cpkl", "rb") as f: - kwargs["collate_fn"] = cloudpickle.load(f) - - with open(f"{path}/worker_init_fn.cpkl", "rb") as f: - kwargs["worker_init_fn"] = cloudpickle.load(f) - - try: - with open(f"{path}/sampler.cpkl", "rb") as f: - kwargs["sampler"] = cloudpickle.load(f) - except FileNotFoundError: - pass - - try: - with open(f"{path}/batch_sampler.cpkl", "rb") as f: - kwargs["batch_sampler"] = cloudpickle.load(f) - except FileNotFoundError: - pass - - return torch.utils.data.DataLoader(**kwargs) - - def deserialize( - self, serialized_gcs_path: str, **kwargs - ) -> "torch.utils.data.DataLoader": - """Deserialize a torch.utils.data.DataLoader given the gcs path. - - Args: - serialized_gcs_path (str): - Required. A GCS path to the serialized file. - - Returns: - A torch.utils.data.DataLoader object. - - Raises: - ValueError: if `serialized_gcs_path` is not a valid GCS uri. - ImportError: if torch is not installed. 
- """ - del kwargs - if not _is_valid_gcs_path(serialized_gcs_path): - raise ValueError(f"Invalid gcs path: {serialized_gcs_path}") - - if serialized_gcs_path.startswith("gs://"): - with tempfile.TemporaryDirectory() as temp_dir: - gcs_utils.download_from_gcs(serialized_gcs_path, temp_dir) - dataloader = self._deserialize_from_local(temp_dir) - else: - dataloader = self._deserialize_from_local(serialized_gcs_path) - - return dataloader - - -class TFDatasetSerializer(serializers_base.Serializer): - """Serializer responsible for serializing/deserializing a tf.data.Dataset.""" - - _metadata: serializers_base.SerializationMetadata = ( - serializers_base.SerializationMetadata(serializer="TFDatasetSerializer") - ) - - def serialize( - self, to_serialize: "tf.data.Dataset", gcs_path: str, **kwargs # noqa: F821 - ) -> str: # noqa: F821 - del kwargs - import tensorflow as tf - - if not _is_valid_gcs_path(gcs_path): - raise ValueError(f"Invalid gcs path: {gcs_path}") - TFDatasetSerializer._metadata.dependencies = ( - supported_frameworks._get_deps_if_tensorflow_model(to_serialize) - ) - - try: - to_serialize.save(gcs_path) - except AttributeError: - tf.data.experimental.save(to_serialize, gcs_path) - return gcs_path - - def deserialize( - self, serialized_gcs_path: str, **kwargs - ) -> "tf.data.Dataset": # noqa: F821 - del kwargs - import tensorflow as tf - - try: - deserialized = tf.data.Dataset.load(serialized_gcs_path) - except AttributeError: - deserialized = tf.data.experimental.load(serialized_gcs_path) - return deserialized - - -class PandasDataSerializer(serializers_base.Serializer): - """Serializer for pandas DataFrames.""" - - _metadata: serializers_base.SerializationMetadata = ( - serializers_base.SerializationMetadata(serializer="PandasDataSerializer") - ) - - def serialize( - self, to_serialize: "pandas.DataFrame", gcs_path: str, **kwargs # noqa: F821 - ) -> str: - del kwargs - if not _is_valid_gcs_path(gcs_path): - raise ValueError(f"Invalid gcs path: {gcs_path}") - - PandasDataSerializer._metadata.dependencies = ( - supported_frameworks._get_deps_if_pandas_dataframe(to_serialize) - ) - - if gcs_path.startswith("gs://"): - with tempfile.NamedTemporaryFile() as temp_file: - to_serialize.to_parquet(temp_file.name) - temp_file.flush() - temp_file.seek(0) - - gcs_utils.upload_to_gcs(temp_file.name, gcs_path) - else: - to_serialize.to_parquet(gcs_path) - - def deserialize( - self, serialized_gcs_path: str, **kwargs - ) -> "pandas.DataFrame": # noqa: F821 - del kwargs - try: - import pandas as pd - except ImportError as e: - raise ImportError( - f"pandas is not installed and required to deserialize the file from {serialized_gcs_path}." 
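The pandas serializer above is essentially a Parquet round trip with a GCS upload in the middle. A local-only sketch, assuming `pandas` and a Parquet engine such as `pyarrow` are installed:

```python
import tempfile

import pandas as pd

df = pd.DataFrame({"feature": [1.0, 2.0, 3.0], "label": [0, 1, 0]})

with tempfile.NamedTemporaryFile(suffix=".parquet") as tmp:
    df.to_parquet(tmp.name)           # what serialize() does before uploading to GCS
    restored = pd.read_parquet(tmp.name)

print(restored.equals(df))  # True
```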
- ) from e - - if serialized_gcs_path.startswith("gs://"): - with tempfile.NamedTemporaryFile() as temp_file: - gcs_utils.download_file_from_gcs(serialized_gcs_path, temp_file.name) - return pd.read_parquet(temp_file.name) - else: - return pd.read_parquet(serialized_gcs_path) - - -class PandasDataSerializerDev(serializers_base.Serializer): - """Serializer responsible for serializing/deserializing a pandas DataFrame.""" - - _metadata: serializers_base.SerializationMetadata = ( - serializers_base.SerializationMetadata(serializer="PandasDataSerializerDev") - ) - - def __init__(self): - super().__init__() - self.helper = data_serializer_utils._Helper() - - def serialize( - self, to_serialize: "pandas.DataFrame", gcs_path: str, **kwargs # noqa: F821 - ) -> str: - del kwargs - PandasDataSerializerDev._metadata.dependencies = ( - supported_frameworks._get_deps_if_pandas_dataframe(to_serialize) - ) - try: - import pandas as pd - except ImportError as e: - raise ImportError( - f"pandas is not installed and required to serialize {to_serialize}." - ) from e - - try: - import pyarrow as pa - import pyarrow.parquet as pq - except ImportError as e: - raise ImportError( - f"pyarrow is not installed and required to serialize {to_serialize}." - ) from e - - try: - if not ( - isinstance(to_serialize.index, pd.MultiIndex) - or isinstance(to_serialize.columns, pd.MultiIndex) - ): - self.helper.create_placeholder_col_names(to_serialize) - self.helper.cast_int_to_str( - to_serialize, action=data_serializer_utils.ActionType.CAST_COL_NAME - ) - self.helper.cast_int_to_str( - to_serialize, action=data_serializer_utils.ActionType.CAST_ROW_INDEX - ) - self.helper.cast_int_to_str( - to_serialize, - action=data_serializer_utils.ActionType.CAST_CATEGORICAL, - ) - table = pa.Table.from_pandas(to_serialize) - custom_metadata = { - data_serializer_utils.df_restore_func_metadata_key.encode(): json.dumps( - self.helper.restore_df_actions - ).encode(), - data_serializer_utils.df_restore_func_args_metadata_key.encode(): json.dumps( - self.helper.restore_df_actions_args - ).encode(), - **table.schema.metadata, - } - table = table.replace_schema_metadata(custom_metadata) - - with tempfile.TemporaryDirectory() as temp_dir: - fp = os.path.join(temp_dir, f"{uuid.uuid4()}.parquet") - pq.write_table(table, fp, compression="GZIP") - gcs_utils.upload_to_gcs(fp, gcs_path) - finally: - # undo ad-hoc mutations on the dataframe - self.helper.restore_df_actions.reverse() - self.helper.restore_df_actions_args.reverse() - for func_str, args in zip( - self.helper.restore_df_actions, self.helper.restore_df_actions_args - ): - func = getattr(self.helper, func_str) - func(to_serialize, *args) if len(args) > 0 else func(to_serialize) - return gcs_path - - def deserialize( - self, serialized_gcs_path: str, **kwargs - ) -> "pandas.DataFrame": # noqa: F821 - try: - import pyarrow.parquet as pq - except ImportError as e: - raise ImportError( - "pyarrow is not installed and required to deserialize the file " - f"from {serialized_gcs_path}." 
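The dev serializer above stashes its restore instructions inside the Parquet schema metadata via pyarrow. A small sketch of that pattern; the `restore_funcs` key is a made-up stand-in for the `df_restore_func` metadata keys used above.

```python
import json
import tempfile

import pandas as pd
import pyarrow as pa
import pyarrow.parquet as pq

df = pd.DataFrame({"a": [1, 2, 3]})
table = pa.Table.from_pandas(df)

# Attach extra restore instructions as custom schema metadata alongside the
# pandas metadata that from_pandas already recorded.
extra = {b"restore_funcs": json.dumps(["restore_row_index"]).encode(), **table.schema.metadata}
table = table.replace_schema_metadata(extra)

with tempfile.TemporaryDirectory() as tmp:
    path = f"{tmp}/frame.parquet"
    pq.write_table(table, path, compression="GZIP")
    restored = pq.read_table(path)

print(json.loads(restored.schema.metadata[b"restore_funcs"]))  # ['restore_row_index']
```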
- ) from e - - del kwargs - restored_table = pq.read_table(serialized_gcs_path) - restored_df = restored_table.to_pandas() - - # get custom metadata - restore_func_array_json = restored_table.schema.metadata[ - data_serializer_utils.df_restore_func_metadata_key.encode() - ] - restore_func_array = json.loads(restore_func_array_json) - restore_func_array_args_json = restored_table.schema.metadata[ - data_serializer_utils.df_restore_func_args_metadata_key.encode() - ] - restore_func_array_args = json.loads(restore_func_array_args_json) - restore_func_array.reverse() - restore_func_array_args.reverse() - - for func_str, args in zip(restore_func_array, restore_func_array_args): - func = getattr(self.helper, func_str) - func(restored_df, *args) if len(args) > 0 else func(restored_df) - return restored_df - - -@dataclasses.dataclass -class BigframeSerializationMetadata(serializers_base.SerializationMetadata): - """Metadata of BigframeSerializer class. - - Stores extra framework attribute - """ - - framework: Optional[str] = None - - def to_dict(self): - dct = super().to_dict() - dct.update({SERIALIZATION_METADATA_FRAMEWORK_KEY: self.framework}) - return dct - - -class BigframeSerializer(serializers_base.Serializer): - """Serializer responsible for serializing/deserializing a BigFrames DataFrame. - - Serialization: All frameworks serialize bigframes.dataframe.DataFrame -> parquet (GCS) - Deserialization: Framework specific deserialize methods are called - """ - - _metadata: serializers_base.SerializationMetadata = BigframeSerializationMetadata( - serializer="BigframeSerializer", framework=None - ) - - def serialize( - self, - to_serialize: Union[ - "bigframes.dataframe.DataFrame", "pandas.DataFrame" # noqa: F821 - ], - gcs_path: str, - **kwargs, - ) -> str: - # All bigframe serializers will convert bigframes.dataframe.DataFrame --> parquet - if not _is_valid_gcs_path(gcs_path): - raise ValueError(f"Invalid gcs path: {gcs_path}") - - # Record the framework in metadata for deserialization - detected_framework = kwargs.get("framework") - BigframeSerializer._metadata.framework = detected_framework - - # Reset dependencies and custom_commands in case the framework is different - BigframeSerializer._metadata.dependencies = [] - BigframeSerializer._metadata.custom_commands = [] - - # Add dependencies based on framework - if detected_framework == "sklearn": - sklearn_deps = supported_frameworks._get_pandas_deps() - sklearn_deps += supported_frameworks._get_pyarrow_deps() - BigframeSerializer._metadata.dependencies += sklearn_deps - elif detected_framework == "torch": - # Install using custom_commands to avoid numpy dependency conflict - BigframeSerializer._metadata.custom_commands.append("pip install torchdata") - BigframeSerializer._metadata.custom_commands.append( - "pip install torcharrow" - ) - elif detected_framework == "tensorflow": - tensorflow_io_dep = "tensorflow-io==" + self._get_tfio_verison() - tensorflow_io_gcs_fs_dep = ( - "tensorflow-io-gcs-filesystem==" + self._get_tfio_verison() - ) - BigframeSerializer._metadata.dependencies.append(tensorflow_io_dep) - BigframeSerializer._metadata.dependencies.append(tensorflow_io_gcs_fs_dep) - - # Check if index.name is default and set index.name if not - if to_serialize.index.name and to_serialize.index.name != "index": - raise ValueError("Index name must be 'index'") - if to_serialize.index.name is None: - to_serialize.index.name = "index" - - # Convert bigframes.dataframe.DataFrame to Parquet (GCS) - parquet_gcs_path = gcs_path + "/*" # path is 
required to contain '*' - to_serialize.to_parquet(parquet_gcs_path, index=True) - - # Return original gcs_path to retrieve the metadata for later - return gcs_path - - def _get_tfio_verison(self): - import tensorflow as tf - - if tf.__version__ < "2.13.0": - raise ValueError("TensorFlow version < 2.13.0 is not supported.") - - major, minor, _ = version.Version(tf.__version__).release - tf_version = f"{major}.{minor}" - - if tf_version not in _TFIO_VERSION_DICT: - raise ValueError( - f"Tensorflow version {tf_version} is not supported for Bigframes." - + " Supported versions: tensorflow >= 2.3.0, <= 2.12.0." - ) - return _TFIO_VERSION_DICT[tf_version] - - def deserialize( - self, serialized_gcs_path: str, **kwargs - ) -> Union["pandas.DataFrame", "bigframes.dataframe.DataFrame"]: # noqa: F821 - detected_framework = BigframeSerializer._metadata.framework - if detected_framework == "sklearn": - return self._deserialize_sklearn(serialized_gcs_path) - elif detected_framework == "torch": - return self._deserialize_torch(serialized_gcs_path) - elif detected_framework == "tensorflow": - return self._deserialize_tensorflow( - serialized_gcs_path, kwargs.get("batch_size"), kwargs.get("target_col") - ) - else: - raise ValueError(f"Unsupported framework: {detected_framework}") - - def _deserialize_sklearn( - self, serialized_gcs_path: str - ) -> "pandas.DataFrame": # noqa: F821 - """Sklearn deserializes parquet (GCS) --> pandas.DataFrame - - By default, sklearn returns a numpy array which uses CloudPickleSerializer. - If a bigframes.dataframe.DataFrame is desired for the return type, - b/291147206 (cl/548228568) is required - - serialized_gcs_path is a folder containing one or more parquet files. - """ - # Deserialization at remote environment - try: - import pandas as pd - except ImportError as e: - raise ImportError( - f"pandas is not installed and required to deserialize the file from {serialized_gcs_path}." - ) from e - - # Deserialization always happens at remote, so gcs filesystem is mounted to /gcs/ - # pd.read_parquet auto-merges a directory of parquet files - pd_dataframe = pd.read_parquet(serialized_gcs_path) - - # Drop index now that ordering is guaranteed - if "index" in pd_dataframe.columns: - pd_dataframe.drop(columns=["index"], inplace=True) - - return pd_dataframe - - def _deserialize_torch(self, serialized_gcs_path: str) -> "torch.tensor": - """Torch deserializes parquet (GCS) --> torch.tensor - - serialized_gcs_path is a folder containing one or more parquet files. - """ - # Deserialization at remote environment - try: - import torch - except ImportError as e: - raise ImportError( - f"torch is not installed and required to deserialize the file from {serialized_gcs_path}." - ) from e - - try: - from torchdata.datapipes.iter import FileLister - except ImportError as e: - raise ImportError( - f"torchdata is not installed and required to deserialize the file from {serialized_gcs_path}." 
- ) from e - - # Deserialization always happens at remote, so gcs filesystem is mounted to /gcs/ - # TODO(b/295335262): Implement torch lazy read - source_dp = FileLister(serialized_gcs_path, masks="") - parquet_df_dp = source_dp.load_parquet_as_df() - - def preprocess(torch_df): - torch_df = torch_df.drop("index") - df_tensor = torch_df.to_tensor() - - # Convert from TorchStruct to Tensor - cols = [] - for i in range(len(df_tensor)): - col = df_tensor[i].values - col = col[:, None] - cols.append(col) - deserialized_tensor = torch.cat(cols, 1) - return deserialized_tensor - - parquet_df_dp = parquet_df_dp.map(preprocess) - - def reduce_tensors(a, b): - return torch.concat((a, b), axis=0) - - return functools.reduce(reduce_tensors, list(parquet_df_dp)) - - def _deserialize_tensorflow( - self, - serialized_gcs_path: str, - batch_size: Optional[int] = None, - target_col: Optional[str] = None, - ) -> "tf.data.Dataset": # noqa: F821 - """Tensorflow deserializes parquet (GCS) --> tf.data.Dataset - - serialized_gcs_path is a folder containing one or more parquet files. - """ - # Set default kwarg values - batch_size = batch_size or DEFAULT_TENSORFLOW_BATCHSIZE - target_col = target_col.encode("ASCII") if target_col else b"target" - - # Deserialization at remote environment - import tensorflow as tf - - if tf.__version__ < "2.13.0": - raise ValueError("TensorFlow version < 2.13.0 is not supported.") - - try: - import tensorflow_io as tfio - except ImportError as e: - raise ImportError( - f"tensorflow_io is not installed and required to deserialize the file from {serialized_gcs_path}." - ) from e - - # Deserialization always happens at remote, so gcs filesystem is mounted to /gcs/ - files = os.listdir(serialized_gcs_path + "/") - files = list( - map(lambda file_name: serialized_gcs_path + "/" + file_name, files) - ) - ds = tfio.IODataset.from_parquet(files[0]) - - for file_name in files[1:]: - ds_shard = tfio.IODataset.from_parquet(file_name) - ds = ds.concatenate(ds_shard) - - def map_fn(row): - target = row[target_col] - row = { - k: tf.expand_dims(v, -1) - for k, v in row.items() - if k != target_col and k != b"index" - } - - def reduce_fn(a, b): - return tf.concat((a, b), axis=0) - - return functools.reduce(reduce_fn, row.values()), target - - return ds.map(map_fn).batch(batch_size) - - -class CloudPickleSerializer(serializers_base.Serializer): - """Serializer that uses cloudpickle to serialize the object.""" - - _metadata: serializers_base.SerializationMetadata = ( - serializers_base.SerializationMetadata(serializer="CloudPickleSerializer") - ) - - def serialize(self, to_serialize: Any, gcs_path: str, **kwargs) -> str: - """Use cloudpickle to serialize a python object to a gcs file path. - - Args: - to_serialize (Any): - Required. A python object. - gcs_path (str): - Required. A GCS uri that the estimator will be saved to. - - Returns: - The GCS uri. - - Raises: - ValueError: if `gcs_path` is not a valid GCS uri. - """ - del kwargs - if not _is_valid_gcs_path(gcs_path): - raise ValueError(f"Invalid gcs path: {gcs_path}") - - CloudPickleSerializer._metadata.dependencies = ( - supported_frameworks._get_estimator_requirement(to_serialize) - ) - serialized = cloudpickle.dumps(to_serialize, protocol=constants.PICKLE_PROTOCOL) - serializers_base.write_and_upload_data(data=serialized, gcs_filename=gcs_path) - return gcs_path - - def deserialize(self, serialized_gcs_path: str, **kwargs) -> Any: - """Use cloudpickle to deserialize a python object given the object's gcs file path. 
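`CloudPickleSerializer` above exists because `cloudpickle` can serialize things the standard `pickle` module cannot, such as lambdas and closures created on the fly. A quick illustration, assuming `cloudpickle` is installed:

```python
import pickle

import cloudpickle


def make_transform(scale):
    # Returns a lambda closing over `scale`; plain pickle refuses to serialize it.
    return lambda x: scale * x


transform = make_transform(3)

try:
    pickle.dumps(transform)
except (pickle.PicklingError, AttributeError) as e:
    print(f"pickle fails: {e}")

restored = cloudpickle.loads(cloudpickle.dumps(transform, protocol=4))
print(restored(7))  # 21
```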
- - Args: - serialized_gcs_path (str): - Required. A GCS path to the serialized file. - - Returns: - A python object. - - Raises: - ValueError: if `serialized_gcs_path` is not a valid GCS uri. - """ - del kwargs - if not _is_valid_gcs_path(serialized_gcs_path): - raise ValueError(f"Invalid gcs path: {serialized_gcs_path}") - - if serialized_gcs_path.startswith("gs://"): - with tempfile.NamedTemporaryFile() as temp_file: - gcs_utils.download_file_from_gcs(serialized_gcs_path, temp_file.name) - with open(temp_file.name, mode="rb") as f: - obj = cloudpickle.load(f) - else: - with open(serialized_gcs_path, mode="rb") as f: - obj = cloudpickle.load(f) - - return obj diff --git a/vertexai/preview/_workflow/serialization_engine/serializers_base.py b/vertexai/preview/_workflow/serialization_engine/serializers_base.py deleted file mode 100644 index d5d4f65091..0000000000 --- a/vertexai/preview/_workflow/serialization_engine/serializers_base.py +++ /dev/null @@ -1,278 +0,0 @@ -# -*- coding: utf-8 -*- - -# Copyright 2023 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -# pylint: disable=line-too-long, bad-continuation,protected-access -"""Defines the Serializer classes.""" - -import abc -import dataclasses -import os -import pathlib -import tempfile -from typing import Any, Dict, List, Optional, Type, TypeVar, Union - -from google.cloud.aiplatform.utils import gcs_utils -from vertexai.preview._workflow.shared import data_structures - -T = TypeVar("T") -SERIALIZATION_METADATA_FILENAME = "serialization_metadata" -SERIALIZATION_METADATA_SERIALIZER_KEY = "serializer" -SERIALIZATION_METADATA_DEPENDENCIES_KEY = "dependencies" -SERIALIZATION_METADATA_CUSTOM_COMMANDS_KEY = "custom_commands" - - -SerializerArgs = data_structures.IdAsKeyDict - - -@dataclasses.dataclass -class SerializationMetadata: - """Metadata of Serializer classes. - - This is supposed to be a class attribute named `_metadata` of the Serializer - class. - - Example Usage: - ``` - import vertexai - - # define a custom Serializer - class KerasCustomSerializer( - vertexai.preview.developer.Serializer): - # make a metadata - _metadata = vertexai.preview.developer.SerializationMetadata() - - def serialize(self, to_serialize, gcs_path): - ... - def deserialize(self, gcs_path): - ... - ``` - """ - - serializer: Optional[str] = None - dependencies: List[str] = dataclasses.field(default_factory=list) - custom_commands: List[str] = dataclasses.field(default_factory=list) - - def to_dict(self): - return { - SERIALIZATION_METADATA_SERIALIZER_KEY: self.serializer, - SERIALIZATION_METADATA_DEPENDENCIES_KEY: self.dependencies, - SERIALIZATION_METADATA_CUSTOM_COMMANDS_KEY: self.custom_commands, - } - - def to_jsonable_dict(self): - return self.to_dict() - - -class SerializationError(Exception): - """Raised when the object fails to be serialized.""" - - pass - - -def write_and_upload_data(data: bytes, gcs_filename: str): - """Writes data to a local temp file and uploads the file to gcs. 
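The `SerializationMetadata` dataclass above is what ends up, via `to_dict`, in the metadata JSON written next to each artifact. A pared-down copy just to show the shape of that JSON; the field values below are invented.

```python
import dataclasses
import json
from typing import List, Optional


@dataclasses.dataclass
class Metadata:
    """Trimmed copy of SerializationMetadata, for illustration only."""
    serializer: Optional[str] = None
    dependencies: List[str] = dataclasses.field(default_factory=list)
    custom_commands: List[str] = dataclasses.field(default_factory=list)

    def to_dict(self):
        return {
            "serializer": self.serializer,
            "dependencies": self.dependencies,
            "custom_commands": self.custom_commands,
        }


meta = Metadata(serializer="SklearnEstimatorSerializer", dependencies=["scikit-learn==1.3.0"])
print(json.dumps(meta.to_dict(), indent=2))
```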
- - Args: - data (bytes): - Required. Bytes data to write. - gcs_filename (str): - Required. A gcs file path. - """ - if gcs_filename.startswith("gs://"): - with tempfile.NamedTemporaryFile() as temp_file: - temp_file.write(data) - temp_file.flush() - temp_file.seek(0) - - gcs_utils.upload_to_gcs(temp_file.name, gcs_filename) - else: - dirname = os.path.dirname(gcs_filename) - if not os.path.exists(dirname): - os.makedirs(dirname) - with open(gcs_filename, mode="wb") as f: - f.write(data) - - -def _get_uri_prefix(gcs_uri: str) -> str: - """Gets the directory of the gcs_uri. - - Example: - 1) file uri: - _get_uri_prefix("gs:///directory/file.extension") == "gs:// - /directory/" - 2) folder uri: - _get_uri_prefix("gs:///parent_dir/dir") == "gs:/// - parent_dir/" - Args: - gcs_uri: A string starting with "gs://" that represent a gcs uri. - Returns: - The parent gcs directory in string format. - """ - # For tensorflow, the uri may be "gs://my-bucket/saved_model/" - if gcs_uri.endswith("/"): - gcs_uri = gcs_uri[:-1] - gcs_pathlibpath = pathlib.Path(gcs_uri) - file_name = gcs_pathlibpath.name - return gcs_uri[: -len(file_name)] - - -def _get_metadata_path_from_file_gcs_uri(gcs_uri: str) -> str: - gcs_pathlibpath = pathlib.Path(gcs_uri) - prefix = _get_uri_prefix(gcs_uri=gcs_uri) - return os.path.join( - prefix, - f"{SERIALIZATION_METADATA_FILENAME}_{gcs_pathlibpath.stem}.json", - ) - - -def _get_custom_serializer_path_from_file_gcs_uri( - gcs_uri: str, serializer_name: str -) -> str: - prefix = _get_uri_prefix(gcs_uri=gcs_uri) - return os.path.join(prefix, f"{serializer_name}") - - -class Serializer(metaclass=abc.ABCMeta): - """Abstract class of serializers. - - custom Serializers should be subclasses of this class. - Example Usage: - - ``` - import vertexai - - # define a custom Serializer - class KerasCustomSerializer( - vertexai.preview.developer.Serializer): - _metadata = vertexai.preview.developer.SerializationMetadata() - - def serialize(self, to_serialize, gcs_path): - ... - def deserialize(self, gcs_path): - ... - - KerasCustomSerializer.register_requirements( - ['library1==1.0.0', 'library2<2.0']) - vertexai.preview.developer.register_serializer( - keras.models.Model, KerasCustomSerializer) - ``` - """ - - _serialization_scheme: Dict[Type[Any], Optional[Type["Serializer"]]] = {} - _custom_serialization_scheme: Dict[Type[Any], Optional[Type["Serializer"]]] = {} - # _instances holds the instance of each Serializer for each type. - _instances: Dict[Type["Serializer"], "Serializer"] = {} - _metadata: SerializationMetadata = SerializationMetadata() - - def __new__(cls): - try: - import cloudpickle # noqa:F401 - except ImportError as e: - raise ImportError( - "cloudpickle is not installed. Please call `pip install google-cloud-aiplatform[preview]`." 
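`Serializer.__new__` above caches one instance per concrete serializer class, which is what `get_instance` later returns. The caching pattern in isolation (class names here are illustrative):

```python
from typing import Dict, Type


class Base:
    # One shared instance per concrete subclass, as in Serializer.__new__ above.
    _instances: Dict[Type["Base"], "Base"] = {}

    def __new__(cls):
        if cls not in Base._instances:
            Base._instances[cls] = super().__new__(cls)
        return Base._instances[cls]

    @classmethod
    def get_instance(cls) -> "Base":
        return cls()


class PickleLike(Base):
    pass


print(PickleLike() is PickleLike.get_instance())  # True: same cached instance
```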
- ) from e - - if cls not in Serializer._instances: - Serializer._instances[cls] = super().__new__(cls) - if cls._metadata.serializer is None: - cls._metadata.serializer = cls.__name__ - return Serializer._instances[cls] - - @abc.abstractmethod - def serialize( - self, - to_serialize: T, - gcs_path: str, - **kwargs, - ) -> Union[Dict[str, Any], str]: # pytype: disable=invalid-annotation - raise NotImplementedError - - @abc.abstractmethod - def deserialize( - self, - serialized_gcs_path: str, - **kwargs, - ) -> T: # pytype: disable=invalid-annotation - raise NotImplementedError - - @classmethod - def _register( - cls, to_serialize_type: Type[Any], serializer_cls: Type["Serializer"] - ): - cls._serialization_scheme[to_serialize_type] = serializer_cls - - @classmethod - def register_custom( - cls, to_serialize_type: Type[Any], serializer_cls: Type["Serializer"] - ): - """Registers a custom serializer for a specific type. - - Example Usage: - ``` - # define a custom Serializer - class KerasCustomSerializer(serialization_engine.Serializer): - _metadata = serialization_engine.SerializationMetadata() - def serialize(self, to_serialize, gcs_path): - ... - def deserialize(self, gcs_path): - ... - - any_serializer = serialization_engine.AnySerializer() - any_serializer.register_custom(keras.models.Model, KerasCustomSerializer) - ``` - Args: - to_serialize_type: The class that is supposed to be serialized with - the to-be-registered custom Serializer. - serializer_cls: The custom Serializer to be registered. - """ - cls._custom_serialization_scheme[to_serialize_type] = serializer_cls - - @classmethod - def get_instance(cls) -> "Serializer": - if cls not in Serializer._instances: - Serializer._instances[cls] = cls() - return Serializer._instances[cls] - - @classmethod - def _dedupe_deps(cls): - # TODO(b/282719450): Consider letting the later specifier to overwrite - # earlier specifier for the same package, and automatically detecting - # the version if version is not specified. - cls._metadata.dependencies = list(dict.fromkeys(cls._metadata.dependencies)) - - @classmethod - def _dedupe_custom_commands(cls): - cls._metadata.custom_commands = list( - dict.fromkeys(cls._metadata.custom_commands) - ) - - @classmethod - def register_requirement(cls, required_package: str): - # TODO(b/280648121) Consider allowing the user to register the - # installation command so that we support installing packages not - # covered by PyPI in the remote machine. - cls._metadata.dependencies.append(required_package) - cls._dedupe_deps() - - @classmethod - def register_requirements(cls, requirements: List[str]): - cls._metadata.dependencies.extend(requirements) - cls._dedupe_deps() - - @classmethod - def register_custom_command(cls, custom_command: str): - cls._metadata.custom_commands.append(custom_command) - cls._dedupe_custom_commands() diff --git a/vertexai/preview/_workflow/shared/__init__.py b/vertexai/preview/_workflow/shared/__init__.py deleted file mode 100644 index 7d0a81ae8f..0000000000 --- a/vertexai/preview/_workflow/shared/__init__.py +++ /dev/null @@ -1,43 +0,0 @@ -# -*- coding: utf-8 -*- - -# Copyright 2023 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
-# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# - - -import dataclasses -import inspect -from typing import Any, Callable, Dict, Optional - -from vertexai.preview._workflow.shared import configs - - -@dataclasses.dataclass(frozen=True) -class _Invokable: - """Represents a single invokable method. - - method: The method to invoke. - bound_arguments: The arguments to use to invoke the method. - vertex_config: User-specified configs for Vertex services. - remote_executor: The executor that execute the method remotely. - remote_executor_kwargs: kwargs used in the remote executor. - instance: The instance the method is bound. - """ - - method: Callable[..., Any] - bound_arguments: inspect.BoundArguments - vertex_config: configs.VertexConfig - remote_executor: Callable[..., Any] - remote_executor_kwargs: Optional[Dict[str, Any]] = None - instance: Optional[Any] = None diff --git a/vertexai/preview/_workflow/shared/configs.py b/vertexai/preview/_workflow/shared/configs.py deleted file mode 100644 index 4d4153ecad..0000000000 --- a/vertexai/preview/_workflow/shared/configs.py +++ /dev/null @@ -1,373 +0,0 @@ -# -*- coding: utf-8 -*- - -# Copyright 2023 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -import dataclasses -from typing import List, Optional -from vertexai.preview._workflow.serialization_engine import ( - serializers_base, -) - - -@dataclasses.dataclass -class _BaseConfig: - """A class that holds configuration that can be shared across different remote services. - - Attributes: - display_name (str): - The display name of the remote job. - staging_bucket (str): - Base GCS directory of the remote job. All the input and - output artifacts will be saved here. If not provided a timestamped - directory in the default staging bucket will be used. - container_uri (str): - Uri of the training container image to use for remote job. - Support images in Artifact Registry, Container Registry, or Docker Hub. - machine_type (str): - The type of machine to use for remote training. - accelerator_type (str): - Hardware accelerator type. One of ACCELERATOR_TYPE_UNSPECIFIED, - NVIDIA_TESLA_A100, NVIDIA_TESLA_P100, NVIDIA_TESLA_V100, - NVIDIA_TESLA_K80, NVIDIA_TESLA_T4, NVIDIA_TESLA_P4 - accelerator_count (int): - The number of accelerators to attach to a worker replica. - worker_pool_specs (vertexai.preview.developer.remote_specs.WorkerPoolSpecs): - The worker pool specs configuration for a remote job. 
- """ - - display_name: Optional[str] = None - staging_bucket: Optional[str] = None - container_uri: Optional[str] = None - machine_type: Optional[str] = None - accelerator_type: Optional[str] = None - accelerator_count: Optional[int] = None - worker_pool_specs: Optional[ - "vertexai.preview.developer.remote_specs.WorkerPoolSpecs" # noqa: F821 - ] = None - - -@dataclasses.dataclass -class RemoteConfig(_BaseConfig): - """A class that holds the configuration for Vertex remote training. - - Example usage: - # Specify requirements - model.train.vertex.remote_config.requirements = [ - "requirement1==1.0.0", - "requirement2>=2.0.1", - ] - - # Specify custom commands to run before installing other requirements - model.train.vertex.remote_config.custom_commands = [ - "export SOME_CONSTANT=value", - ] - - # Specify the extra parameters needed for serializing objects. - from vertexai.preview.developer import SerializerArgs - - # You can put all the hashable objects with their arguments in the - # SerializerArgs all at once in a dict. Here we assume "model" is - # hashable. - model.train.vertex.remote_config.serializer_args = SerializerArgs({ - model: { - "extra_serializer_param1_for_model": param1_value, - "extra_serializer_param2_for_model": param2_value, - }, - hashable_obj2: { - "extra_serializer_param1_for_hashable2": param1_value, - "extra_serializer_param2_for_hashable2": param2_value, - }, - }) - # Or if the object to be serialized is unhashable, put them into the - # serializer_args one by one. If this is the only use case, there is - # no need to import `SerializerArgs`. Here we assume "X_train" and - # "y_train" is not hashable. - model.train.vertex.remote_config.serializer_args[X_train] = { - "extra_serializer_param1_for_X_train": param1_value, - "extra_serializer_param2_for_X_train": param2_value, - }, - model.train.vertex.remote_config.serializer_args[y_train] = { - "extra_serializer_param1_for_y_train": param1_value, - "extra_serializer_param2_for_y_train": param2_value, - } - - # Train the model as usual - model.train(X_train, y_train) - - Attributes: - enable_cuda (bool): - When set to True, Vertex will automatically choose a GPU image and - accelerators for the remote job and train the model on cuda devices. - You can also specify the image and accelerators by yourself through - `container_uri`, `accelerator_type`, `accelerator_count`. - Supported frameworks: keras, torch.nn.Module - Default configs: - container_uri=( - "pytorch/pytorch:2.0.0-cuda11.7-cudnn8-runtime" - or - "us-docker.pkg.dev/vertex-ai/training/tf-gpu.2-11.py310:latest" - ) - machine_type="n1-standard-16" - accelerator_type="NVIDIA_TESLA_P100" - accelerator_count=1 - enable_distributed (bool): - When set to True, Vertex will automatically choose a GPU or CPU - distributed training configuration depending on the value of `enable_cuda`. - You can also specify a custom configuration by yourself through `worker_pool_specs`. - Supported frameworks: keras (requires TensorFlow >= 2.12.0), torch.nn.Module - Default configs: - If `enable_cuda` = True, for both the `chief` and `worker` specs: - machine_type="n1-standard-16" - accelerator_type="NVIDIA_TESLA_P100" - accelerator_count=1 - If `enable_cuda` = False, for both the `chief` and `worker` specs: - machine_type="n1-standard-4" - replica_count=1 - enable_full_logs (bool): - When set to True, all the logs from the remote job will be shown locally. - Otherwise, only training related logs will be shown. 
- service_account (str): - Specifies the service account for running the remote job. To use - autologging feature, you need to set it to "gce", which refers - to the GCE service account, or set it to another service account. - Please make sure your own service account has the Storage Admin role - and Vertex AI User role. - requirements (List[str]): - List of python packages dependencies that will be installed in the remote - job environment. In most cases Vertex will handle the installation of - dependencies that are required for running the remote job. You can use - this field to specify extra packages to install in the remote environment. - custom_commands (List[str]): - List of custom commands to be run in the remote job environment. - These commands will be run before the requirements are installed. - serializer_args: serializers_base.SerializerArgs: - Map from object to extra arguments when serializing the object. The extra - arguments is a dictionary from the argument names to the argument values. - """ - - enable_cuda: bool = False - enable_distributed: bool = False - enable_full_logs: bool = False - service_account: Optional[str] = None - requirements: List[str] = dataclasses.field(default_factory=list) - custom_commands: List[str] = dataclasses.field(default_factory=list) - serializer_args: serializers_base.SerializerArgs = dataclasses.field( - default_factory=serializers_base.SerializerArgs - ) - - -@dataclasses.dataclass -class DistributedTrainingConfig(_BaseConfig): - """A class that holds the configs for a distributed training remote job. - - Attributes: - replica_count (int): - The number of worker replicas. Assigns 1 chief replica and - replica_count - 1 worker replicas. - boot_disk_type (str): - Type of the boot disk (default is `pd-ssd`). - Valid values: `pd-ssd` (Persistent Disk Solid State Drive) or - `pd-standard` (Persistent Disk Hard Disk Drive). - boot_disk_size_gb (int): - Size in GB of the boot disk (default is 100GB). - boot disk size must be within the range of [100, 64000]. - """ - - replica_count: Optional[int] = None - boot_disk_type: Optional[str] = None - boot_disk_size_gb: Optional[int] = None - - -@dataclasses.dataclass -class VertexConfig: - """A class that holds the configuration for the method wrapped by Vertex. - - Attributes: - remote (bool): - Whether or not this method will be executed remotely on Vertex. If not - set, Vertex will check the remote setting in `vertexai.preview.init(...)` - remote_config (RemoteConfig): - A class that holds the configuration for the remote job. - """ - - remote: Optional[bool] = None - remote_config: RemoteConfig = dataclasses.field(default_factory=RemoteConfig) - - def set_config( - self, - display_name: Optional[str] = None, - staging_bucket: Optional[str] = None, - container_uri: Optional[str] = None, - machine_type: Optional[str] = None, - accelerator_type: Optional[str] = None, - accelerator_count: Optional[int] = None, - worker_pool_specs: Optional[ - "vertexai.preview.developer.remote_specs.WorkerPoolSpecs" # noqa: F821 - ] = None, - enable_cuda: bool = False, - enable_distributed: bool = False, - enable_full_logs: bool = False, - service_account: Optional[str] = None, - requirements: List[str] = [], - custom_commands: List[str] = [], - replica_count: Optional[int] = None, - boot_disk_type: Optional[str] = None, - boot_disk_size_gb: Optional[int] = None, - ): - """Sets configuration attributes for a remote job. - - Calling this will overwrite any previously set job configuration attributes. 
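[Editor's note] As a concrete illustration of the distributed-training fields just listed, the sketch below builds a `DistributedTrainingConfig` with placeholder values. The import path is the internal module shown in this diff, and the numbers are arbitrary examples, not recommended settings.

```
from vertexai.preview._workflow.shared import configs

# Illustrative values only: 1 chief + 3 workers on larger SSD boot disks.
dist_config = configs.DistributedTrainingConfig(
    machine_type="n1-standard-16",
    replica_count=4,
    boot_disk_type="pd-ssd",
    boot_disk_size_gb=200,
)
```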
- - Example usage: - vertexai.init( - project=_TEST_PROJECT, - location=_TEST_LOCATION, - staging_bucket=_TEST_BUCKET_NAME, - ) - vertexai.preview.init(remote=True) - - LogisticRegression = vertexai.preview.remote(_logistic.LogisticRegression) - model = LogisticRegression() - - model.fit.vertex.set_config( - display_name="my-display-name", - staging_bucket="gs://my-bucket", - container_uri="gcr.io/custom-image, - ) - - Args: - display_name (str): - The display name of the remote job. - staging_bucket (str): - Base GCS directory of the remote job. All the input and - output artifacts will be saved here. If not provided a timestamped - directory in the default staging bucket will be used. - container_uri (str): - Uri of the training container image to use for remote job. - Support images in Artifact Registry, Container Registry, or Docker Hub. - machine_type (str): - The type of machine to use for remote training. - accelerator_type (str): - Hardware accelerator type. One of ACCELERATOR_TYPE_UNSPECIFIED, - NVIDIA_TESLA_A100, NVIDIA_TESLA_P100, NVIDIA_TESLA_V100, - NVIDIA_TESLA_K80, NVIDIA_TESLA_T4, NVIDIA_TESLA_P4 - accelerator_count (int): - The number of accelerators to attach to a worker replica. - worker_pool_specs (vertexai.preview.developer.remote_specs.WorkerPoolSpecs): - The worker pool specs configuration for a remote job. - enable_cuda (bool): - When set to True, Vertex will automatically choose a GPU image and - accelerators for the remote job and train the model on cuda devices. - This parameter is specifically for TrainingConfig. - You can also specify the image and accelerators by yourself through - `container_uri`, `accelerator_type`, `accelerator_count`. - Supported frameworks: keras, torch.nn.Module - Default configs: - container_uri="pytorch/pytorch:2.0.0-cuda11.7-cudnn8-runtime" or "tensorflow/tensorflow:2.12.0-gpu" - machine_type="n1-standard-16" - accelerator_type="NVIDIA_TESLA_P100" - accelerator_count=1 - enable_distributed (bool): - When set to True, Vertex will automatically choose a GPU or CPU - distributed training configuration depending on the value of `enable_cuda`. - You can also specify a custom configuration by yourself through `worker_pool_specs`. - This parameter is specifically for TrainingConfig. - Supported frameworks: keras (requires TensorFlow >= 2.12.0), torch.nn.Module - Default configs: - If `enable_cuda` = True, for both the `chief` and `worker` specs: - machine_type="n1-standard-16" - accelerator_type="NVIDIA_TESLA_P100" - accelerator_count=1 - If `enable_cuda` = False, for both the `chief` and `worker` specs: - machine_type="n1-standard-4" - replica_count=1 - enable_full_logs (bool): - When set to True, all the logs from the remote job will be shown locally. - Otherwise, only training related logs will be shown. - service_account (str): - Specifies the service account for running the remote job. To use - autologging feature, you need to set it to "gce", which refers - to the GCE service account, or set it to another service account. - Please make sure your own service account has the Storage Admin role - and Vertex AI User role. This parameter is specifically for TrainingConfig. - requirements (List[str]): - List of python packages dependencies that will be installed in the remote - job environment. In most cases Vertex will handle the installation of - dependencies that are required for running the remote job. You can use - this field to specify extra packages to install in the remote environment. 
- This parameter is specifically for TrainingConfig. - custom_commands (List[str]): - List of custom commands to be run in the remote job environment. - These commands will be run before the requirements are installed. - replica_count (int): - The number of worker replicas. Assigns 1 chief replica and - replica_count - 1 worker replicas. This is specifically for - DistributedTrainingConfig. - boot_disk_type (str): - Type of the boot disk (default is `pd-ssd`). - Valid values: `pd-ssd` (Persistent Disk Solid State Drive) or - `pd-standard` (Persistent Disk Hard Disk Drive). This is specifically for - DistributedTrainingConfig. - boot_disk_size_gb (int): - Size in GB of the boot disk (default is 100GB). - boot disk size must be within the range of [100, 64000]. This is specifically for - DistributedTrainingConfig. - """ - - # locals() contains a 'self' key in addition to function args - kwargs = locals() - - config = self.remote_config.__class__() - - for config_arg in kwargs: - if hasattr(config, config_arg): - setattr(config, config_arg, kwargs[config_arg]) - - # raise if a value was passed for an unsupported config attribute (i.e. boot_disk_type on TrainingConfig) - elif config_arg != "self" and kwargs[config_arg]: - raise ValueError( - f"{type(self.remote_config)} has no attribute {config_arg}." - ) - - self.remote_config = config - - -@dataclasses.dataclass -class PersistentResourceConfig: - """A class that holds persistent resource configuration during initialization. - - Attributes: - name (str): - The cluster name of the remote job. This value may be up to 63 - characters, and valid characters are `[a-z0-9_-]`. The first character - cannot be a number or hyphen. - resource_pool_specs (vertexai.preview.developer.remote_specs.ResourcePoolSpecs): - The worker pool specs configuration for a remote job. - service_account (str): - If intended for experiment autologging, this service account should - be specified and consistent with per instance service account, which - is configured in `model.fit.vertex.remote_config.service_account`. - disable (bool): - By default is False, meaning the remote execution runs on - the persistent cluster. If users want to disable it (so the remote - execution runs on an ephemeral cluster), set it as True. - """ - - name: Optional[str] = None - resource_pools: Optional[ - "vertexai.preview.developer.remote_specs.ResourcePool" # noqa: F821 - ] = None - service_account: Optional[str] = None - disable: Optional[bool] = False diff --git a/vertexai/preview/_workflow/shared/constants.py b/vertexai/preview/_workflow/shared/constants.py deleted file mode 100644 index 4c7e5cd644..0000000000 --- a/vertexai/preview/_workflow/shared/constants.py +++ /dev/null @@ -1,39 +0,0 @@ -# Copyright 2023 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# - -"""Constants used by vertexai.""" - -PICKLE_PROTOCOL = 4 - -_START_EXECUTION_MSG = "Start remote execution on Vertex..." -_END_EXECUTION_MSG = "Remote execution is completed." 
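[Editor's note] The `set_config` implementation above relies on a `locals()` snapshot to copy recognized arguments onto a fresh config object and reject unknown ones. A standalone sketch of that pattern, with hypothetical field names:

```
import dataclasses
from typing import Optional


@dataclasses.dataclass
class _Config:
    display_name: Optional[str] = None
    machine_type: Optional[str] = None


class _Wrapper:
    def __init__(self):
        self.remote_config = _Config()

    def set_config(self, display_name=None, machine_type=None, bogus=None):
        kwargs = locals()                  # includes 'self' plus every parameter
        config = self.remote_config.__class__()
        for name, value in kwargs.items():
            if hasattr(config, name):
                setattr(config, name, value)
            elif name != "self" and value:
                raise ValueError(f"{type(config)} has no attribute {name}.")
        self.remote_config = config        # previously set values are discarded


w = _Wrapper()
w.set_config(display_name="my-job")
print(w.remote_config.display_name)       # my-job
```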
- -_V2_0_WARNING_MSG = """ -After May 30, 2024, importing any code below will result in an error. -Please verify that you are explicitly pinning to a version of `google-cloud-aiplatform` -(e.g., google-cloud-aiplatform==[1.32.0, 1.49.0]) if you need to continue using this -library. - -from vertexai.preview import ( - init, - remote, - VertexModel, - register, - from_pretrained, - developer, - hyperparameter_tuning, - tabular_models, -) -""" diff --git a/vertexai/preview/_workflow/shared/data_serializer_utils.py b/vertexai/preview/_workflow/shared/data_serializer_utils.py deleted file mode 100644 index 43836e5919..0000000000 --- a/vertexai/preview/_workflow/shared/data_serializer_utils.py +++ /dev/null @@ -1,186 +0,0 @@ -# -*- coding: utf-8 -*- - -# Copyright 2023 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# - -from typing import List, Any, Union -from enum import Enum - -try: - import pandas as pd - - PandasData = pd.DataFrame -except ImportError: - pd = None - PandasData = Any - -df_restore_func_metadata_key = "restore_df_actions" -df_restore_func_args_metadata_key = "restore_df_actions_args" - - -class ActionType(str, Enum): - CAST_COL_NAME = "CAST_COL_NAME" - CAST_ROW_INDEX = "CAST_ROW_INDEX" - CAST_CATEGORICAL = "CAST_CATEGORICAL" - - -class _Helper: - def __init__(self): - if not pd: - raise ImportError( - "pandas is not installed and required for Pandas Serializer." - ) - self.restore_df_actions = [] - self.restore_df_actions_args = [] - self.restore_func_metadata_key = "restore_df_actions" - self.restore_func_args_metadata_key = "restore_df_actions_args" - - def create_placeholder_col_names(self, df: PandasData): - """Creates placeholder column names for dataframes without column names. - - Args: - df (pd.DataFrame): - Required. This is the dataframe to serialize. - """ - if isinstance(df.columns, pd.RangeIndex): - df.columns = [str(x) for x in df.columns] - self.restore_df_actions.append("remove_placeholder_col_names") - self.restore_df_actions_args.append([]) - - def remove_placeholder_col_names(self, df: PandasData): - df.columns = pd.RangeIndex(start=0, stop=len(df.columns), step=1) - - def _append_to_temp_indices( - self, temp_indices: List[str], name: Any, action: ActionType - ): - """ - This function is a helper for the cast_int_to_str function. - - Args: - temp_indices (List[str]): a temporary array of indices that keeps track - of the original values of the column or row indices. - - name (Any): the name of the column or row. Note that this could be any type, - but Vertex only handles integer-to-string casting. Users who attempt to - serialize Pandas dataframes with non-string or non-integer column/row indices - will encounter a runtime error from pyarrow. - - action (ActionType): the enum that tells the deserialization function - at runtime whether a row or a column index is being cast back. 
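[Editor's note] The placeholder-column-name helpers above exist because Parquet requires string column names. A short sketch of the round trip, assuming `pandas` and `pyarrow` are installed and a writable working directory:

```
import pandas as pd

df = pd.DataFrame([[1, 2], [3, 4]])          # columns are a RangeIndex: 0, 1
# df.to_parquet("data.parquet")              # would fail: Parquet needs string column names

df.columns = [str(c) for c in df.columns]    # placeholder names, as in create_placeholder_col_names
df.to_parquet("data.parquet")

restored = pd.read_parquet("data.parquet")
restored.columns = pd.RangeIndex(start=0, stop=len(restored.columns), step=1)
```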
- """ - if isinstance(name, int): - temp_indices.append(str(name)) - self.restore_df_actions.append("cast_str_to_int") - self.restore_df_actions_args.append([action, str(name)]) - else: - temp_indices.append(name) - - def cast_int_to_str(self, df: PandasData, action: ActionType): - """ - This function casts integers to strings depending on the action type. - - In the cases of casting integer-indexed columns or rows, the function - will modify the dataframe and append to restore_df_actions that will cast - the column and row indices back to their original data types. - - In the case of handling categorical columns, the function will keep track - of the column names with integers being the primitive data type, preserve - their orders if the column is ordered, and add relevant metadata to the - restore_df_actions and restore_df_actions_args arrays. - - Args: - df (pd.DataFrame): - Required. This is the dataframe to serialize. - action (enum.Enum): - Required. One of [CAST_COL_NAME, CAST_ROW_NAME, CAST_CATEGORICAL] - """ - temp_indices = [] - if action == ActionType.CAST_COL_NAME: - for i in range(len(df.columns)): - self._append_to_temp_indices(temp_indices, df.columns[i], action) - df.columns = temp_indices - elif action == ActionType.CAST_ROW_INDEX: - for i in range(len(df.index)): - self._append_to_temp_indices(temp_indices, df.index[i], action) - df.index = temp_indices - elif action == ActionType.CAST_CATEGORICAL: - columns_to_cast = [] - column_orders = [] - columns_to_reorder = [] - for col_name in df.select_dtypes(include=["category"]): - if df[col_name].cat.ordered: - column_orders.append(df[col_name].cat.categories.values.tolist()) - columns_to_reorder.append(col_name) - # cast the columns with integers as categories - try: - int(df.at[df[col_name].first_valid_index(), col_name]) - columns_to_cast.append(col_name) - # pass on the columns that are non-integers - except ValueError: - pass - self.restore_df_actions.append("restore_category_order") - self.restore_df_actions_args.append([columns_to_reorder, column_orders]) - - self.restore_df_actions.append("cast_str_to_int") - self.restore_df_actions_args.append([action, columns_to_cast]) - - @staticmethod - def cast_str_to_int( - df: PandasData, - action: ActionType, - index_name_or_columns: Union[List[str], str] = None, - ): - """ - This function is used by the deserialization function to undo any temp - workarounds applied to the dataframe during serialization. - - Args: - df (pd.DataFrame): - Required. This is the dataframe to deserialize. - action (enum.Enum): - Required. One of [CAST_COL_NAME, CAST_ROW_NAME, CAST_CATEGORICAL] - index_name_or_columns (Union[List[str], str]): - Required. This is the list of index names to cast back to int - in the case of restoring row or column indices. In the case of - categorical columns, this is the list of column names to restore. 
- """ - restored_indices = [] - if action == ActionType.CAST_COL_NAME: - for i in range(len(df.columns)): - if df.columns[i] == index_name_or_columns: - restored_indices.append(int(index_name_or_columns)) - else: - restored_indices.append(df.columns[i]) - df.columns = restored_indices - elif action == ActionType.CAST_ROW_INDEX: - for i in range(len(df.index)): - if df.index[i] == index_name_or_columns: - restored_indices.append(int(index_name_or_columns)) - else: - restored_indices.append(df.index[i]) - df.index = restored_indices - elif action == ActionType.CAST_CATEGORICAL: - for column in index_name_or_columns: - df[column] = df[column].astype("int", errors="ignore") - df[column] = df[column].astype("category") - - @staticmethod - def restore_category_order( - df: PandasData, columns: List[str], categories: List[Any] - ): - for (column, category) in zip(columns, categories): - df[column] = df[column].cat.set_categories( - new_categories=category, ordered=True - ) diff --git a/vertexai/preview/_workflow/shared/data_structures.py b/vertexai/preview/_workflow/shared/data_structures.py deleted file mode 100644 index 48c6ebd2d7..0000000000 --- a/vertexai/preview/_workflow/shared/data_structures.py +++ /dev/null @@ -1,68 +0,0 @@ -# -*- coding: utf-8 -*- - -# Copyright 2023 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# - - -class IdAsKeyDict(dict): - """Customized dict that maps each key to its id before storing the data. - - This subclass of dict still allows one to use the original key during - subscription ([] operator) or via `get()` method. But under the hood, the - keys are the ids of the original keys. 
- - Example: - # add some hashable objects (key1 and key2) to the dict - id_as_key_dict = IdAsKeyDict({key1: value1, key2: value2}) - # add a unhashable object (key3) to the dict - id_as_key_dict[key3] = value3 - - # can access the value via subscription using the original key - assert id_as_key_dict[key1] == value1 - assert id_as_key_dict[key2] == value2 - assert id_as_key_dict[key3] == value3 - # can access the value via get method using the original key - assert id_as_key_dict.get(key1) == value1 - assert id_as_key_dict.get(key2) == value2 - assert id_as_key_dict.get(key3) == value3 - # but the original keys are not in the dict - the ids are - assert id(key1) in id_as_key_dict - assert id(key2) in id_as_key_dict - assert id(key3) in id_as_key_dict - assert key1 not in id_as_key_dict - assert key2 not in id_as_key_dict - assert key3 not in id_as_key_dict - """ - - def __init__(self, *args, **kwargs): - internal_dict = {} - for arg in args: - for k, v in arg.items(): - internal_dict[id(k)] = v - for k, v in kwargs.items(): - internal_dict[id(k)] = v - super().__init__(internal_dict) - - def __getitem__(self, _key): - internal_key = id(_key) - return super().__getitem__(internal_key) - - def __setitem__(self, _key, _value): - internal_key = id(_key) - return super().__setitem__(internal_key, _value) - - def get(self, key, default=None): - internal_key = id(key) - return super().get(internal_key, default) diff --git a/vertexai/preview/_workflow/shared/model_utils.py b/vertexai/preview/_workflow/shared/model_utils.py deleted file mode 100644 index f64a3fa25d..0000000000 --- a/vertexai/preview/_workflow/shared/model_utils.py +++ /dev/null @@ -1,489 +0,0 @@ -# -*- coding: utf-8 -*- - -# Copyright 2023 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# - -"""Model utils. - -Push trained model from local to Model Registry, and pull Model Registry model -to local for uptraining. 
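[Editor's note] The id-keyed lookup above matters because common training inputs such as `pandas.DataFrame` are unhashable and cannot be ordinary dict keys. A tiny sketch of the idea, with a made-up value:

```
import pandas as pd

X_train = pd.DataFrame({"x": [1, 2, 3]})    # DataFrames are unhashable
serializer_args = {}

# Keying by id() lets unhashable objects act as lookup keys, as IdAsKeyDict does.
serializer_args[id(X_train)] = {"sample_weight_col": "weight"}   # hypothetical extra args
assert serializer_args[id(X_train)]["sample_weight_col"] == "weight"
```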
-""" - -import os -import re -from typing import Any, Dict, Optional, Union -import warnings - -from google.cloud import aiplatform -from google.cloud.aiplatform import base -from google.cloud.aiplatform import utils -from google.cloud.aiplatform import jobs as aiplatform_jobs -import vertexai -from vertexai.preview._workflow import driver -from vertexai.preview._workflow.serialization_engine import ( - any_serializer, - serializers_base, -) -from vertexai.preview._workflow.shared import constants - -# These need to be imported to be included in _ModelGardenModel.__init_subclass__ -from vertexai.language_models import ( - _language_models, -) # pylint:disable=unused-import -from vertexai._model_garden import _model_garden_models -from google.cloud.aiplatform import _publisher_models -from vertexai.preview._workflow.executor import training -from google.cloud.aiplatform.compat.types import job_state as gca_job_state - - -_SKLEARN_FILE_NAME = "model.pkl" -_TF_DIR_NAME = "saved_model" -_PYTORCH_FILE_NAME = "model.mar" -_REWRAPPER_NAME = "rewrapper" - -_CUSTOM_JOB_DIR = "custom_job" -_INPUT_DIR = "input" -_OUTPUT_DIR = "output" -_OUTPUT_ESTIMATOR_DIR = "output_estimator" -_OUTPUT_PREDICTIONS_DIR = "output_predictions" - -_LOGGER = base.Logger("vertexai.remote_execution") - -warnings.warn(constants._V2_0_WARNING_MSG, DeprecationWarning, stacklevel=1) - - -def _get_model_file_from_image_uri(container_image_uri: str) -> str: - """Gets the model file from the container image URI. - - Args: - container_image_uri (str): - The image URI of the container from the training job. - - Returns: - str: - The model file name. - """ - - # sklearn, TF, PyTorch model extensions for retraining. - # PyTorch serv will need model.mar - if "tf" in container_image_uri: - return "" - elif "sklearn" in container_image_uri: - return _SKLEARN_FILE_NAME - elif "pytorch" in container_image_uri: - # Assume the pretrained model will be pulled for uptraining. - return _PYTORCH_FILE_NAME - else: - raise ValueError("Support loading PyTorch, scikit-learn and TensorFlow only.") - - -def _verify_custom_job(job: aiplatform.CustomJob) -> None: - """Verifies the provided CustomJob was created with SDK 2.0. - - Args: - job (aiplatform.CustomJob): - The CustomJob resource - - Raises: - If the provided job wasn't created with SDK 2.0. - """ - - if ( - not job.labels.get("trained_by_vertex_ai") - or job.labels.get("trained_by_vertex_ai") != "true" - ): - raise ValueError( - "This job wasn't created with SDK remote training, or it was created with a Vertex SDK version <= 1.32.0" - ) - - -def _generate_remote_job_output_path(base_gcs_dir: str) -> str: - """Generates the GCS output path of the remote training job. - - Args: - base_gcs_dir (str): - The base GCS directory for the remote training job. 
- """ - return os.path.join(base_gcs_dir, _OUTPUT_DIR) - - -def _get_model_from_successful_custom_job( - job_dir: str, -) -> Union["sklearn.base.BaseEstimator", "tf.Module", "torch.nn.Module"]: # noqa: F821 - - serializer = any_serializer.AnySerializer() - - model = serializer.deserialize( - os.path.join(_generate_remote_job_output_path(job_dir), _OUTPUT_ESTIMATOR_DIR) - ) - rewrapper = serializer.deserialize( - os.path.join(_generate_remote_job_output_path(job_dir), _REWRAPPER_NAME) - ) - rewrapper(model) - return model - - -def _register_sklearn_model( - model: "sklearn.base.BaseEstimator", # noqa: F821 - serializer: serializers_base.Serializer, - staging_bucket: str, - rewrapper: Any, -) -> aiplatform.Model: - """Register sklearn model.""" - unique_model_name = ( - f"vertex-ai-registered-sklearn-model-{utils.timestamped_unique_name()}" - ) - gcs_dir = os.path.join(staging_bucket, unique_model_name) - # serialize rewrapper - file_path = os.path.join(gcs_dir, _REWRAPPER_NAME) - serializer.serialize(rewrapper, file_path) - # serialize model - file_path = os.path.join(gcs_dir, _SKLEARN_FILE_NAME) - serializer.serialize(model, file_path) - - container_image_uri = aiplatform.helpers.get_prebuilt_prediction_container_uri( - framework="sklearn", - framework_version="1.0", - ) - - vertex_model = aiplatform.Model.upload( - display_name=unique_model_name, - artifact_uri=gcs_dir, - serving_container_image_uri=container_image_uri, - labels={"registered_by_vertex_ai": "true"}, - sync=True, - ) - - return vertex_model - - -def _register_tf_model( - model: "tensorflow.Module", # noqa: F821 - serializer: serializers_base.Serializer, - staging_bucket: str, - rewrapper: Any, - use_gpu: bool = False, -) -> aiplatform.Model: - """Register TensorFlow model.""" - unique_model_name = ( - f"vertex-ai-registered-tensorflow-model-{utils.timestamped_unique_name()}" - ) - gcs_dir = os.path.join(staging_bucket, unique_model_name) - # serialize rewrapper - file_path = os.path.join(gcs_dir, _TF_DIR_NAME + "/" + _REWRAPPER_NAME) - serializer.serialize(rewrapper, file_path) - # serialize model - file_path = os.path.join(gcs_dir, _TF_DIR_NAME) - # The default serialization format for keras models is "keras", but this - # format is not yet supported by the model upload (eventually prediction - # services). 
See the code here: - # https://source.corp.google.com/piper///depot/google3/third_party/py/google/cloud/aiplatform/aiplatform/models.py;rcl=561677645;l=3141 - serializer.serialize(model, file_path, save_format="tf") - - container_image_uri = aiplatform.helpers.get_prebuilt_prediction_container_uri( - framework="tensorflow", - framework_version="2.11", - accelerator="gpu" if use_gpu else "cpu", - ) - - vertex_model = aiplatform.Model.upload( - display_name=unique_model_name, - artifact_uri=file_path, - serving_container_image_uri=container_image_uri, - labels={"registered_by_vertex_ai": "true"}, - sync=True, - ) - - return vertex_model - - -def _register_pytorch_model( - model: "torch.nn.Module", # noqa: F821 - serializer: serializers_base.Serializer, - staging_bucket: str, - rewrapper: Any, - use_gpu: bool = False, -) -> aiplatform.Model: - """Register PyTorch model.""" - unique_model_name = ( - f"vertex-ai-registered-pytorch-model-{utils.timestamped_unique_name()}" - ) - gcs_dir = os.path.join(staging_bucket, unique_model_name) - - # serialize rewrapper - file_path = os.path.join(gcs_dir, _REWRAPPER_NAME) - serializer.serialize(rewrapper, file_path) - - # This archive model is required for using prediction pre-built container - archive_file_path = os.path.join(gcs_dir, _PYTORCH_FILE_NAME) - serializer.serialize(model, archive_file_path) - - container_image_uri = aiplatform.helpers.get_prebuilt_prediction_container_uri( - framework="pytorch", - framework_version="1.12", - accelerator="gpu" if use_gpu else "cpu", - ) - - vertex_model = aiplatform.Model.upload( - display_name=unique_model_name, - artifact_uri=gcs_dir, - serving_container_image_uri=container_image_uri, - labels={"registered_by_vertex_ai": "true"}, - sync=True, - ) - - return vertex_model - - -def _get_publisher_model_resource( - short_model_name: str, -) -> _publisher_models._PublisherModel: - """Gets the PublisherModel resource from the short model name. - - Args: - short_model_name (str): - Required. The short name for the model, for example 'text-bison@001' - - Returns: - A _PublisherModel instance pointing to the PublisherModel resource for - this model. - - Raises: - ValueError: - If no PublisherModel resource was found for the given short_model_name. - """ - - if "/" not in short_model_name: - short_model_name = "publishers/google/models/" + short_model_name - - try: - publisher_model_resource = _publisher_models._PublisherModel( - resource_name=short_model_name - ) - return publisher_model_resource - except: # noqa: E722 - raise ValueError("Please provide a valid Model Garden model resource.") - - -def _check_from_pretrained_passed_exactly_one_arg(fn_args: Dict[str, Any]) -> None: - """Checks exactly one argument was passed to from_pretrained. - - This supports an expanding number of arguments added to from_pretrained. - - Args: - fn_args (Dict[str, Any]): - Required. A dictionary of the arguments passed to from_pretrained. - - Raises: - ValueError: - If more than one arg or no args were passed to from_pretrained. - """ - - passed_args = 0 - - for _, argval in fn_args.items(): - if argval is not None: - passed_args += 1 - if passed_args != 1: - raise ValueError( - f"Exactly one of {list(fn_args.keys())} must be provided to from_pretrained." - ) - - -def register( - model: Union[ - "sklearn.base.BaseEstimator", "tf.Module", "torch.nn.Module" # noqa: F821 - ], - use_gpu: bool = False, -) -> aiplatform.Model: - """Registers a model and returns a Model representing the registered Model resource. 
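[Editor's note] The `_check_from_pretrained_passed_exactly_one_arg` guard above is a small mutual-exclusion check over keyword arguments. A self-contained restatement, with a hypothetical two-argument signature:

```
from typing import Any, Dict, Optional


def _check_exactly_one(fn_args: Dict[str, Any]) -> None:
    passed = sum(1 for value in fn_args.values() if value is not None)
    if passed != 1:
        raise ValueError(f"Exactly one of {list(fn_args.keys())} must be provided.")


def from_pretrained_sketch(*, model_name: Optional[str] = None,
                           custom_job_name: Optional[str] = None) -> str:
    _check_exactly_one(locals())
    return model_name or custom_job_name


print(from_pretrained_sketch(model_name="projects/123/locations/us-central1/models/456"))
```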
- - Args: - model (Union["sklearn.base.BaseEstimator", "tensorflow.Module", "torch.nn.Module"]): - Required. An OSS model. Supported frameworks: sklearn, tensorflow, pytorch. - use_gpu (bool): - Optional. Whether to use GPU for model serving. Default to False. - - Returns: - vertex_model (aiplatform.Model): - Instantiated representation of the registered model resource. - - Raises: - ValueError: if default staging bucket is not set - or if the framework is not supported. - """ - staging_bucket = vertexai.preview.global_config.staging_bucket - if not staging_bucket: - raise ValueError( - "A default staging bucket is required to upload the model file. " - "Please call `vertexai.init(staging_bucket='gs://my-bucket')." - ) - - # Unwrap VertexRemoteFunctor before upload to Model Registry. - rewrapper = driver._unwrapper(model) - - serializer = any_serializer.AnySerializer() - try: - if model.__module__.startswith("sklearn"): - return _register_sklearn_model(model, serializer, staging_bucket, rewrapper) - - elif model.__module__.startswith("keras") or ( - hasattr(model, "_tracking_metadata") - ): # pylint: disable=protected-access - return _register_tf_model( - model, serializer, staging_bucket, rewrapper, use_gpu - ) - - elif "torch" in model.__module__ or (hasattr(model, "state_dict")): - return _register_pytorch_model( - model, serializer, staging_bucket, rewrapper, use_gpu - ) - - else: - raise ValueError( - "Support uploading PyTorch, scikit-learn and TensorFlow only." - ) - except Exception as e: - raise e - finally: - rewrapper(model) - - -def from_pretrained( - *, - model_name: Optional[str] = None, - custom_job_name: Optional[str] = None, - foundation_model_name: Optional[str] = None, -) -> Union["sklearn.base.BaseEstimator", "tf.Module", "torch.nn.Module"]: # noqa: F821 - """Pulls a model from Model Registry or from a CustomJob ID for retraining. - - The returned model is wrapped with a Vertex wrapper for running remote jobs on Vertex, - unless an unwrapped model was registered to Model Registry. - - Args: - model_name (str): - Optional. The resource ID or fully qualified resource name of a registered model. - Format: "12345678910" or - "projects/123/locations/us-central1/models/12345678910@1". One of `model_name`, - `custom_job_name`, or `foundation_model_name` is required. - custom_job_name (str): - Optional. The resource ID or fully qualified resource name of a CustomJob created - with Vertex SDK remote training. If the job has completed successfully, this will load - the trained model created in the CustomJob. One of `model_name`, `custom_job_name`, or - `foundation_model_name` is required. - foundation_model_name (str): - Optional. The name of the foundation model to load. For example: "text-bison@001". One of - `model_name`,`custom_job_name`, or `foundation_model_name` is required. - - Returns: - model: local model for uptraining. 
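[Editor's note] The framework dispatch used by `register` (module prefix first, duck typing second) can be illustrated in isolation; the helper name below is hypothetical and the snippet assumes scikit-learn is installed.

```
from sklearn.linear_model import LinearRegression


def _framework_of(model) -> str:
    # Mirrors the dispatch in register(): module prefix first, duck typing second.
    if model.__module__.startswith("sklearn"):
        return "sklearn"
    if model.__module__.startswith("keras") or hasattr(model, "_tracking_metadata"):
        return "tensorflow"
    if "torch" in model.__module__ or hasattr(model, "state_dict"):
        return "pytorch"
    raise ValueError("Support uploading PyTorch, scikit-learn and TensorFlow only.")


print(_framework_of(LinearRegression()))   # sklearn
```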
- - Raises: - ValueError: - If registered model is not registered through `vertexai.preview.register` - If custom job was not created with Vertex SDK remote training - If both or neither model_name or custom_job_name are provided - """ - _check_from_pretrained_passed_exactly_one_arg(locals()) - - project = vertexai.preview.global_config.project - location = vertexai.preview.global_config.location - credentials = vertexai.preview.global_config.credentials - - if model_name: - - vertex_model = aiplatform.Model( - model_name, project=project, location=location, credentials=credentials - ) - if vertex_model.labels.get("registered_by_vertex_ai") == "true": - - artifact_uri = vertex_model.uri - model_file = _get_model_file_from_image_uri( - vertex_model.container_spec.image_uri - ) - - serializer = any_serializer.AnySerializer() - model = serializer.deserialize(os.path.join(artifact_uri, model_file)) - - rewrapper = serializer.deserialize( - os.path.join(artifact_uri, _REWRAPPER_NAME) - ) - - # Rewrap model (in-place) for following remote training. - rewrapper(model) - return model - - elif not vertex_model.labels: - raise ValueError( - f"The model {model_name} was not registered through `vertexai.preview.register` or created from Model Garden." - ) - else: - # Get the labels and check if it's a tuned model from a PublisherModel resource - for label_key in vertex_model.labels: - publisher_model_label = vertex_model.labels.get(label_key) - publisher_model_label_format_match = r"(^[a-z]+-[a-z]+-[0-9]{3}$)" - - if re.match(publisher_model_label_format_match, publisher_model_label): - # This try/except ensures this method will iterate over all models in a label even - # if one fails on PublisherModel resource creation - short_model_id = ( - _language_models._get_model_id_from_tuning_model_id( - publisher_model_label - ) - ) - - try: - publisher_model = _get_publisher_model_resource(short_model_id) - return _model_garden_models._from_pretrained( - model_name=short_model_id, - publisher_model=publisher_model, - tuned_vertex_model=vertex_model, - ) - - except ValueError: - continue - raise ValueError( - f"The model {model_name} was not created from a Model Garden model." - ) - - if custom_job_name: - job = aiplatform.CustomJob.get( - custom_job_name, project=project, location=location, credentials=credentials - ) - job_state = job.state - - _verify_custom_job(job) - job_dir = job.job_spec.base_output_directory.output_uri_prefix - - if job_state in aiplatform_jobs._JOB_PENDING_STATES: - _LOGGER.info( - f"The CustomJob {job.name} is still running. When the job has completed successfully, your model will be returned." - ) - training._get_remote_logs_until_complete(job) - # Get the new job state after it has completed - job_state = job.state - - if job_state == gca_job_state.JobState.JOB_STATE_SUCCEEDED: - return _get_model_from_successful_custom_job(job_dir) - else: - raise ValueError( - "The provided job did not complete successfully. Please provide a pending or successful customJob ID." 
- ) - - if foundation_model_name: - publisher_model = _get_publisher_model_resource(foundation_model_name) - return _model_garden_models._from_pretrained( - model_name=foundation_model_name, publisher_model=publisher_model - ) diff --git a/vertexai/preview/_workflow/shared/supported_frameworks.py b/vertexai/preview/_workflow/shared/supported_frameworks.py deleted file mode 100644 index d24360b7ca..0000000000 --- a/vertexai/preview/_workflow/shared/supported_frameworks.py +++ /dev/null @@ -1,367 +0,0 @@ -# -*- coding: utf-8 -*- - -# Copyright 2023 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# - -import importlib - -try: - from importlib import metadata as importlib_metadata -except ImportError: - import importlib_metadata -import inspect -import sys -from typing import Any, List, Tuple -import warnings - -from google.cloud.aiplatform import base -from packaging import version - - -_LOGGER = base.Logger(__name__) - -# This most likely needs to be map -REMOTE_FRAMEWORKS = frozenset(["sklearn", "keras", "lightning"]) - -REMOTE_TRAINING_MODEL_UPDATE_ONLY_OVERRIDE_LIST = frozenset(["fit", "train"]) - -# Methods that change the state of the object during a training workflow -REMOTE_TRAINING_STATEFUL_OVERRIDE_LIST = frozenset(["fit", "train", "fit_transform"]) - -# Methods that don't change the state of the object during a training workflow -REMOTE_TRAINING_FUNCTIONAL_OVERRIDE_LIST = frozenset(["transform"]) - -# Methods involved in training process -REMOTE_TRAINING_OVERRIDE_LIST = ( - REMOTE_TRAINING_STATEFUL_OVERRIDE_LIST | REMOTE_TRAINING_FUNCTIONAL_OVERRIDE_LIST -) - -REMOTE_PREDICTION_OVERRIDE_LIST = frozenset(["predict"]) - -REMOTE_OVERRIDE_LIST = REMOTE_TRAINING_OVERRIDE_LIST.union( - REMOTE_PREDICTION_OVERRIDE_LIST -) - - -LIBRARY_TO_MODULE_MAP = {"scikit-learn": "sklearn", "tf-models-official": "official"} - - -def _get_version_for_package(package_name: str) -> str: - try: - # Note: this doesn't work in the internal environment since - # importlib.metadata relies on the directory site-packages to collect - # the metadata of Python packages. - return importlib_metadata.version(package_name) - except importlib_metadata.PackageNotFoundError: - _LOGGER.info( - "Didn't find package %s via importlib.metadata. Trying to import it.", - package_name, - ) - try: - if package_name in LIBRARY_TO_MODULE_MAP: - module_name = LIBRARY_TO_MODULE_MAP[package_name] - else: - # Note: this assumes the top-level module name is the same as the - # package name after replacing "-" in the package name by "_". - # This is not always true. - module_name = package_name.replace("-", "_") - - module = importlib.import_module(module_name) - # This assumes the top-level module has __version__ attribute, but this - # is not always true. 
- return module.__version__ - except Exception as exc: - raise RuntimeError(f"{package_name} is not installed.") from exc - - -def _get_mro(cls_or_ins: Any) -> Tuple[Any, ...]: - if inspect.isclass(cls_or_ins): - return cls_or_ins.__mro__ - else: - return cls_or_ins.__class__.__mro__ - - -# pylint: disable=g-import-not-at-top -def _is_keras(cls_or_ins: Any) -> bool: - try: - global keras - from tensorflow import keras - - return keras.layers.Layer in _get_mro(cls_or_ins) - except ImportError: - return False - - -def _is_sklearn(cls_or_ins: Any) -> bool: - try: - global sklearn - import sklearn - - return sklearn.base.BaseEstimator in _get_mro(cls_or_ins) - except ImportError: - return False - - -def _is_lightning(cls_or_ins: Any) -> bool: - try: - global torch - global lightning - import torch - import lightning - - return lightning.pytorch.trainer.trainer.Trainer in _get_mro(cls_or_ins) - except ImportError: - return False - - -def _is_torch(cls_or_ins: Any) -> bool: - try: - global torch - import torch - - return torch.nn.modules.module.Module in _get_mro(cls_or_ins) - except ImportError: - return False - - -def _is_torch_dataloader(cls_or_ins: Any) -> bool: - try: - global torch - import torch - - return torch.utils.data.DataLoader in _get_mro(cls_or_ins) - except ImportError: - return False - - -def _is_tensorflow(cls_or_ins: Any) -> bool: - try: - global tf - import tensorflow as tf - - return tf.Module in _get_mro(cls_or_ins) - except ImportError: - return False - - -def _is_pandas_dataframe(possible_dataframe: Any) -> bool: - try: - global pd - import pandas as pd - - return pd.DataFrame in _get_mro(possible_dataframe) - except ImportError: - return False - - -def _is_bigframe(possible_dataframe: Any) -> bool: - try: - global bf - import bigframes as bf - from bigframes.dataframe import DataFrame - - return DataFrame in _get_mro(possible_dataframe) - except ImportError: - return False - - -# pylint: enable=g-import-not-at-top -def _is_oss(cls_or_ins: Any) -> bool: - return any( - [_is_sklearn(cls_or_ins), _is_keras(cls_or_ins), _is_lightning(cls_or_ins)] - ) - - -# pylint: disable=undefined-variable -def _get_deps_if_sklearn_model(model: Any) -> List[str]: - deps = [] - if _is_sklearn(model): - dep_version = version.Version(sklearn.__version__).base_version - deps.append(f"scikit-learn=={dep_version}") - return deps - - -def _get_deps_if_tensorflow_model(model: Any) -> List[str]: - deps = [] - if _is_tensorflow(model): - dep_version = version.Version(tf.__version__).base_version - deps.append(f"tensorflow=={dep_version}") - return deps - - -def _get_deps_if_torch_model(model: Any) -> List[str]: - deps = [] - if _is_torch(model): - dep_version = version.Version(torch.__version__).base_version - deps.append(f"torch=={dep_version}") - return deps - - -def _get_deps_if_lightning_model(model: Any) -> List[str]: - deps = [] - if _is_lightning(model): - lightning_version = version.Version(lightning.__version__).base_version - torch_version = version.Version(torch.__version__).base_version - deps.append(f"lightning=={lightning_version}") - deps.append(f"torch=={torch_version}") - try: - global tensorboard - import tensorboard - - tensorboard_version = version.Version(tensorboard.__version__).base_version - deps.append(f"tensorboard=={tensorboard_version}") - except ImportError: - pass - try: - global tensorboardX - import tensorboardX - - tensorboardX_version = version.Version( - tensorboardX.__version__ - ).base_version - deps.append(f"tensorboardX=={tensorboardX_version}") - except 
ImportError: - pass - - return deps - - -def _get_deps_if_torch_dataloader(obj: Any) -> List[str]: - deps = [] - if _is_torch_dataloader(obj): - dep_version = version.Version(torch.__version__).base_version - deps.append(f"torch=={dep_version}") - deps.extend(_get_cloudpickle_deps()) - return deps - - -def _get_cloudpickle_deps() -> List[str]: - deps = [] - try: - global cloudpickle - import cloudpickle - - dep_version = version.Version(cloudpickle.__version__).base_version - deps.append(f"cloudpickle=={dep_version}") - except ImportError as e: - raise ImportError( - "Cloudpickle is not installed. Please call `pip install google-cloud-aiplatform[preview]`." - ) from e - - return deps - - -def _get_deps_if_pandas_dataframe(possible_dataframe: Any) -> List[str]: - deps = [] - if _is_pandas_dataframe(possible_dataframe): - dep_version = version.Version(pd.__version__).base_version - deps.append(f"pandas=={dep_version}") - deps += _get_pyarrow_deps() - # Note: it's likely that a DataFrame can be changed to other format, and - # therefore needs to be serialized by CloudPickleSerializer. An example - # is sklearn's Transformer.fit_transform() method, whose output is always - # a ndarray. - deps += _get_cloudpickle_deps() - return deps - - -def _get_pyarrow_deps() -> List[str]: - deps = [] - try: - global pyarrow - import pyarrow - - dep_version = version.Version(pyarrow.__version__).base_version - deps.append(f"pyarrow=={dep_version}") - except ImportError: - deps.append("pyarrow") - return deps - - -def _get_numpy_deps() -> List[str]: - deps = [] - try: - global numpy - import numpy - - dep_version = version.Version(numpy.__version__).base_version - deps.append(f"numpy=={dep_version}") - except ImportError: - deps.append("numpy") - return deps - - -def _get_pandas_deps() -> List[str]: - deps = [] - try: - global pd - import pandas as pd - - dep_version = version.Version(pd.__version__).base_version - deps.append(f"pandas=={dep_version}") - except ImportError: - deps.append("pandas") - return deps - - -# pylint: enable=undefined-variable - - -def _get_estimator_requirement(estimator: Any) -> List[str]: - """Returns a list of requirements given an estimator.""" - deps = [] - deps.extend(_get_numpy_deps()) - deps.extend(_get_pandas_deps()) - deps.extend(_get_cloudpickle_deps()) - deps.extend(_get_deps_if_sklearn_model(estimator)) - deps.extend(_get_deps_if_tensorflow_model(estimator)) - deps.extend(_get_deps_if_torch_model(estimator)) - deps.extend(_get_deps_if_lightning_model(estimator)) - # dedupe the dependencies by casting it to a dict first (dict perserves the - # order while set doesn't) - return list(dict.fromkeys(deps)) - - -def _get_python_minor_version() -> str: - # this will generally be the container with least or no security vulnerabilities - return ".".join(sys.version.split()[0].split(".")[0:2]) - - -def _get_cpu_container_uri() -> str: - """Returns the container uri used for cpu training.""" - return f"python:{_get_python_minor_version()}" - - -def _get_gpu_container_uri(estimator: Any) -> str: - """Returns the container uri used for gpu training given an estimator.""" - local_python_version = _get_python_minor_version() - if _is_tensorflow(estimator): - if local_python_version != "3.10": - warnings.warn( - f"Your local runtime has python{local_python_version}, but your " - "remote GPU training will be executed in python3.10" - ) - return "us-docker.pkg.dev/vertex-ai/training/tf-gpu.2-11.py310:latest" - - elif _is_torch(estimator) or _is_lightning(estimator): - if 
local_python_version != "3.10": - warnings.warn( - f"Your local runtime has python{local_python_version}, but your " - "remote GPU training will be executed in python3.10" - ) - return "pytorch/pytorch:2.0.0-cuda11.7-cudnn8-runtime" - - else: - raise ValueError(f"{estimator} is not supported for GPU training.") diff --git a/vertexai/preview/developer/__init__.py b/vertexai/preview/developer/__init__.py deleted file mode 100644 index 0039dc8e04..0000000000 --- a/vertexai/preview/developer/__init__.py +++ /dev/null @@ -1,56 +0,0 @@ -# -*- coding: utf-8 -*- - -# Copyright 2023 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# - -import warnings -from vertexai.preview._workflow.serialization_engine import ( - any_serializer, -) -from vertexai.preview._workflow.serialization_engine import ( - serializers_base, -) -from vertexai.preview._workflow.shared import ( - configs, - constants, -) -from vertexai.preview.developer import mark -from vertexai.preview.developer import remote_specs - - -warnings.warn(constants._V2_0_WARNING_MSG, DeprecationWarning, stacklevel=1) - -PersistentResourceConfig = configs.PersistentResourceConfig -Serializer = serializers_base.Serializer -SerializationMetadata = serializers_base.SerializationMetadata -SerializerArgs = serializers_base.SerializerArgs -RemoteConfig = configs.RemoteConfig -WorkerPoolSpec = remote_specs.WorkerPoolSpec -WorkerPoolSepcs = remote_specs.WorkerPoolSpecs - -register_serializer = any_serializer.register_serializer - - -__all__ = ( - "mark", - "PersistentResourceConfig", - "register_serializer", - "Serializer", - "SerializerArgs", - "SerializationMetadata", - "RemoteConfig", - "WorkerPoolSpec", - "WorkerPoolSepcs", -) diff --git a/vertexai/preview/developer/base_classes.py b/vertexai/preview/developer/base_classes.py deleted file mode 100644 index 255fe5185c..0000000000 --- a/vertexai/preview/developer/base_classes.py +++ /dev/null @@ -1,28 +0,0 @@ -# -*- coding: utf-8 -*- - -# Copyright 2023 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# - -""" -Placeholder for base Model and FeatureTransformer classes. 
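[Editor's note] The CPU container selection above is driven by the local Python minor version; a standalone restatement:

```
import sys


def _python_minor_version() -> str:
    # e.g. "3.10"; mirrors _get_python_minor_version above.
    return ".".join(sys.version.split()[0].split(".")[0:2])


def cpu_container_uri() -> str:
    return f"python:{_python_minor_version()}"


print(cpu_container_uri())   # e.g. python:3.10
```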
-""" - - -class Model: - pass - - -class FeatureTransformer: - pass diff --git a/vertexai/preview/developer/mark.py b/vertexai/preview/developer/mark.py deleted file mode 100644 index c2fa4f40bc..0000000000 --- a/vertexai/preview/developer/mark.py +++ /dev/null @@ -1,197 +0,0 @@ -# -*- coding: utf-8 -*- - -# Copyright 2023 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# - -import functools -import inspect -from typing import Any, Callable, List, Optional, Union - -from vertexai.preview._workflow.driver import remote -from vertexai.preview._workflow.executor import ( - remote_container_training, -) -from vertexai.preview._workflow.executor import ( - training, - prediction, -) -from vertexai.preview._workflow.shared import configs -from vertexai.preview.developer import remote_specs - - -def train( - remote_config: Optional[configs.RemoteConfig] = None, -) -> Callable[..., Any]: - """Decorator to enable Vertex remote training on a method. - - Example Usage: - ``` - vertexai.init( - project="my-project", - location="my-location", - staging_bucket="gs://my-bucket", - ) - vertexai.preview.init(remote=True) - - class MyModel(vertexai.preview.VertexModel): - ... - - @vertexai.preview.developer.mark.train() - def my_train_method(...): - ... - - model = MyModel(...) - - # This train method will be executed remotely - model.my_train_method(...) - ``` - - Args: - remote_config (config.RemoteConfig): - Optional. A class that holds the configuration for the remote job. - - Returns: - A wrapped method with its original signature. - """ - - def remote_training_wrapper(method: Callable[..., Any]) -> Callable[..., Any]: - functor = remote.remote_method_decorator(method, training.remote_training) - if remote_config is not None: - if inspect.ismethod(method): - functor.vertex.remote_config = remote_config - else: - functor.vertex = functools.partial( - configs.VertexConfig, remote_config=remote_config - ) - - return functor - - return remote_training_wrapper - - -# pylint: disable=protected-access -def _remote_container_train( - image_uri: str, - additional_data: List[ - Union[remote_specs._InputParameterSpec, remote_specs._OutputParameterSpec] - ], - remote_config: Optional[configs.DistributedTrainingConfig] = None, -) -> Callable[..., Any]: - """Decorator to enable remote training with a container image. - - This decorator takes the parameters from the __init__ function (requires - setting up binding outside of the decorator) and the function that it - decorates, preprocesses the arguments, and launches a custom job for - training. - - As the custom job is running, the inputs are read and parsed according to - the container code, and the outputs are written to the GCS paths specified - for each output field. - - If the custom job succeeds, the decorator deserializes the outputs from the - custom job and sets them as instance attributes. 
Each output will be either - a string or bytes, and the function this decorator decorates may - additionally post-process the outputs to their corresponding types. - - Args: - image_uri (str): - Required. The pre-built docker image uri for CustomJob. - additional_data (List): - Required. A list of input and output parameter specs. - remote_config (config.DistributedTrainingConfig): - Optional. A class that holds the configuration for the distributed - training remote job. - - Returns: - An inner decorator that returns the decorated remote container training - function. - - Raises: - ValueError if the decorated function has a duplicate argument name as - the parameters in existing binding, or if an additional data is neither - an input parameter spec or an output parameter spec. - """ - - def remote_training_wrapper(method: Callable[..., Any]) -> Callable[..., Any]: - functor = remote.remote_method_decorator( - method, - remote_container_training.train, - remote_executor_kwargs={ - "image_uri": image_uri, - "additional_data": additional_data, - }, - ) - config = remote_config or configs.DistributedTrainingConfig() - if inspect.ismethod(method): - functor.vertex.remote_config = config - functor.vertex.remote = True - else: - functor.vertex = functools.partial( - configs.VertexConfig, remote=True, remote_config=config - ) - - return functor - - return remote_training_wrapper - - -def predict( - remote_config: Optional[configs.RemoteConfig] = None, -) -> Callable[..., Any]: - """Decorator to enable Vertex remote prediction on a method. - - Example Usage: - ``` - vertexai.init( - project="my-project", - location="my-location", - staging_bucket="gs://my-bucket", - ) - vertexai.preview.init(remote=True) - - class MyModel(vertexai.preview.VertexModel): - ... - - @vertexai.preview.developer.mark.predict() - def my_predict_method(...): - ... - - model = MyModel(...) - - # This train method will be executed remotely - model.my_predict_method(...) - ``` - - Args: - remote_config (config.RemoteConfig): - Optional. A class that holds the configuration for the remote job. - - Returns: - A wrapped method with its original signature. - """ - - def remote_prediction_wrapper(method: Callable[..., Any]) -> Callable[..., Any]: - functor = remote.remote_method_decorator(method, prediction.remote_prediction) - if remote_config is not None: - if inspect.ismethod(method): - functor.vertex.remote_config = remote_config - else: - functor.vertex = functools.partial( - configs.VertexConfig, remote_config=remote_config - ) - - return functor - - return remote_prediction_wrapper diff --git a/vertexai/preview/developer/remote_specs.py b/vertexai/preview/developer/remote_specs.py deleted file mode 100644 index c8a418b880..0000000000 --- a/vertexai/preview/developer/remote_specs.py +++ /dev/null @@ -1,892 +0,0 @@ -# -*- coding: utf-8 -*- - -# Copyright 2023 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# - -"""Remote workload specs and helper functions for developer.mark.fit. 
- -""" - -import dataclasses -import json -import os -import tempfile - -from typing import Any, Dict, List, Optional - -from google.cloud.aiplatform import base -from google.cloud.aiplatform.utils import gcs_utils -from google.cloud.aiplatform.utils import worker_spec_utils -from google.cloud.aiplatform.preview import resource_pool_utils -from vertexai.preview._workflow.serialization_engine import ( - serializers, -) - - -_LOGGER = base.Logger(__name__) - -_LITERAL: str = "literal" -_PARQUET: str = "parquet" -_CLOUDPICKLE: str = "cloudpickle" - -# Constants for serializer. -_SERIALIZER = frozenset([_LITERAL, _PARQUET, _CLOUDPICKLE]) - -_METADATA_FILE_NAME = "metadata" -_DATA_FILE_NAME = "data.parquet" -_FLOAT16 = "float16" -_FLOAT32 = "float32" -_FLOAT64 = "float64" -_INT8 = "int8" -_INT16 = "int16" -_INT32 = "int32" -_INT64 = "int64" -_UINT8 = "uint8" -_UINT16 = "uint16" -_UINT32 = "uint32" -_UINT64 = "uint64" -_SUPPORTED_NUMERICAL_DTYPES = [ - _FLOAT16, - _FLOAT32, - _FLOAT64, - _INT8, - _INT16, - _INT32, - _INT64, - _UINT8, - _UINT16, - _UINT32, - _UINT64, -] -_DENSE = "dense" - -# Constants for deserializer. -_DESERIALIZER = frozenset([_LITERAL, _CLOUDPICKLE]) - -# Constants for Cluster and ClusterSpec -_CHIEF = "workerpool0" -_WORKER = "workerpool1" -_SERVER = "workerpool2" -_EVALUATOR = "workerpool3" -_WORKER_POOLS = frozenset([_CHIEF, _WORKER, _SERVER, _EVALUATOR]) -_CLUSTER = "cluster" -_TASK = "task" -_TYPE = "type" -_INDEX = "index" -_TRIAL = "trial" - -_CLUSTER_SPEC = "CLUSTER_SPEC" -_MASTER_ADDR = "MASTER_ADDR" -_MASTER_PORT = "MASTER_PORT" - - -def _gen_gcs_path(base_dir: str, name: str) -> str: - """Generates a GCS path for a file or a directory. - - The created path can be used for either a file or a directory. If it is a - file, we can directly write to it. If it is a directory, file paths could - be generated by joining the derectly path and the file names. - - Example usages: - 1. When passing in parameters to a custom job, we will be able to write - serialized parameter value content to a GCS path in 'cloudpickle' mode. - 2. We will also provide GCS paths so that the custom job can write the - output parameter values to the dedicated paths. - - Args: - base_dir (str): - Required. The base GCS directory. Must be a valid GCS path that - starts with 'gs://'. - name (str): - Required. The name of a file or directory. If name ends with '/', - removes '/' for consistency since we do not need the suffix to - identify the path as a directory. - Returns: - The generated GCS path. - Raises: - ValueError if the input base_dir is not a valid GCS path. - """ - if not base_dir.startswith("gs://"): - raise ValueError(f"base_dir {base_dir} is not a valid GCS path.") - name = name[:-1] if name.endswith("/") else name - return os.path.join(base_dir, name) - - -def _get_argument_name(name: str) -> str: - """Gets an argument name for the inputs and outputs of a container. - - 1. If the name contains dots such as a.b.arg_name or self.arg_name, use the - string following the right-most dot (arg_name) as the argument name. - 2. If the name has a single leading underscore, such as _arg_name, remove - the leading underscore in the argument name (arg_name). If the name has a - double leading underscore such as __arg_name, use the argument name - __arg_name directly. - - Args: - name (str): - Required. The name of the parameter in the InputParameterSpec. - - Returns: - The name of the argument in the container. 
- """ - argument_name = name.split(".")[-1] - if argument_name.startswith("_") and not argument_name.startswith("__"): - argument_name = argument_name[1:] - if not argument_name: - raise ValueError(f"Failed to get argument name from name {name}.") - return argument_name - - -@dataclasses.dataclass -class _FeatureMetadata: - dtype: str - feature_type: str = _DENSE - - -@dataclasses.dataclass -class _CategoricalFeatureMetadata: - dtype: str - categories: List[Any] - feature_type: str = _DENSE - - -@dataclasses.dataclass -class _TaskInfo: - """Describes the task of the particular node on which code is running. - - Args: - task_type (str): - Required. The type of worker pool this task is running in. One of 'workerpool0' for chief, 'workerpool1' for worker, 'workerpool2' for server or 'workerpool3' for evaluator. - task_index (int): - Required. The zero-based index of the task. If a training job has two workers, this value is set to 0 on one and 1 on the other. - task_trial (int): - Optional. The identifier of the hyperparameter tuning trial currently running. - """ - - task_type: str - task_index: int - task_trial: int = None - - -class _InputParameterSpec: - """Input parameter spec for remote trainers.""" - - def __init__( - self, - name: str, - argument_name: Optional[str] = None, - serializer: str = _LITERAL, - ) -> None: - """Initializes an _InputParameterSpec instance. - - When creating CustomJob spec, each _InputParameterSpec will be - transformed into a custom job input. - - Args: - name (str): - Required. The parameter name that stores the input value. - argument_name (str): - Optional. The argument name for the custom job input. If not - specified, an argument_name will be derived from name. - serializer (str): - Optional. The serializer for the input. Must be one of - 'literal', 'parquet', and 'cloudpickle'. - - Raises: - ValueError: If name or serializer is invalid. - """ - if not name: - raise ValueError("Input parameter name cannot be empty.") - self.name = name - self.argument_name = argument_name or _get_argument_name(name) - if serializer not in _SERIALIZER: - raise ValueError( - f"Invalid serializer {serializer} for {name}. Please" - f"choose one of {list(_SERIALIZER)}." - ) - self.serializer = serializer - - def format_arg(self, input_dir: str, binding: Dict[str, Any]) -> Any: - """Formats an argument based on the spec. - - Args: - input_dir (str): - Required. The GCS input directory to save the serialized input - value when necessary. - binding (Dict[str, Any]): - Required. A dictionary that contains maps an input name to its - value. - - Returns: - The formatted argument. - - Raises: - ValueError if the input is not found in binding, tries to serialize - a non-pandas.DataFrame to parquet, or the serialization format is - not supported. - """ - try: - # pylint: disable=g-import-not-at-top - import pandas as pd - except ImportError: - raise ImportError( - "pandas is not installed and is required for remote training." - ) from None - if self.name not in binding: - raise ValueError(f"Input {self.name} not found in binding: " f"{binding}.") - - value = binding[self.name] - if self.serializer == _LITERAL: - return value - - gcs_path = _gen_gcs_path(input_dir, self.argument_name) - if self.serializer == _PARQUET: - if not isinstance(value, pd.DataFrame): - raise ValueError( - "Parquet serializer is only supported for " - f"pandas.DataFrame, but {self.name} has type " - f"{type(value)}." 
- ) - # Serializes data - data_serializer = serializers.PandasDataSerializer() - data_path = _gen_gcs_path(gcs_path, _DATA_FILE_NAME) - data_serializer.serialize( - to_serialize=value, - gcs_path=data_path, - ) - - # Serializes feature metadata - metadata_serializer = serializers.CloudPickleSerializer() - metadata_path = _gen_gcs_path(gcs_path, _METADATA_FILE_NAME) - feature_metadata = _generate_feature_metadata(value) - metadata_serializer.serialize( - to_serialize=feature_metadata, gcs_path=metadata_path - ) - - elif self.serializer == _CLOUDPICKLE: - serializer = serializers.CloudPickleSerializer() - serializer.serialize( - to_serialize=value, - gcs_path=gcs_path, - ) - - else: - raise ValueError( - f"Unsupported serializer: {self.serializer}." - "The input serializer must be one of " - f"{_SERIALIZER}." - ) - return gcs_path - - -class _OutputParameterSpec: - """Output parameter spec for remote trainers.""" - - def __init__( - self, - name: str, - argument_name: Optional[str] = None, - deserializer: Optional[str] = _LITERAL, - ) -> None: - """Initializes an OutputParameterSpec instance. - - When creating CustomJob spec, each OutputParameterSpec will be - transformed into a custom job argument that will store the output value. - - Args: - name (str): - Required. The parameter name that will store the output value. - argument_name (str): - Optional. The argument name for the custom job argument. If not - specified, an argument_name will be derived from name. - deserializer (str): - Optional. The deserializer for the output. Must be one of - 'literal', and 'cloudpickle'. - - Raises: - ValueError: If name or deserializer is invalid. - """ - if not name: - raise ValueError("Output parameter name cannot be empty.") - self.name = name - self.argument_name = argument_name or _get_argument_name(name) - if deserializer not in _DESERIALIZER: - raise ValueError( - f"Invalid deserializer {deserializer} for {name}. Please" - f"choose one of {list(_DESERIALIZER)}." - ) - self.deserializer = deserializer - - def deserialize_output(self, gcs_path: str) -> Any: - """Deserializes an output based on the spec. - - Args: - gcs_path (str): - Required. The gcs path containing the output. - - Returns: - The deserialized output. - - Raises: - ValueError if the deserialization format is unsupported. - """ - if self.deserializer == _LITERAL: - with tempfile.NamedTemporaryFile() as temp_file: - gcs_utils.download_file_from_gcs(gcs_path, temp_file.name) - with open(temp_file.name, "r") as f: - return f.read() - elif self.deserializer == _CLOUDPICKLE: - serializer = serializers.CloudPickleSerializer() - return serializer.deserialize(serialized_gcs_path=gcs_path) - else: - raise ValueError(f"Unsupported deserializer: {self.deserializer}.") - - -def _generate_feature_metadata(df: Any) -> Dict[str, Any]: - """Helper function to generate feature metadata from a pandas DataFrame. - - When column types are not supported, the corresponding columns are excluded - from feature metadata. - - Args: - df (pandas.DataFrame): - Required. A DataFrame to generate feature metadata from. - - Returns: - A dictionary that maps column names to metadata. - - Raises: - ValueError if df is not a valid/ supported DataFrame. - """ - try: - # pylint: disable=g-import-not-at-top - import pandas as pd - except ImportError: - raise ImportError( - "pandas is not installed and is required for remote training." 
- ) from None - - if not isinstance(df, pd.DataFrame): - raise ValueError( - "Generating feature metadata is only supported for " - f"pandas.DataFrame, but {df} has type {type(df)}." - ) - - feature_metadata = {} - for col in df.columns: - if df[col].dtypes in _SUPPORTED_NUMERICAL_DTYPES: - feature_metadata[str(col)] = dataclasses.asdict( - _FeatureMetadata(str(df[col].dtypes)) - ) - # Ignores categorical columns that are not integers. - elif df[col].dtypes == "category" and df[col].cat.categories.dtype == _INT64: - categories = df[col].cat.categories.tolist() - feature_metadata[str(col)] = dataclasses.asdict( - _CategoricalFeatureMetadata(_INT64, categories) - ) - else: - # Ignores unsupported column type. - pass - return feature_metadata - - -class _Cluster: - """Represents a Cluster as a set of "tasks". - - Task type or worker pool can be one of chief, worker, server or evaluator. - - To create a cluster with two task types and three tasks, specify the - mapping from worker pool to list of network addresses. - - ```python - cluster = Cluster({"workerpool0": ["cmle-training-workerpool0-ab-0:2222"], - "workerpool1": ["cmle-training-workerpool1-ab-0:2222", - "cmle-training-workerpool1-ab-1:2222"]}) - ``` - """ - - def __init__(self, cluster_info: Dict[str, Any]): - """Initializes a Cluster instance. - - The cluster description contains a list of tasks for each - task type or worker pool specified in a CustomJob. - - Args: - cluster_info (Dict[str, Any]): Required. The cluster description - containing the list of tasks for each task type. - - Raises: - ValueError: If cluster description contains invalid task types. - """ - for task_type in cluster_info: - if task_type not in _WORKER_POOLS: - raise ValueError( - f"Invalid task type: {task_type}. Must be one of {_WORKER_POOLS}." - ) - self.cluster_info = cluster_info - - # Different worker pool types - @property - def chief_task_type(self) -> str: - return _CHIEF - - @property - def worker_task_type(self) -> str: - return _WORKER - - @property - def server_task_type(self) -> str: - return _SERVER - - @property - def evaluator_task_type(self) -> str: - return _EVALUATOR - - @property - def task_types(self) -> List[str]: - """Returns a list of task types in this cluster. - - Returns: - A list of task types in this cluster. - """ - return list(self.cluster_info.keys()) - - def get_num_tasks(self, task_type): - """Returns the number of tasks of a given task type. - - Args: - task_type (str): The task type. - - Returns: - The number of tasks of the given task type. - """ - if task_type not in self.cluster_info: - return 0 - return len(self.cluster_info[task_type]) - - def get_task_addresses(self, task_type): - """Returns list of task address for the task type. - - Args: - task_type (str): The task type. - - Returns: - A list of task address for the given task type. - - Raises: - ValueError: If the task type passed does not exist in the cluster. - """ - if task_type not in self.cluster_info: - raise ValueError(f"No such task type in cluster: {task_type}") - return self.cluster_info[task_type] - - -class _ClusterSpec: - """ClusterSpec for a distributed training job.""" - - def __init__(self, cluster_spec: Dict[str, Any]): - """Initializes a ClusterSpec instance. - - Vertex AI populates an environment variable, CLUSTER_SPEC, on every - replica to describe how the overall cluster is set up. For - distributed - training, this environment variable will be used to create a - ClusterSpec. 
- - A sample CLUSTER_SPEC: - ``` - { - "cluster": { - "workerpool0": [ - "cmle-training-workerpool0-ab-0:2222" - ], - "workerpool1": [ - "cmle-training-workerpool1-ab-0:2222", - "cmle-training-workerpool1-ab-1:2222" - ], - "workerpool2": [ - "cmle-training-workerpool2-ab-0:2222" - ], - "workerpool3": [ - "cmle-training-workerpool3-ab-0:2222" - ] - }, - "environment":"cloud", - "task":{ - "type": "workerpool0", - "index": 0 - } - } - ``` - Args: - cluster_spec (Dict[str, Any]): Required. The cluster spec - containing the cluster and current task specification. - - Raises: - ValueError: If `cluster_spec` is missing required keys. - """ - if _CLUSTER not in cluster_spec or _TASK not in cluster_spec: - raise ValueError(f"`cluster_spec` must contain {_CLUSTER} and {_TASK}") - self.cluster = _Cluster(cluster_spec[_CLUSTER]) - self.task = _TaskInfo( - task_type=cluster_spec[_TASK][_TYPE], - task_index=cluster_spec[_TASK][_INDEX], - task_trial=cluster_spec[_TASK].get(_TRIAL, None), - ) - - def get_rank(self): - """Returns the world rank of the current task. - - Returns: - The world rank of the current task. - """ - task_type = self.task.task_type - task_index = self.task.task_index - - if task_type == self.cluster.chief_task_type: - return 0 - if task_type == self.cluster.worker_task_type: - return task_index + 1 - - num_workers = self.cluster.get_num_tasks(self.cluster.worker_task_type) - if task_type == self.cluster.server_task_type: - return num_workers + task_index + 1 - - num_ps = self.cluster.get_num_tasks(self.cluster.server_task_type) - if task_type == self.cluster.evaluator_task_type: - return num_ps + num_workers + task_index + 1 - - def get_world_size(self): - """Returns the world size (total number of workers) for the current run. - - Returns: - The world size for the current run. - """ - num_chief = self.cluster.get_num_tasks(self.cluster.chief_task_type) - num_workers = self.cluster.get_num_tasks(self.cluster.worker_task_type) - num_ps = self.cluster.get_num_tasks(self.cluster.server_task_type) - num_evaluators = self.cluster.get_num_tasks(self.cluster.evaluator_task_type) - - return num_chief + num_workers + num_ps + num_evaluators - - def get_chief_address_port(self): - """Returns address and port for chief task. - - Returns: - A tuple of task address and port. - - Raises: - ValueError: If the chief task type does not exist in the cluster - """ - if self.cluster.chief_task_type not in self.cluster.task_types: - raise ValueError("Cluster must have a chief task.") - chief_task = self.cluster.get_task_addresses(self.cluster.chief_task_type)[0] - address, port = chief_task.split(":") - return address, int(port) - - -# pylint: disable=protected-access -class WorkerPoolSpec(worker_spec_utils._WorkerPoolSpec): - """Wraps class that holds a worker pool spec configuration. - - Attributes: - replica_count (int): - The number of worker replicas. - machine_type (str): - The type of machine to use for remote training. - accelerator_count (int): - The number of accelerators to attach to a worker replica. - accelerator_type (str): - Hardware accelerator type. One of ACCELERATOR_TYPE_UNSPECIFIED, - NVIDIA_TESLA_A100, NVIDIA_TESLA_P100, NVIDIA_TESLA_V100, - NVIDIA_TESLA_K80, NVIDIA_TESLA_T4, NVIDIA_TESLA_P4 - boot_disk_type (str): - Type of the boot disk (default is `pd-ssd`). - Valid values: `pd-ssd` (Persistent Disk Solid State Drive) or - `pd-standard` (Persistent Disk Hard Disk Drive). - boot_disk_size_gb (int): - Size in GB of the boot disk (default is 100GB). 
- boot disk size must be within the range of [100, 64000]. - """ - - -@dataclasses.dataclass -class WorkerPoolSpecs: - """A class that holds the worker pool specs configuration for a remote job. - - Attributes: - chief (WorkerPoolSpec): - The `cheif` or `workerpool0` worker pool spec configuration. - worker (WorkerPoolSpec): - The `worker` or `workerpool1` worker pool spec configuration. - server (WorkerPoolSpec): - The `server` or `workerpool2` worker pool spec configuration. - evaluator (WorkerPoolSpec): - The `evaluator` or `workerpool3` worker pool spec configuration. - """ - - chief: WorkerPoolSpec - worker: Optional[WorkerPoolSpec] = None - server: Optional[WorkerPoolSpec] = None - evaluator: Optional[WorkerPoolSpec] = None - - -def _prepare_worker_pool_specs( - worker_pool_specs: WorkerPoolSpecs, - image_uri: str, - command: Optional[List[Any]] = [], - args: Optional[List[Any]] = [], -): - """Return each worker pools spec in order for Vertex AI Training as a list of dicts. - - Args: - worker_pool_specs (WorkerPoolSpecs): Required. Worker pool specs configuration for a remote job. - image_uri (str): Required. Image uri for training. - command (str): Command for training. - args (str): Args for training. - - Returns: - Ordered list of worker pool specs for Vertex AI Training. - - Raises: - ValueError: If replica_count for cheif worker pool spec is greater than 1. - """ - - if worker_pool_specs.chief.replica_count > 1: - raise ValueError( - "Chief worker pool spec replica_count cannot be greater than 1." - ) - spec_order = [ - worker_pool_specs.chief, - worker_pool_specs.worker, - worker_pool_specs.server, - worker_pool_specs.evaluator, - ] - formatted_specs = [{} if not spec else spec.spec_dict for spec in spec_order] - - # Remove empty trailing worker pool specs - for i in reversed(range(len(spec_order))): - if spec_order[i]: - break - formatted_specs.pop() - - # Add container spec to each non-empty worker pool spec - for spec in formatted_specs: - if spec: - spec["container_spec"] = { - "image_uri": image_uri, - "command": command, - "args": args, - } - - return formatted_specs - - -def _verify_specified_remote_config_values( - worker_pool_specs: WorkerPoolSpecs, - machine_type: str, - accelerator_type: str, - accelerator_count: int, - replica_count: Optional[int] = None, - boot_disk_type: Optional[str] = None, - boot_disk_size_gb: Optional[int] = None, -): - """Helper to validate if remote_config.worker_pool_specs is set, other remote job config values are not.""" - if worker_pool_specs and ( - machine_type - or accelerator_type - or accelerator_count - or replica_count - or boot_disk_type - or boot_disk_size_gb - ): - raise ValueError( - "Cannot specify both 'worker_pool_specs' and ['machine_type', 'accelerator_type', 'accelerator_count', 'replica_count', 'boot_disk_type', 'boot_disk_size_gb']." 
- ) - - -def _get_cluster_spec() -> _ClusterSpec: - """Helper to check for CLUSTER_SPEC environment variable and return object if it exists.""" - cluster_spec_str = os.getenv(_CLUSTER_SPEC, "") - if cluster_spec_str: - return _ClusterSpec(json.loads(cluster_spec_str)) - return None - - -def _get_output_path_for_distributed_training(base_dir, name) -> str: - """Helper to get output path for distributed training.""" - cluster_spec = _get_cluster_spec() - if cluster_spec: - task_type = cluster_spec.task.task_type - task_id = cluster_spec.task.task_index - - if task_type != cluster_spec.cluster.chief_task_type: - temp_path = os.path.join(base_dir, "temp") - os.makedirs(temp_path, exist_ok=True) - temp_path = os.path.join(temp_path, f"{task_type}_{task_id}") - return temp_path - - return os.path.join(base_dir, name) - - -def _get_keras_distributed_strategy(enable_distributed: bool, accelerator_count: int): - """Returns distribute strategy for Keras distributed training. - - For multi-worker training, use tf.distribute.MultiWorkerMirroredStrategy(). - For single worker, multi-GPU training, use tf.distribute.MirroredStrategy(). - For non-distributed training, return None. Requires TensorFlow >= 2.12.0. - - Args: - enable_distributed (boolean): Required. Whether distributed training is enabled. - accelerator_count (int): Accelerator count specified for single worker training. - - Returns: - A tf.distribute.Strategy. - """ - import tensorflow as tf - - if tf.__version__ < "2.13.0": - raise ValueError("TensorFlow version < 2.13.0 is not supported.") - - if enable_distributed: - cluster_spec = _get_cluster_spec() - # Multiple workers, use tf.distribute.MultiWorkerMirroredStrategy(). - if cluster_spec and len(cluster_spec.cluster.task_types) >= 2: - return tf.distribute.MultiWorkerMirroredStrategy() - # Single worker, use tf.distribute.MirroredStrategy(). We validate accelerator_count > 1 before - # creating CustomJob. - else: - return tf.distribute.MirroredStrategy() - # Multi-GPU training, but enable_distributed is false, use tf.distribute.MirroredStrategy(). - elif accelerator_count and accelerator_count > 1: - return tf.distribute.MirroredStrategy() - # Not distributed, return None. - else: - return None - - -def _set_keras_distributed_strategy(model: Any, strategy: Any): - """Returns a model compiled within the scope of the specified distribute strategy. - - Requires TensorFlow >= 2.12.0. - - Args: - model (Any): Required. An instance of a Keras model. - strategy (tf.distribute.Strategy): The distribute strategy. - - Returns: - A tf.distribute.Strategy. - """ - # Clone and compile model within scope of chosen strategy. - import tensorflow as tf - - if tf.__version__ < "2.13.0": - raise ValueError("TensorFlow version < 2.13.0 is not supported.") - - with strategy.scope(): - cloned_model = tf.keras.models.clone_model(model) - cloned_model.compile_from_config(model.get_compile_config()) - - return cloned_model - - -def setup_pytorch_distributed_training(model: Any) -> Any: - """Sets up environment for PyTorch distributed training. - - The number of nodes or processes (`world_size`) is the number of - workers being used for the training run. This helper can be called - within the Vertex remote training-enabled function of a custom model - built on top of `torch.nn.Module`. 
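As context for the distributed setup below, here is a small sketch of how ranks and the world size fall out of the `_ClusterSpec` rules defined above for one hypothetical cluster shape (host names and ports are made up):

```python
# Hypothetical CLUSTER_SPEC: 1 chief, 2 workers, 1 parameter server, 1 evaluator.
spec = _ClusterSpec({
    "cluster": {
        "workerpool0": ["chief-0:2222"],
        "workerpool1": ["worker-0:2222", "worker-1:2222"],
        "workerpool2": ["ps-0:2222"],
        "workerpool3": ["evaluator-0:2222"],
    },
    "task": {"type": "workerpool2", "index": 0},
})
spec.get_world_size()  # -> 5 (1 chief + 2 workers + 1 server + 1 evaluator)
spec.get_rank()        # -> 3 (chief is 0, workers are 1-2, the server follows at 3)
```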
- - Example Usage: - ``` - vertexai.init( - project="my-project", - location="my-location", - staging_bucket="gs://my-bucket", - ) - vertexai.preview.init(remote=True) - - class MyModel(vertexai.preview.VertexModel, torch.nn.Module): - ... - - @vertexai.preview.developer.mark.train() - def my_train_method(self, ...): - self = setup_pytorch_distributed_training(self) - ... - - model = MyModel(...) - - # This will execute distributed, remote training - model.my_train_method(...) - ``` - Args: - model (Any): Required. An instance of a custom PyTorch model. - - Returns: - A custom model built on top of `torch.nn.Module` wrapped in DistributedDataParallel. - """ - import torch - - if not model.cluster_spec: # cluster_spec is populated for multi-worker training - return model - - device = "cuda" if model._enable_cuda else "cpu" - rank = model.cluster_spec.get_rank() - world_size = model.cluster_spec.get_world_size() - address, port = model.cluster_spec.get_chief_address_port() - - os.environ[_MASTER_ADDR] = address - os.environ[_MASTER_PORT] = str(port) - - torch.distributed.init_process_group( - backend="nccl" if device == "cuda" else "gloo", - rank=rank, - world_size=world_size, - ) - - if device == "cuda": - model.to(device) - model = torch.nn.parallel.DistributedDataParallel(model) - - _LOGGER.info( - f"Initialized process rank: {rank}, world_size: {world_size}, device: {device}", - ) - return model - - -# pylint: disable=protected-access -class ResourcePool(resource_pool_utils._ResourcePool): - """Wraps class that holds a worker pool spec configuration. - - Attributes: - replica_count (int): - The number of worker replicas. - machine_type (str): - The type of machine to use for remote training. - accelerator_count (int): - The number of accelerators to attach to a worker replica. - accelerator_type (str): - Hardware accelerator type. One of ACCELERATOR_TYPE_UNSPECIFIED, - NVIDIA_TESLA_A100, NVIDIA_TESLA_P100, NVIDIA_TESLA_V100, - NVIDIA_TESLA_K80, NVIDIA_TESLA_T4, NVIDIA_TESLA_P4 - boot_disk_type (str): - Type of the boot disk (default is `pd-ssd`). - Valid values: `pd-ssd` (Persistent Disk Solid State Drive) or - `pd-standard` (Persistent Disk Hard Disk Drive). - boot_disk_size_gb (int): - Size in GB of the boot disk (default is 100GB). - boot disk size must be within the range of [100, 64000]. - """ diff --git a/vertexai/preview/hyperparameter_tuning/__init__.py b/vertexai/preview/hyperparameter_tuning/__init__.py deleted file mode 100644 index 461acffc4b..0000000000 --- a/vertexai/preview/hyperparameter_tuning/__init__.py +++ /dev/null @@ -1,30 +0,0 @@ -# -*- coding: utf-8 -*- - -# Copyright 2023 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-# - -import warnings -from vertexai.preview.hyperparameter_tuning import ( - vizier_hyperparameter_tuner, -) -from vertexai.preview._workflow.shared import constants - - -warnings.warn(constants._V2_0_WARNING_MSG, DeprecationWarning, stacklevel=1) - -VizierHyperparameterTuner = vizier_hyperparameter_tuner.VizierHyperparameterTuner - - -__all__ = ("VizierHyperparameterTuner",) diff --git a/vertexai/preview/hyperparameter_tuning/vizier_hyperparameter_tuner.py b/vertexai/preview/hyperparameter_tuning/vizier_hyperparameter_tuner.py deleted file mode 100644 index a26976ec42..0000000000 --- a/vertexai/preview/hyperparameter_tuning/vizier_hyperparameter_tuner.py +++ /dev/null @@ -1,984 +0,0 @@ -# -*- coding: utf-8 -*- - -# Copyright 2023 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# - -import concurrent -import functools -import inspect -import logging -import os -from typing import Any, Callable, Dict, List, Optional, Tuple, Union -import uuid - -from google.cloud.aiplatform import base -from google.cloud.aiplatform_v1.services.vizier_service import ( - VizierServiceClient, -) -from google.cloud.aiplatform_v1.types import study as gca_study -import vertexai -from vertexai.preview._workflow.driver import remote -from vertexai.preview._workflow.driver import ( - VertexRemoteFunctor, -) -from vertexai.preview._workflow.executor import ( - remote_container_training, -) -from vertexai.preview._workflow.executor import ( - training, -) -from vertexai.preview._workflow.shared import configs -from vertexai.preview._workflow.shared import ( - supported_frameworks, -) - - -try: - import pandas as pd - - PandasData = pd.DataFrame - -except ImportError: - PandasData = Any - -_LOGGER = base.Logger(__name__) - -# Metric id constants -_CUSTOM_METRIC_ID = "custom" -_ROC_AUC_METRIC_ID = "roc_auc" -_F1_METRIC_ID = "f1" -_PRECISION_METRIC_ID = "precision" -_RECALL_METRIC_ID = "recall" -_ACCURACY_METRIC_ID = "accuracy" -_MAE_METRIC_ID = "mae" -_MAPE_METRIC_ID = "mape" -_R2_METRIC_ID = "r2" -_RMSE_METRIC_ID = "rmse" -_RMSLE_METRIC_ID = "rmsle" -_MSE_METRIC_ID = "mse" - -_SUPPORTED_METRIC_IDS = frozenset( - [ - _CUSTOM_METRIC_ID, - _ROC_AUC_METRIC_ID, - _F1_METRIC_ID, - _PRECISION_METRIC_ID, - _RECALL_METRIC_ID, - _ACCURACY_METRIC_ID, - _MAE_METRIC_ID, - _MAPE_METRIC_ID, - _R2_METRIC_ID, - _RMSE_METRIC_ID, - _RMSLE_METRIC_ID, - _MSE_METRIC_ID, - ] -) -_SUPPORTED_CLASSIFICATION_METRIC_IDS = frozenset( - [ - _ROC_AUC_METRIC_ID, - _F1_METRIC_ID, - _PRECISION_METRIC_ID, - _RECALL_METRIC_ID, - _ACCURACY_METRIC_ID, - ] -) - - -# Vizier client constnats -_STUDY_NAME_PREFIX = "vizier_hyperparameter_tuner_study" -_CLIENT_ID = "client" - -# Train and test split constants -_DEFAULT_TEST_FRACTION = 0.25 - -# Parameter constants -_TRAINING_X_PARAMS = ["X", "x", "X_train", "x_train"] -_TRAINING_DATA_PARAMS = ["X", "x", "X_train", "x_train", "training_data"] -_OSS_TRAINING_DATA_PARAMS = ["X", "x"] -_TRAINING_TARGET_VALUE_PARAMS = ["y", "y_train"] -_Y_DATA_PARAM = "y" -_X_TEST_PARAMS = 
["X_test", "x_test"] -_Y_TEST = "y_test" -_VALIDATION_DATA = "validation_data" - - -class VizierHyperparameterTuner: - """The Vizier hyperparameter tuner for local and remote tuning.""" - - def __init__( - self, - get_model_func: Callable[..., Any], - max_trial_count: int, - parallel_trial_count: int, - hparam_space: List[Dict[str, Any]], - metric_id: str = _ACCURACY_METRIC_ID, - metric_goal: str = "MAXIMIZE", - max_failed_trial_count: int = 0, - search_algorithm: str = "ALGORITHM_UNSPECIFIED", - project: Optional[str] = None, - location: Optional[str] = None, - study_display_name_prefix: str = _STUDY_NAME_PREFIX, - ): - """Initializes a VizierHyperparameterTuner instance. - - VizierHyperparameterTuner provides support for local and remote Vizier - hyperparameter tuning. For information on Vertex AI Vizier, refer to - https://cloud.google.com/vertex-ai/docs/vizier/overview. - - Args: - get_model_func (Callable[..., Any]): - Required. A function that returns a model to be tuned. Non-tunable - parameters should be preset by get_model_func, and tunable - parameters will be set byVizierHyperparameterTuner. - - Example: - # parameter_a and parameter_b are tunable. - def get_model_func(parameter_a, parameter_b): - # parameter_c is non-tunable - parameter_c = 10 - return ExampleModel(parameter_a, parameter_b, parameter_c) - - For lightning models, get_model_func should return a dictionary - containing the following keys: 'model', 'trainer', - 'train_dataloaders'; each representing the lightning model, the - trainer and the training dataloader(s) respectively. - - max_trial_count (int): - Required. The desired total number of trials. - parallel_trial_count (int): - Required. The desired number of trials to run in parallel. For - pytorch lightning, currently we only support parallel_trial_count=1. - hparam_space (List[Dict[str, Any]]): - Required. A list of parameter specs each representing a single - tunable parameter. For parameter specs, refer to - https://cloud.google.com/vertex-ai/docs/reference/rest/v1/StudySpec#parameterspec - metric_id (str): - Optional. The ID of the metric. Must be one of 'roc_auc', 'f1', - 'precision', 'recall', 'accuracy', 'mae', 'mape', 'r2', 'rmse', - 'rmsle', 'mse' or 'custom'. Only 'accuracy' supports multi-class - classification. Set to 'custom' to use a custom score function. - Default is 'accuracy'. - metric_goal (str): - Optional. The optimization goal of the metric. Must be one of - 'GOAL_TYPE_UNSPECIFIED', 'MAXIMIZE' and 'MINIMIZE'. - 'GOAL_TYPE_UNSPECIFIED' defaults to maximize. Default is - 'MAXIMIZE'. Refer to - https://cloud.google.com/vertex-ai/docs/reference/rest/v1/StudySpec#goaltype - for details on goal types. - max_failed_trial_count (int): - Optional. The number of failed trials that need to be seen before - failing the tuning process. If 0, the tuning process only fails - when all trials have failed. Default is 0. - search_algorithm (str): - Optional. The search algorithm specified for the study. Must be - one of 'ALGORITHM_UNSPECIFIED', 'GRID_SEARCH' and 'RANDOM_SEARCH'. - Default is 'ALGORITHM_UNSPECIFIED'. Refer to - https://cloud.google.com/vertex-ai/docs/reference/rest/v1/StudySpec#algorithm - for details on the study algorithms. - project (str): - Optional. Project for the study. If not set, project set in - vertexai.init will be used. - location (str): - Optional. Location for the study. 
If not set, location set in - vertexai.init will be used. - study_display_name_prefix (str): - Optional. Prefix of the study display name. Default is - 'vizier-hyperparameter-tuner-study'. - """ - self.get_model_func = get_model_func - self.max_trial_count = max_trial_count - self.parallel_trial_count = parallel_trial_count - self.hparam_space = hparam_space - - if metric_id not in _SUPPORTED_METRIC_IDS: - raise ValueError( - f"Unsupported metric_id {metric_id}. Supported metric_ids: {_SUPPORTED_METRIC_IDS}" - ) - self.metric_id = metric_id - - self.metric_goal = metric_goal - self.max_failed_trial_count = max_failed_trial_count - self.search_algorithm = search_algorithm - - # Initializes Vertex config - self.vertex = configs.VertexConfig() - - # Creates Vizier client, study and trials - project = project or vertexai.preview.global_config.project - location = location or vertexai.preview.global_config.location - self.vizier_client, self.study = self._create_study( - project, location, study_display_name_prefix - ) - - # self.models should be a mapping from trial names to trained models. - self.models = {} - - def _create_study( - self, - project: str, - location: str, - study_display_name_prefix: str = _STUDY_NAME_PREFIX, - ) -> Tuple[VizierServiceClient, gca_study.Study]: - """Creates a Vizier study config. - - Args: - project (str): - Project for the study. - location (str): - Location for the study. - study_display_name_prefix (str): - Prefix for the study display name. Default is - 'vizier-hyperparameter-tuner-study'. - Returns: - A Vizier client and the created study. - """ - vizier_client = VizierServiceClient( - client_options=dict(api_endpoint=f"{location}-aiplatform.googleapis.com") - ) - study_config = { - "display_name": f"{study_display_name_prefix}_{uuid.uuid4()}".replace( - "-", "_" - ), - "study_spec": { - "algorithm": self.search_algorithm, - "parameters": self.hparam_space, - "metrics": [{"metric_id": self.metric_id, "goal": self.metric_goal}], - }, - } - parent = f"projects/{project}/locations/{location}" - study = vizier_client.create_study(parent=parent, study=study_config) - return vizier_client, study - - def _suggest_trials(self, num_trials: int) -> List[gca_study.Trial]: - """Suggests trials using the Vizier client. - - During each round of tuning, num_trials number of trials will - be suggested. For each trial, training will be performed locally or - remotely. After training finishes, we use the trained model to measure - the metrics and report the metrics to the trial before marking it as - completed. At the next round of tuning, another parallel_trial_count - of trials will be suggested based on previous measurements. - - Args: - num_trials (int): Required. Number of trials to suggest. - Returns: - A list of suggested trials. - """ - return ( - self.vizier_client.suggest_trials( - { - "parent": self.study.name, - "suggestion_count": num_trials, - "client_id": _CLIENT_ID, - } - ) - .result() - .trials - ) - - def get_best_models(self, num_models: int = 1) -> List[Any]: - """Gets the best models from completed trials. - - Args: - num_models (int): - Optional. The number of best models to return. Default is 1. - - Returns: - A list of best models. 
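A short, hypothetical usage of this accessor once tuning has finished (assumes `tuner` is a `VizierHyperparameterTuner` whose `fit()` has already completed):

```python
# Hypothetical: retrieve the best models after tuner.fit(...) has run.
best_model = tuner.get_best_models()[0]          # single best model
top_three = tuner.get_best_models(num_models=3)  # up to three best models, best first
```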
- """ - trials = [] - for trial in self.vizier_client.list_trials({"parent": self.study.name}).trials: - if ( - trial.state == gca_study.Trial.State.SUCCEEDED - and trial.name in self.models - ): - trials.append((trial.name, trial.final_measurement.metrics[0].value)) - - maximize = True if self.metric_goal == "MAXIMIZE" else False - trials.sort(reverse=maximize, key=lambda x: x[1]) - - return [self.models[trial[0]] for trial in trials[:num_models]] - - def _create_train_and_test_splits( - self, - x: PandasData, - y: Union[PandasData, str], - test_fraction: float = _DEFAULT_TEST_FRACTION, - ) -> Tuple[PandasData, PandasData, Optional[PandasData], PandasData]: - """Creates train and test splits if no manual test splits provided. - - Depending on the model to be tuned, the training step may take in either - one or two DataFrames for training data and target values. - - 1. Two pandas DataFrames: - - One contains training data and the other contains target values. - - Four DataFrames will be returned, ie. X_train, X_test, y_train, - y_test. - 2. One pandas DataFrame: - - Contains both training data and target values. - - Only three DataFrames will be returned, ie. X_train, X_test, - y_test. X_train contains both training data and target values. The - testing splits need to be separated into data and values to make - predictions. - - Args: - x (pandas.DataFrame): - Required. A pandas DataFrame for the dataset. If it contains the - target column, y must be a string specifying the target column - name. - y (Union[pandas.DataFrame, str]): - Required. A pandas DataFrame containing target values for the - dataset or a string specifying the target column name. - test_fraction (float): - Optional. The proportion of the dataset to include in the test - split. eg. test_fraction=0.25 for a pandas Dataframe with 100 - rows would result in 75 rows for training and 25 rows for - testing. Default is 0.25. - Returns: - A tuple containing training data, testing data, training target - values, testing target values. Training target values may be None if - training data contrains training target. - """ - if test_fraction <= 0 or test_fraction >= 1: - raise ValueError( - "test_fraction must be greater than 0 and less than 1 but was " - f"{test_fraction}." - ) - try: - from sklearn.model_selection import train_test_split - except ImportError: - raise ImportError( - "scikit-learn must be installed to create train and test splits. " - "Please call `pip install scikit-learn>=0.24`" - ) from None - - if isinstance(y, str): - try: - import pandas as pd - except ImportError: - raise ImportError( - "pandas must be installed to create train and test splits " - "with a target column name." - ) from None - x_train, x_test = train_test_split(x, test_size=test_fraction) - y_test = pd.DataFrame(x_test.pop(y)) - return x_train, x_test, None, y_test - else: - return train_test_split(x, y, test_size=test_fraction) - - def _evaluate_model( - self, model: Any, x_test: PandasData, y_test: PandasData - ) -> Tuple[Any, float]: - """Evaluates a model. - - Metrics are calculated based on the metric_id set by the user. After - reporting the metrics, mark the trial as complete. Only completed trials - can be listed as optimal trials. - - Supported metric_id: 'roc_auc', 'f1', 'precision', 'recall', 'accuracy', - 'mae', 'mape', 'r2', 'rmse', 'rmsle', 'mse' or 'custom'. Only 'accuracy' - supports multi-class classification. - - When metric_id is 'custom', the model must provide a score() function to - provide a metric value. 
Otherwise, the model must provide a predict() - function that returns array-like prediction results. - - e.g. - class ExampleModel: - def score(x_test, y_test): - # Code to make predictions and calculate metrics - return custom_metric(y_true=y_test, y_pred=self.predict(x_test)) - - Args: - model (Any): - Required. The model trained during the trial. - x_test (pandas.DataFrame): - Required. The testing data. - y_test (pandas.DataFrame): - Required. The testing values. - Returns: - A tuple containing the model and the corresponding metric value. - """ - try: # Only used by local tuning loop - import sklearn.metrics - - _SUPPORTED_METRIC_FUNCTIONS = { - _ROC_AUC_METRIC_ID: sklearn.metrics.roc_auc_score, - _F1_METRIC_ID: sklearn.metrics.f1_score, - _PRECISION_METRIC_ID: sklearn.metrics.precision_score, - _RECALL_METRIC_ID: sklearn.metrics.recall_score, - _ACCURACY_METRIC_ID: sklearn.metrics.accuracy_score, - _MAE_METRIC_ID: sklearn.metrics.mean_absolute_error, - _MAPE_METRIC_ID: sklearn.metrics.mean_absolute_percentage_error, - _R2_METRIC_ID: sklearn.metrics.r2_score, - _RMSE_METRIC_ID: functools.partial( - sklearn.metrics.mean_squared_error, squared=False - ), - _RMSLE_METRIC_ID: functools.partial( - sklearn.metrics.mean_squared_log_error, squared=False - ), - _MSE_METRIC_ID: sklearn.metrics.mean_squared_error, - } - except Exception as e: - raise ImportError( - "scikit-learn must be installed to evaluate models. " - "Please call `pip install scikit-learn>=0.24`" - ) from e - - if self.metric_id == _CUSTOM_METRIC_ID: - metric_value = model.score(x_test, y_test) - else: - if self.metric_id in _SUPPORTED_METRIC_IDS: - predictions = model.predict(x_test) - # Keras outputs probabilities. Must convert to output label. - if ( - supported_frameworks._is_keras(model) - and self.metric_id in _SUPPORTED_CLASSIFICATION_METRIC_IDS - ): - if isinstance(predictions, pd.DataFrame): - predictions = predictions.to_numpy() - predictions = ( - predictions.argmax(axis=-1) - if predictions.shape[-1] > 1 - else (predictions > 0.5).astype("int32") - ) - metric_value = _SUPPORTED_METRIC_FUNCTIONS[self.metric_id]( - y_test, predictions - ) - else: - raise ValueError( - f"Unsupported metric_id {self.metric_id}. Supported metric_ids: {_SUPPORTED_METRIC_IDS}" - ) - return (model, metric_value) - - def _add_model_and_report_trial_metrics( - self, trial_name: str, trial_output: Optional[Tuple[Any, float]] - ) -> None: - """Adds a model to the dictionary of trained models and report metrics. - - If trial_output is None, it means that the trial has failed and should - be marked as infeasible. - - Args: - trial_name (str): - Required. The trial name. - trial_output (Optional[Tuple[Any, float]]): - Required. A tuple containing the model and the metric value, or - None if the trial has failed. - """ - if trial_output is not None: - model, metric_value = trial_output - self.vizier_client.complete_trial( - { - "name": trial_name, - "final_measurement": { - "metrics": [ - {"metric_id": self.metric_id, "value": metric_value} - ] - }, - } - ) - self.models[trial_name] = model - else: - self.vizier_client.complete_trial( - {"name": trial_name, "trial_infeasible": True} - ) - - def _get_model_param_type_mapping(self): - """Gets a mapping from parameter_id to its type. - - Returns: - A mapping from parameter id to its type. 
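For illustration, here is a hypothetical `hparam_space` and the parameter-id-to-type mapping it would produce under the rules implemented below (parameter names and ranges are made up):

```python
# Hypothetical hparam_space entries and the Python types they map to.
hparam_space = [
    {"parameter_id": "learning_rate",
     "double_value_spec": {"min_value": 1e-4, "max_value": 1e-1}},  # -> float
    {"parameter_id": "num_layers",
     "integer_value_spec": {"min_value": 1, "max_value": 8}},       # -> int
    {"parameter_id": "activation",
     "categorical_value_spec": {"values": ["relu", "tanh"]}},       # -> str
    {"parameter_id": "batch_size",
     "discrete_value_spec": {"values": [32, 64, 128]}},             # -> int (type of first value)
]
```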
- """ - model_param_type_mapping = {} - for param in self.hparam_space: - param_id = param["parameter_id"] - if "double_value_spec" in param: - param_type = float - elif "integer_value_spec" in param: - param_type = int - elif "categorical_value_spec" in param: - param_type = str - elif "discrete_value_spec" in param: - param_type = type(param["discrete_value_spec"]["values"][0]) - else: - raise ValueError( - f"Invalid hparam_space configuration for parameter {param_id}" - ) - model_param_type_mapping[param_id] = param_type - - return model_param_type_mapping - - def _set_model_parameters( - self, - trial: gca_study.Trial, - fixed_init_params: Optional[Dict[Any, Any]] = None, - fixed_runtime_params: Optional[Dict[Any, Any]] = None, - ) -> Tuple[Any, Dict[Any, Any]]: - """Returns a model intialized with trial parameters and a dictionary of runtime parameters. - - Initialization parameters are passed to the get_model_func. Runtime parameters - will be passed to the model's fit() or @developer.mark.train()-decorated - method outside of this function. - - Args: - trial (gca_study.Trial): Required. A trial suggested by Vizier. - fixed_init_params (Dict[Any, Any]): Optional. A dictionary of fixed - parameters to be passed to get_model_func. - fixed_runtime_params (Dict[Any, Any]): Optional. A dictionary of fixed - runtime parameters. - - Returns: - A model initialized using parameters from the specified trial and - a dictionary of runtime parameters. - """ - model_init_params = {} - model_runtime_params = {} - get_model_func_binding = inspect.signature(self.get_model_func).parameters - - model_param_type_mapping = self._get_model_param_type_mapping() - - for param in trial.parameters: - param_id = param.parameter_id - param_value = ( - model_param_type_mapping[param_id](param.value) - if param_id in model_param_type_mapping - else param.value - ) - if param_id in get_model_func_binding: - model_init_params[param_id] = param_value - else: - model_runtime_params[param_id] = param_value - - if fixed_init_params: - model_init_params.update(fixed_init_params) - if fixed_runtime_params: - model_runtime_params.update(fixed_runtime_params) - - return self.get_model_func(**model_init_params), model_runtime_params - - def _is_remote(self, train_method: VertexRemoteFunctor) -> bool: - """Checks if a train method will be executed locally or remotely. - - The train method will be executed remotely if: - - The train method's vertex config sets remote to True (eg. - train.vertex.remote=True) - - Or, .vertex.remote is not set but the global config defaults - remote to True. (eg. vertexai.preview.init(remote=True, ...)) - - Otherwise, the train method will be executed locally. - - Args: - train_method (VertexRemoteFunctor): - Required. The train method. - Returns: - Whether the train method will be executed locally or remotely. - """ - return train_method.vertex.remote or ( - train_method.vertex.remote is None and vertexai.preview.global_config.remote - ) - - def _override_staging_bucket( - self, train_method: VertexRemoteFunctor, trial_name: str - ) -> None: - """Overrides the staging bucket for a train method. - - A staging bucket must be specified by: - - The train method's training config. - eg. train.vertex.remote_config.staging_bucket = ... - - Or, .vertex.remote_config.staging_bucket is not set, but a - default staging bucket is specified in the global config. - eg. vertexai.init(staging_bucket=...) - - The staging bucket for each trial is overriden so that each trial uses - its own directory. 
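To make the per-trial override concrete, the sketch below traces the path construction performed in the method body for one hypothetical staging bucket and trial name (both values are illustrative):

```python
# Hypothetical inputs to the override.
staging_bucket = "gs://my-bucket/staging"
trial_name = "projects/123/locations/us-central1/studies/456/trials/7"

# "-".join(trial_name.split("/")[:-1])
#   -> "projects-123-locations-us-central1-studies-456-trials"
# Per-trial staging bucket written back onto the train method:
#   gs://my-bucket/staging/projects-123-locations-us-central1-studies-456-trials/7
```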
- - Args: - train_method (VertexRemoteFunctor): - Required. The train method. - trial_name (str): Required. The trial name. - Raises: - ValueError if no staging bucket specified and no default staging - bucket set. - """ - staging_bucket = ( - train_method.vertex.remote_config.staging_bucket - or vertexai.preview.global_config.staging_bucket - ) - if not staging_bucket: - raise ValueError( - "No default staging bucket set. " - "Please call `vertexai.init(staging_bucket='gs://my-bucket')." - ) - train_method.vertex.remote_config.staging_bucket = os.path.join( - staging_bucket, - "-".join(trial_name.split("/")[:-1]), - trial_name.split("/")[-1], - ) - - def _get_vertex_model_train_method_and_params( - self, - model: remote.VertexModel, - x_train: PandasData, - y_train: Optional[PandasData], - x_test: PandasData, - y_test: PandasData, - trial_name: str, - ) -> Tuple[VertexRemoteFunctor, Dict[str, Any]]: - """Gets the train method for a VertexModel model and data parameters. - - Supported parameter names: - - Training data: ['X', 'X_train', 'x', 'x_train', 'training_data']. - - Training target values: ['y', 'y_train']. If not provided, training - data should contain target values. - - Testing data: ['X_test', 'x_test', 'validation_data']. - - Testing target values: ['y_test']. If not provided, testing data - should contain target values. - - If remote mode is turned on, overrides the training staging bucket for - each trial. - - Args: - model (remote.VertexModel): - Required. An instance of VertexModel. - x_train (pandas.DataFrame): - Required. Training data. - y_train (Optional[pandas.DataFrame]): - Required. Training target values. If None, x_train should - include training target values. - x_test (pandas.DataFrame): - Required. Testing data. - y_test (pandas.DataFrame): - Required. Testing target values. - trial_name (str): - Required. The trial name. - Returns: - The train method for the Vertex model and data params. - Raises: - ValueError if there is no remote executable train method. - """ - data_params = {} - for _, attr_value in inspect.getmembers(model): - if isinstance(attr_value, VertexRemoteFunctor) and ( - attr_value._remote_executor == training.remote_training - or attr_value._remote_executor == remote_container_training.train - ): - params = inspect.signature(attr_value).parameters - for param in params: - if param in _TRAINING_DATA_PARAMS: - data_params[param] = x_train - elif param in _TRAINING_TARGET_VALUE_PARAMS: - data_params[param] = y_train - elif param in _X_TEST_PARAMS: - data_params[param] = x_test - elif param == _Y_TEST: - data_params[_Y_TEST] = y_test - elif param == _VALIDATION_DATA: - data_params[_VALIDATION_DATA] = pd.concat( - [x_test, y_test], axis=1 - ) - if self._is_remote(attr_value): - self._override_staging_bucket(attr_value, trial_name) - return (attr_value, data_params) - raise ValueError("No remote executable train method.") - - def _get_lightning_train_method_and_params( - self, - model: Dict[str, Any], - trial_name: str, - ): - """Gets the train method and parameters for a Lightning model. - - Given the lightning model, the trainer and the training dataloader(s), - returns trainer.fit and the parameters containing the model and the - training dataloader(s). If the trainer is enabled to run remotely and - remote mode is turned on, overrides the training staging bucket for - each trial. - - Training data and target values have already been passed into the - training dataloader(s), so no additional runtime parameters need to be - set. 
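For reference, a minimal, hypothetical `get_model_func` that returns the dictionary shape this path expects ('model', 'trainer', 'train_dataloaders'). It assumes `torch` and `pytorch_lightning` are installed, takes `x`/`y` as pandas DataFrames, and treats `learning_rate` as the tunable parameter; the model class and feature width are placeholders:

```python
import pytorch_lightning as pl
import torch


class TinyRegressor(pl.LightningModule):
    """Placeholder Lightning model used only for this sketch."""

    def __init__(self, learning_rate: float):
        super().__init__()
        self.learning_rate = learning_rate
        self.linear = torch.nn.Linear(4, 1)  # placeholder feature width

    def training_step(self, batch, batch_idx):
        features, targets = batch
        return torch.nn.functional.mse_loss(self.linear(features), targets)

    def configure_optimizers(self):
        return torch.optim.Adam(self.parameters(), lr=self.learning_rate)


def get_model_func(x, y, learning_rate):
    # x and y are pandas DataFrames supplied by the tuner as fixed init params.
    dataset = torch.utils.data.TensorDataset(
        torch.tensor(x.values, dtype=torch.float32),
        torch.tensor(y.values, dtype=torch.float32),
    )
    return {
        "model": TinyRegressor(learning_rate),
        "trainer": pl.Trainer(max_epochs=2),
        "train_dataloaders": torch.utils.data.DataLoader(dataset, batch_size=32),
    }
```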
- - Args: - model (Dict[str, Any]): - Required. A dictionary containing the following keys: 'model', - 'trainer', 'train_dataloaders'; each representing the lightning - model, the trainer and the training dataloader(s) respectively. - trial_name (str): - Required. The trial name. - Returns: - The train method and its parameters for the lightning model. - """ - trainer = model["trainer"] - if isinstance(trainer.fit, VertexRemoteFunctor) and self._is_remote( - trainer.fit - ): - self._override_staging_bucket(trainer.fit, trial_name) - return trainer.fit, { - "model": model["model"], - "train_dataloaders": model["train_dataloaders"], - } - - def _run_trial( - self, - x_train: PandasData, - y_train: Optional[PandasData], - x_test: PandasData, - y_test: PandasData, - trial: gca_study.Trial, - fixed_init_params: Optional[Dict[Any, Any]] = None, - fixed_runtime_params: Optional[Dict[Any, Any]] = None, - ) -> Optional[Tuple[Any, float]]: - """Runs a trial. - - This function sets model parameters and train method parameters, - launches either local or remote training, and evaluates the model. With - parallel tuning, this function can be the target function that would be - executed in parallel. - - Args: - x_train (pandas.DataFrame): - Required. Training data. - y_train (Optional[pandas.DataFrame]): - Required. Training target values. If None, x_train should - include training target values. - x_test (pandas.DataFrame): - Required. Testing data. - y_test (pandas.DataFrame): - Required. Testing target values. - trial (gca_study.Trial): Required. A trial suggested by Vizier. - fixed_init_params (Dict[Any, Any]): Optional. A dictionary of fixed - parameters to be passed to get_model_func. - fixed_runtime_params (Dict[Any, Any]): Optional. A dictionary of - fixed runtime parameters. - Returns: - If the trial is feasible, returns a tuple of the trained model and - its corresponding metric value. If the trial is infeasible, returns - None. - """ - model, model_runtime_params = self._set_model_parameters( - trial, fixed_init_params, fixed_runtime_params - ) - - if isinstance(model, remote.VertexModel): - train_method, params = self._get_vertex_model_train_method_and_params( - model, - x_train, - y_train, - x_test, - y_test, - trial.name, - ) - elif isinstance(model, dict): - train_method, params = self._get_lightning_train_method_and_params( - model, - trial.name, - ) - elif supported_frameworks._is_keras(model): - train_method, params = self._get_train_method_and_params( - model, x_train, y_train, trial.name, params=["x", "y"] - ) - elif supported_frameworks._is_sklearn(model): - train_method, params = self._get_train_method_and_params( - model, x_train, y_train, trial.name, params=["X", "y"] - ) - else: - raise ValueError(f"Unsupported model type {type(model)}") - - model_runtime_params.update(params) - - try: - train_method(**model_runtime_params) - except Exception as e: - _LOGGER.warning(f"Trial {trial.name} failed: {e}.") - return None - - if isinstance(model, dict): - # For lightning, evaluate the model and keep track of the dictionary - # containing the model, the trainer, and the training dataloader(s). 
- _, metric_value = self._evaluate_model(model["model"], x_test, y_test) - return model, metric_value - - return self._evaluate_model(model, x_test, y_test) - - def _get_train_method_and_params( - self, - model: Any, - x_train: PandasData, - y_train: Optional[PandasData], - trial_name: str, - params: List[str], - ) -> Tuple[VertexRemoteFunctor, Dict[str, Any]]: - """Gets the train method for an Sklearn or Keras model and data parameters. - - Args: - model (Any): - Required. An instance of an Sklearn or Keras model. - x_train (pandas.DataFrame): - Required. Training data. - y_train (Optional[pandas.DataFrame]): - Required. Training target values. - trial_name (str): - Required. The trial name. - params (str): - Required. The list of data parameters. - Returns: - The train method for the model and data params. - Raises: - ValueError if there is no remote executable train method. - """ - data_params = {} - if isinstance(model.fit, VertexRemoteFunctor) and self._is_remote(model.fit): - self._override_staging_bucket(model.fit, trial_name) - attr_params = inspect.signature(model.fit).parameters - for param in params: - if param not in attr_params: - raise ValueError(f"Invalid data parameter {param}.") - if param in _OSS_TRAINING_DATA_PARAMS: - data_params[param] = x_train - elif param == _Y_DATA_PARAM: - data_params[param] = y_train - return (model.fit, data_params) - - def fit( - self, - x: PandasData, - y: Union[PandasData, str], - x_test: Optional[PandasData] = None, - y_test: Optional[PandasData] = None, - test_fraction: Optional[float] = _DEFAULT_TEST_FRACTION, - **kwargs, - ): - """Runs Vizier-backed hyperparameter tuning for a model. - - Extra runtime arguments will be forwarded to a model's fit() or - @vertexai.preview.developer.mark.train()-decorated method. - - Example Usage: - ``` - def get_model_func(parameter_a, parameter_b): - # parameter_c is non-tunable - parameter_c = 10 - return ExampleModel(parameter_a, parameter_b, parameter_c) - - x, y = pd.DataFrame(...), pd.DataFrame(...) - tuner = VizierHyperparameterTuner(get_model_func, ...) - # num_epochs will be passed to ExampleModel.fit() - # (ex: ExampleModel.fit(x, y, num_epochs=5)) - tuner.fit(x, y, num_epochs=5) - ``` - - Args: - x (pandas.DataFrame): - Required. A pandas DataFrame for the dataset. If it contains the - target column, y must be a string specifying the target column - name. - y (Union[pandas.DataFrame, str]): - Required. A pandas DataFrame containing target values for the - dataset or a string specifying the target column name. - x_test (pandas.DataFrame): - Optional. A pandas DataFrame for the test dataset. If not provided, - X will be split into X_train and X_test based on test_fraction. - y_test (pandas.DataFrame): - Optional. A pandas DataFrame containing target values for the test - dataset. If not provided, y will be split into y_train and t_test - based on test_fraction. - test_fraction (float): - Optional. The proportion of the dataset to include in the test - split. eg. test_fraction=0.25 for a pandas Dataframe with 100 - rows would result in 75 rows for training and 25 rows for - testing. Default is 0.25. - **kwargs (Any): - Optional. Keyword arguments to pass to the model's fit(), - or @vertexai.preview.developer.mark.train()-decorated method. - - Returns: - A model initialized using parameters from the specified trial and - a dictionary of runtime parameters. 
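In addition to the example above, a pre-made test split can be passed directly so that no internal split is performed (all DataFrames below are placeholders):

```python
# Hypothetical: supply explicit train/test splits instead of test_fraction.
tuner.fit(x=x_train_df, y=y_train_df, x_test=x_test_df, y_test=y_test_df, num_epochs=5)
```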
- """ - if x_test is None or y_test is None or x_test.empty or y_test.empty: - x, x_test, y, y_test = self._create_train_and_test_splits( - x, y, test_fraction - ) - - # Fixed params that are passed to get_model_func. - # Lightning, for example, requires X and y to be passed to get_model_func. - fixed_init_params = {} - get_model_func_binding = inspect.signature(self.get_model_func).parameters - for x_param_name in _TRAINING_X_PARAMS: - if x_param_name in get_model_func_binding: - # Temporary solution for b/295191253 - # TODO(b/295191253) - if self.parallel_trial_count > 1: - raise ValueError( - "Currently pytorch lightning only supports `parallel_trial_count = 1`. " - f"In {self} it was set to {self.parallel_trial_count}." - ) - fixed_init_params[x_param_name] = x - break - for y_param_name in _TRAINING_TARGET_VALUE_PARAMS: - if y_param_name in get_model_func_binding: - fixed_init_params[y_param_name] = y - break - - # Disable remote job logs when running trials. - logging.getLogger("vertexai.remote_execution").disabled = True - try: - num_completed_trials = 0 - num_failed_trials = 0 - while num_completed_trials < self.max_trial_count: - num_new_trials = min( - (self.max_trial_count - num_completed_trials), - self.parallel_trial_count, - ) - suggested_trials = self._suggest_trials(num_new_trials) - inputs = [ - (x, y, x_test, y_test, trial, fixed_init_params, kwargs) - for trial in suggested_trials - ] - _LOGGER.info( - f"Number of completed trials: {num_completed_trials}, " - f"Number of new trials: {num_new_trials}." - ) - - with concurrent.futures.ThreadPoolExecutor( - max_workers=num_new_trials - ) as executor: - trial_outputs = list( - executor.map(lambda t: self._run_trial(*t), inputs) - ) - - for i in range(num_new_trials): - trial_output = trial_outputs[i] - self._add_model_and_report_trial_metrics( - suggested_trials[i].name, trial_output - ) - if not trial_output: - num_failed_trials += 1 - if num_failed_trials == self.max_failed_trial_count: - raise ValueError("Maximum number of failed trials reached.") - num_completed_trials += num_new_trials - except Exception as e: - raise e - finally: - # Enable remote job logs after trials are complete. - logging.getLogger("vertexai.remote_execution").disabled = False - - if num_failed_trials == num_completed_trials: - raise ValueError("All trials failed.") - - _LOGGER.info( - f"Number of completed trials: {num_completed_trials}. Tuning complete." - ) diff --git a/vertexai/preview/initializer.py b/vertexai/preview/initializer.py deleted file mode 100644 index 08e7b1dc54..0000000000 --- a/vertexai/preview/initializer.py +++ /dev/null @@ -1,125 +0,0 @@ -# Copyright 2023 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-# - -from typing import Optional -import warnings -from google.cloud import aiplatform -from google.cloud.aiplatform import base -from vertexai.preview._workflow.executor import ( - persistent_resource_util, -) -from vertexai.preview._workflow.shared import ( - configs, - constants, -) - - -_LOGGER = base.Logger(__name__) - - -class _Config: - """Store common configurations and current workflow for remote execution.""" - - def __init__(self): - warnings.warn(constants._V2_0_WARNING_MSG, DeprecationWarning, stacklevel=1) - self._remote = False - self._cluster = None - - def init( - self, - *, - remote: Optional[bool] = None, - autolog: Optional[bool] = None, - cluster: Optional[configs.PersistentResourceConfig] = None, - ): - """Updates preview global parameters for Vertex remote execution. - - Args: - remote (bool): - Optional. A global flag to indicate whether or not a method will - be executed remotely. Default is Flase. The method level remote - flag has higher priority than this global flag. - autolog (bool): - Optional. Whether or not to turn on autologging feature for remote - execution. To learn more about the autologging feature, see - https://cloud.google.com/vertex-ai/docs/experiments/autolog-data. - cluster (PersistentResourceConfig): - Optional. If passed, check if the cluster exists. If not, create - a default one (single node, "n1-standard-4", no GPU) with the - given name. Then use the cluster to run CustomJobs. Default is - None. Example usage: - from vertexai.preview.shared.configs import PersistentResourceConfig - cluster = PersistentResourceConfig( - name="my-cluster-1", - resource_pools=[ - ResourcePool(replica_count=1,), - ResourcePool( - machine_type="n1-standard-8", - replica_count=2, - accelerator_type="NVIDIA_TESLA_P100", - accelerator_count=1, - ), - ] - ) - """ - if remote is not None: - self._remote = remote - - if autolog is True: - aiplatform.autolog() - elif autolog is False: - aiplatform.autolog(disable=True) - - if cluster is not None: - if cluster.disable: - self._cluster = None - else: - self._cluster = cluster - cluster_resource_name = persistent_resource_util.cluster_resource_name( - project=self.project, - location=self.location, - name=self._cluster.name, - ) - cluster_exists = persistent_resource_util.check_persistent_resource( - cluster_resource_name=cluster_resource_name, - service_account=cluster.service_account, - ) - if cluster_exists: - _LOGGER.info(f"Using existing cluster: {cluster_resource_name}") - return - # create a cluster - persistent_resource_util.create_persistent_resource( - cluster_resource_name=cluster_resource_name, - resource_pools=cluster.resource_pools, - service_account=cluster.service_account, - ) - - @property - def remote(self): - return self._remote - - @property - def autolog(self): - return aiplatform.utils.autologging_utils._is_autologging_enabled() - - @property - def cluster(self): - return self._cluster - - def __getattr__(self, name): - return getattr(aiplatform.initializer.global_config, name) - - -global_config = _Config() diff --git a/vertexai/preview/tabular_models/__init__.py b/vertexai/preview/tabular_models/__init__.py deleted file mode 100644 index 5ee4607d17..0000000000 --- a/vertexai/preview/tabular_models/__init__.py +++ /dev/null @@ -1,29 +0,0 @@ -# -*- coding: utf-8 -*- - -# Copyright 2023 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
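To make the `_Config.init` contract above concrete, here is a minimal sketch of how it would be exercised, assuming the module above were still importable and the call were made from within it; the cluster name and the keyword values are placeholders drawn from the docstring, not part of this change.

```
# Hypothetical usage sketch; values are assumptions based on the docstring above.
from vertexai.preview._workflow.shared import configs

config = _Config()
config.init(
    remote=True,    # execute decorated methods remotely unless overridden per-method
    autolog=False,  # leave experiment autologging disabled
    # If the named cluster does not exist, init() creates a default one
    # (single node, "n1-standard-4", no GPU) and uses it for CustomJobs.
    cluster=configs.PersistentResourceConfig(name="my-cluster-1"),
)
```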
-# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# - -import warnings - -from vertexai.preview._workflow.shared import constants -from vertexai.preview.tabular_models import tabnet_trainer - - -warnings.warn(constants._V2_0_WARNING_MSG, DeprecationWarning, stacklevel=1) - -TabNetTrainer = tabnet_trainer.TabNetTrainer - - -__all__ = ("TabNetTrainer",) diff --git a/vertexai/preview/tabular_models/tabnet_trainer.py b/vertexai/preview/tabular_models/tabnet_trainer.py deleted file mode 100644 index 93218fd4bf..0000000000 --- a/vertexai/preview/tabular_models/tabnet_trainer.py +++ /dev/null @@ -1,412 +0,0 @@ -# -*- coding: utf-8 -*- - -# Copyright 2023 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# - -import inspect -from typing import Any - -from google.cloud.aiplatform import base -from google.cloud.aiplatform.utils import gcs_utils -from vertexai.preview import developer -from vertexai.preview._workflow.driver import remote -from vertexai.preview._workflow.shared import configs -from vertexai.preview.developer import remote_specs - - -try: - import pandas as pd - - PandasData = pd.DataFrame - -except ImportError: - PandasData = Any - - -_LOGGER = base.Logger(__name__) - -# Constants for TabNetTrainer -_TABNET_TRAINING_IMAGE = "us-docker.pkg.dev/vertex-ai-restricted/automl-tabular/tabnet-training:20230605_1325" - -_TABNET_FIT_DISPLAY_NAME = "fit" -_TABNET_MACHINE_TYPE = "c2-standard-16" -_TABNET_BOOT_DISK_TYPE = "pd-ssd" -_TABNET_BOOT_DISK_SIZE_GB = 100 - -_CLASSIFICATION = "classification" -_REGRESSION = "regression" - - -class TabNetTrainer(remote.VertexModel): - """The TabNet trainer for remote training and prediction.""" - - def __init__( - self, - model_type: str, - target_column: str, - learning_rate: float, - job_dir: str = "", - enable_profiler: bool = False, - cache_data: str = "auto", - seed: int = 1, - large_category_dim: int = 1, - large_category_thresh: int = 300, - yeo_johnson_transform: bool = False, - weight_column: str = "", - max_steps: int = -1, - max_train_secs: int = -1, - measurement_selection_type: str = "BEST_MEASUREMENT", - optimization_metric: str = "", - eval_steps: int = 0, - batch_size: int = 100, - eval_frequency_secs: int = 600, - feature_dim: int = 64, - feature_dim_ratio: float = 0.5, - num_decision_steps: int = 6, - relaxation_factor: float = 1.5, - decay_every: float = 100.0, - decay_rate: float = 0.95, - gradient_thresh: float = 2000.0, - sparsity_loss_weight: float = 0.00001, - batch_momentum: float = 0.95, - batch_size_ratio: float = 0.25, - num_transformer_layers: int = 4, - num_transformer_layers_ratio: float = 0.25, - 
class_weight: float = 1.0, - loss_function_type: str = "default", - alpha_focal_loss: float = 0.25, - gamma_focal_loss: float = 2.0, - ): - """Initializes a TabNetTrainer instance. - - is_remote_trainer is always set to True because TabNetTrainer only - supports remote training. - - Args: - model_type (str): - Required. The type of prediction the model is to produce. - 'classification' or 'regression'. - target_column (str): - Required. The target column name. - learning_rate (float): - Required. The learning rate used by the linear optimizer. - job_dir (str): - Optional. The GCS directory for reading and writing inside the - the custom job. If provided, must start with 'gs://'. Default is - ''. - enable_profiler (bool): - Optional. Enables profiling and saves a trace during evaluation. - Default is False. - cache_data (str): - Optional. Whether to cache data or not. If set to 'auto', - caching is determined based on the dataset size. Default is - 'auto'. - seed (int): - Optional. Seed to be used for this run. Default is 1. - large_category_dim (int): - Optional. Embedding dimension for categorical feature with - large number of categories. Default is 1. - large_category_thresh (int): - Optional. Threshold for number of categories to apply - large_category_dim embedding dimension to. Default is 300. - yeo_johnson_transform (bool): - Optional. Enables trainable Yeo-Johnson power transform. Default - is False. - weight_column (str): - Optional. The weight column name. ''(empty string) for no - weight column. Default is ''(empty string). - max_steps (int): - Optional. Number of steps to run the trainer for. -1 for no - maximum steps. Default is -1. - max_train_seconds (int): - Optional. Amount of time in seconds to run the trainer for. -1 - for no maximum train seconds. Default is -1. - measurement_selection_type (str): - Optional. Which measurement to use if/when the service - automatically selects the final measurement from previously - reported intermediate measurements. One of 'BEST_MEASUREMENT' - or 'LAST_MEASUREMENT'. Default is 'BEST_MEASUREMENT'. - optimization_metric (str): - Optional. Optimization metric used for - `measurement_selection_type`. ''(empty string) for using the - default value: 'rmse' for regression and 'auc' for - classification. Default is ''(empty string). - eval_steps (int): - Optional. Number of steps to run evaluation for. If not - specified or negative, it means run evaluation on the whole - validation dataset. If set to 0, it means run evaluation for a - fixed number of samples. Default is 0. - batch_size (int): - Optional. Batch size for training. Default is 100. - eval_frequency_secs (int): - Optional. Frequency at which evaluation and checkpointing will - take place. Default is 600. - feature_dim (int): - Optional. Dimensionality of the hidden representation in feature - transformation block. Default is 64. - feature_dim_ratio (float): - Optional. The ratio of output dimension (dimensionality of the - outputs of each decision step) to feature dimension. Default is - 0.5. - num_decision_steps (int): - Optional. Number of sequential decision steps. Default is 6. - relaxation_factor (float): - Optional. Relaxation factor that promotes the reuse of each - feature at different decision steps. When it is 1, a feature is - enforced to be used only at one decision step and as it - increases, more flexibility is provided to use a feature at - multiple decision steps. Default is 1.5. - decay_every (float): - Optional. 
Number of iterations for periodically applying - learning rate decaying. Default is 100.0. - decay_rate (float): - Optional. Learning rate decaying. Default is 0.95. - gradient_thresh (float): - Optional. Threshold for the norm of gradients for clipping. - Default is 2000.0. - sparsity_loss_weight (float): - Optional. Weight of the loss for sparsity regularization - (increasing it will yield more sparse feature selection). - Default is 0.00001. - batch_momentum (float): - Optional. Momentum in ghost batch normalization. Default is - 0.95. - batch_size_ratio (float): - Optional. The ratio of virtual batch size (size of the ghost - batch normalization) to batch size. Default is 0.25. - num_transformer_layers (int): - Optional. The number of transformer layers for each decision - step. used only at one decision step and as it increases, more - flexibility is provided to use a feature at multiple decision - steps. Default is 4. - num_transformer_layers_ratio (float): - Optional. The ratio of shared transformer layer to transformer - layers. Default is 0.25. - class_weight (float): - Optional. The class weight is used to compute a weighted cross - entropy which is helpful in classifying imbalanced dataset. Only - used for classification. Default is 1.0. - loss_function_type (str): - Optional. Loss function type. Loss function in classification - [cross_entropy, weighted_cross_entropy, focal_loss], default is - cross_entropy. Loss function in regression: [rmse, mae, mse], - default is mse. "default" for default values. Default is - "default". - alpha_focal_loss (float): - Optional. Alpha value (balancing factor) in focal_loss function. - Only used for classification. Default is 0.25. - gamma_focal_loss (float): - Optional. Gamma value (modulating factor) for focal loss for - focal loss. Only used for classification. Default is 2.0. - Raises: - ValueError if job_dir is set to an invalid GCS path. 
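As a compact illustration of the constructor documented above, the sketch below instantiates the trainer with a handful of the documented arguments; the values and the GCS bucket are placeholders, and the import path follows the package `__init__` shown above.

```
# Illustrative values only; the GCS bucket path is a placeholder.
from vertexai.preview.tabular_models import TabNetTrainer

trainer = TabNetTrainer(
    model_type="classification",          # 'classification' or 'regression'
    target_column="label",
    learning_rate=0.01,
    job_dir="gs://example-bucket/tabnet",  # must start with 'gs://' when set
    max_steps=1000,                        # -1 means no maximum number of steps
    batch_size=100,
    loss_function_type="default",          # cross_entropy for classification
)
```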
- """ - super().__init__() - if job_dir: - gcs_utils.validate_gcs_path(job_dir) - sig = inspect.signature(self.__init__) - self._binding = sig.bind( - model_type, - target_column, - learning_rate, - job_dir, - enable_profiler, - cache_data, - seed, - large_category_dim, - large_category_thresh, - yeo_johnson_transform, - weight_column, - max_steps, - max_train_secs, - measurement_selection_type, - optimization_metric, - eval_steps, - batch_size, - eval_frequency_secs, - feature_dim, - feature_dim_ratio, - num_decision_steps, - relaxation_factor, - decay_every, - decay_rate, - gradient_thresh, - sparsity_loss_weight, - batch_momentum, - batch_size_ratio, - num_transformer_layers, - num_transformer_layers_ratio, - class_weight, - loss_function_type, - alpha_focal_loss, - gamma_focal_loss, - ).arguments - self._binding["is_remote_trainer"] = True - self.model = None - - @developer.mark._remote_container_train( - image_uri=_TABNET_TRAINING_IMAGE, - additional_data=[ - remote_specs._InputParameterSpec( - "training_data", - argument_name="training_data_path", - serializer="parquet", - ), - remote_specs._InputParameterSpec( - "validation_data", - argument_name="validation_data_path", - serializer="parquet", - ), - remote_specs._InputParameterSpec("model_type"), - remote_specs._InputParameterSpec("target_column"), - remote_specs._InputParameterSpec("learning_rate"), - remote_specs._InputParameterSpec("job_dir"), - remote_specs._InputParameterSpec("enable_profiler"), - remote_specs._InputParameterSpec("cache_data"), - remote_specs._InputParameterSpec("seed"), - remote_specs._InputParameterSpec("large_category_dim"), - remote_specs._InputParameterSpec("large_category_thresh"), - remote_specs._InputParameterSpec("yeo_johnson_transform"), - remote_specs._InputParameterSpec("weight_column"), - remote_specs._InputParameterSpec("max_steps"), - remote_specs._InputParameterSpec("max_train_secs"), - remote_specs._InputParameterSpec("measurement_selection_type"), - remote_specs._InputParameterSpec("optimization_metric"), - remote_specs._InputParameterSpec("eval_steps"), - remote_specs._InputParameterSpec("batch_size"), - remote_specs._InputParameterSpec("eval_frequency_secs"), - remote_specs._InputParameterSpec("feature_dim"), - remote_specs._InputParameterSpec("feature_dim_ratio"), - remote_specs._InputParameterSpec("num_decision_steps"), - remote_specs._InputParameterSpec("relaxation_factor"), - remote_specs._InputParameterSpec("decay_every"), - remote_specs._InputParameterSpec("decay_rate"), - remote_specs._InputParameterSpec("gradient_thresh"), - remote_specs._InputParameterSpec("sparsity_loss_weight"), - remote_specs._InputParameterSpec("batch_momentum"), - remote_specs._InputParameterSpec("batch_size_ratio"), - remote_specs._InputParameterSpec("num_transformer_layers"), - remote_specs._InputParameterSpec("num_transformer_layers_ratio"), - remote_specs._InputParameterSpec("class_weight"), - remote_specs._InputParameterSpec("loss_function_type"), - remote_specs._InputParameterSpec("alpha_focal_loss"), - remote_specs._InputParameterSpec("gamma_focal_loss"), - remote_specs._InputParameterSpec("is_remote_trainer"), - remote_specs._OutputParameterSpec("output_model_path"), - ], - remote_config=configs.DistributedTrainingConfig( - display_name=_TABNET_FIT_DISPLAY_NAME, - machine_type=_TABNET_MACHINE_TYPE, - boot_disk_type=_TABNET_BOOT_DISK_TYPE, - boot_disk_size_gb=_TABNET_BOOT_DISK_SIZE_GB, - ), - ) - def fit(self, training_data: PandasData, validation_data: PandasData) -> None: - """Trains a tabnet model in 
a custom job. - - After the custom job successfully finishes, load the model and set it to - self.model to enable prediction. If TensorFlow is not installed, the - model will not be loaded. - - Training config can be overriden by setting the training config. - - Example Usage: - ` - tabnet_trainer = TabNetTrainer(...) - tabnet_trainer.fit.vertex.remote_config.staging_bucket = 'gs://...' - tabnet_trainer.fit.vertex.remote_config.display_name = 'example' - tabnet_trainer.fit(...) - ` - - PandasData refers to a pandas DataFrame. Each data frame should meet the - following requirements: - 1. All entries should be numerical (no string, array or object). - 2. For categorical columns, the entries should be integers. In - addition, the column type should be set to 'category'. Otherwise, it - will be treated as numerical columns. - 3. The column names should be string. - - Args: - training_data (pandas.DataFrame): - Required. A pandas DataFrame for training. - validation_data (pandas.DataFrame): - Required. A pandas DataFrame for validation. - """ - try: - import tensorflow.saved_model as tf_saved_model - - self.model = tf_saved_model.load(self.output_model_path) - except ImportError: - _LOGGER.warning( - "TensorFlow must be installed to load the trained model. The model is stored at %s", - self.output_model_path, - ) - - def predict(self, input_data: PandasData) -> PandasData: - """Makes prediction on input data through a trained model. - - Unlike in training and validation data, the categorical columns in - prediction input data can have dtypes either 'category' or 'int', with - 'int' being numpy.int64 in pandas DataFrame. - - - Args: - input_data (pandas.DataFrame): - Required. An input Pandas DataFrame containing data for - prediction. It will be preprocessed into a dictionary as the - input for to the trained model. - Returns: - Prediction results in the format of pandas DataFrame. - """ - try: - import tensorflow as tf - except ImportError: - raise ImportError( - "TensorFlow must be installed to make predictions." - ) from None - - if self.model is None: - if not hasattr(self, "output_model_path") or self.output_model_path is None: - raise ValueError("No trained model. 
Please call .fit first.") - self.model = tf.saved_model.load(self.output_model_path) - - prediction_inputs = {} - for col in input_data.columns: - if input_data[col].dtypes == "category": - dtype = tf.int64 - else: - dtype = tf.dtypes.as_dtype(input_data[col].dtypes) - prediction_inputs[col] = tf.constant(input_data[col].to_list(), dtype=dtype) - prediction_outputs = self.model.signatures["serving_default"]( - **prediction_inputs - ) - if self._binding["model_type"] == _CLASSIFICATION: - predicted_labels = [] - for score, labels in zip( - prediction_outputs["scores"].numpy(), - prediction_outputs["classes"].numpy().astype(int), - ): - predicted_labels.append(labels[score.argmax()]) - return pd.DataFrame({self._binding["target_column"]: predicted_labels}) - elif self._binding["model_type"] == _REGRESSION: - return pd.DataFrame( - { - self._binding["target_column"]: prediction_outputs["value"] - .numpy() - .reshape(-1) - } - ) - else: - raise ValueError(f"Unsupported model type: {self._binding['model_type']}.") diff --git a/vertexai/resources/preview/__init__.py b/vertexai/resources/preview/__init__.py index e6a2706742..ba2954343d 100644 --- a/vertexai/resources/preview/__init__.py +++ b/vertexai/resources/preview/__init__.py @@ -29,7 +29,9 @@ from google.cloud.aiplatform.preview.featurestore.entity_type import ( EntityType, ) - +from google.cloud.aiplatform.preview.persistent_resource import ( + PersistentResource, +) from google.cloud.aiplatform.preview.pipelinejobschedule.pipeline_job_schedules import ( PipelineJobSchedule, ) @@ -37,6 +39,7 @@ from vertexai.resources.preview.feature_store import ( Feature, FeatureGroup, + FeatureGroupBigQuerySource, FeatureOnlineStore, FeatureOnlineStoreType, FeatureView, From 7b4ae8d5eca41b80972060471259f055b46a2d47 Mon Sep 17 00:00:00 2001 From: "gcf-owl-bot[bot]" <78513119+gcf-owl-bot[bot]@users.noreply.github.com> Date: Wed, 5 Jun 2024 13:09:03 -0700 Subject: [PATCH 16/36] Copybara import of the project: MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit -- aa7c34cc9a86a78effeeb811b92f863e76222859 by Owl Bot : chore: Update gapic-generator-python to v1.18.0 PiperOrigin-RevId: 638650618 Source-Link: https://github.com/googleapis/googleapis/commit/6330f0389afdd04235c59898cc44f715b077aa25 Source-Link: https://github.com/googleapis/googleapis-gen/commit/44fa4f1979dc45c1778fd7caf13f8e61c6d1cae8 Copy-Tag: eyJwIjoiLmdpdGh1Yi8uT3dsQm90LnlhbWwiLCJoIjoiNDRmYTRmMTk3OWRjNDVjMTc3OGZkN2NhZjEzZjhlNjFjNmQxY2FlOCJ9 -- 5871a1c5340f1486f96b993f708efd7d840401a6 by Owl Bot : 🦉 Updates from OwlBot post-processor See https://github.com/googleapis/repo-automation-bots/blob/main/packages/owl-bot/README.md -- 24b4f7557bced89f62f931750d5cd39891ee212b by Owl Bot : feat: add rag_embedding_model_config to RagCorpus feat: add max_embedding_requests_per_min to ImportRagFilesConfig PiperOrigin-RevId: 640251019 Source-Link: https://github.com/googleapis/googleapis/commit/ede5e02ad747c9199a7953b222b85715e097189c Source-Link: https://github.com/googleapis/googleapis-gen/commit/7bb40e01cc6013b066e81827a8dd19ade6e71b39 Copy-Tag: eyJwIjoiLmdpdGh1Yi8uT3dsQm90LnlhbWwiLCJoIjoiN2JiNDBlMDFjYzYwMTNiMDY2ZTgxODI3YThkZDE5YWRlNmU3MWIzOSJ9 -- 1218b82ba198179ef4957f55d83a7efeb28fb621 by Owl Bot : 🦉 Updates from OwlBot post-processor See 
https://github.com/googleapis/repo-automation-bots/blob/main/packages/owl-bot/README.md COPYBARA_INTEGRATE_REVIEW=https://github.com/googleapis/python-aiplatform/pull/3846 from googleapis:owl-bot-copy b772b4d4377c7ee1e721bf281b3529012d410e55 PiperOrigin-RevId: 640624369 --- .../services/dataset_service/async_client.py | 1 + .../async_client.py | 1 + .../services/endpoint_service/async_client.py | 1 + .../async_client.py | 1 + .../async_client.py | 1 + .../feature_registry_service/async_client.py | 1 + .../async_client.py | 1 + .../featurestore_service/async_client.py | 1 + .../gen_ai_tuning_service/async_client.py | 1 + .../index_endpoint_service/async_client.py | 1 + .../services/index_service/async_client.py | 1 + .../services/job_service/async_client.py | 1 + .../llm_utility_service/async_client.py | 1 + .../services/match_service/async_client.py | 1 + .../services/metadata_service/async_client.py | 1 + .../migration_service/async_client.py | 1 + .../services/migration_service/client.py | 18 +- .../model_garden_service/async_client.py | 1 + .../services/model_service/async_client.py | 1 + .../services/notebook_service/async_client.py | 1 + .../async_client.py | 1 + .../services/pipeline_service/async_client.py | 1 + .../prediction_service/async_client.py | 1 + .../services/schedule_service/async_client.py | 1 + .../specialist_pool_service/async_client.py | 1 + .../tensorboard_service/async_client.py | 1 + .../services/vizier_service/async_client.py | 1 + google/cloud/aiplatform_v1beta1/__init__.py | 2 + .../services/dataset_service/async_client.py | 1 + .../dataset_service/transports/rest.py | 240 ++++++++++++++++++ .../async_client.py | 1 + .../transports/rest.py | 240 ++++++++++++++++++ .../services/endpoint_service/async_client.py | 1 + .../endpoint_service/transports/rest.py | 240 ++++++++++++++++++ .../evaluation_service/async_client.py | 1 + .../evaluation_service/transports/rest.py | 120 +++++++++ .../async_client.py | 1 + .../transports/rest.py | 120 +++++++++ .../async_client.py | 1 + .../transports/rest.py | 240 ++++++++++++++++++ .../async_client.py | 1 + .../transports/rest.py | 240 ++++++++++++++++++ .../async_client.py | 1 + .../transports/rest.py | 120 +++++++++ .../feature_registry_service/async_client.py | 1 + .../transports/rest.py | 240 ++++++++++++++++++ .../async_client.py | 1 + .../transports/rest.py | 120 +++++++++ .../featurestore_service/async_client.py | 1 + .../featurestore_service/transports/rest.py | 240 ++++++++++++++++++ .../gen_ai_cache_service/async_client.py | 1 + .../gen_ai_cache_service/transports/rest.py | 120 +++++++++ .../gen_ai_tuning_service/async_client.py | 1 + .../gen_ai_tuning_service/transports/rest.py | 120 +++++++++ .../index_endpoint_service/async_client.py | 1 + .../index_endpoint_service/transports/rest.py | 240 ++++++++++++++++++ .../services/index_service/async_client.py | 1 + .../services/index_service/transports/rest.py | 240 ++++++++++++++++++ .../services/job_service/async_client.py | 1 + .../services/job_service/transports/rest.py | 240 ++++++++++++++++++ .../llm_utility_service/async_client.py | 1 + .../llm_utility_service/transports/rest.py | 120 +++++++++ .../services/match_service/async_client.py | 1 + .../services/match_service/transports/rest.py | 120 +++++++++ .../services/metadata_service/async_client.py | 1 + .../metadata_service/transports/rest.py | 240 ++++++++++++++++++ .../migration_service/async_client.py | 1 + .../services/migration_service/client.py | 18 +- 
.../migration_service/transports/rest.py | 240 ++++++++++++++++++ .../model_garden_service/async_client.py | 1 + .../model_garden_service/transports/rest.py | 120 +++++++++ .../model_monitoring_service/async_client.py | 1 + .../transports/rest.py | 240 ++++++++++++++++++ .../services/model_service/async_client.py | 1 + .../services/model_service/transports/rest.py | 240 ++++++++++++++++++ .../services/notebook_service/async_client.py | 1 + .../notebook_service/transports/rest.py | 240 ++++++++++++++++++ .../async_client.py | 1 + .../transports/rest.py | 240 ++++++++++++++++++ .../services/pipeline_service/async_client.py | 1 + .../pipeline_service/transports/rest.py | 240 ++++++++++++++++++ .../prediction_service/async_client.py | 1 + .../prediction_service/transports/rest.py | 120 +++++++++ .../async_client.py | 1 + .../transports/rest.py | 120 +++++++++ .../reasoning_engine_service/async_client.py | 1 + .../transports/rest.py | 240 ++++++++++++++++++ .../services/schedule_service/async_client.py | 1 + .../schedule_service/transports/rest.py | 240 ++++++++++++++++++ .../specialist_pool_service/async_client.py | 1 + .../transports/rest.py | 240 ++++++++++++++++++ .../tensorboard_service/async_client.py | 1 + .../tensorboard_service/transports/rest.py | 240 ++++++++++++++++++ .../vertex_rag_data_service/async_client.py | 5 + .../vertex_rag_data_service/client.py | 44 ++++ .../transports/rest.py | 240 ++++++++++++++++++ .../vertex_rag_service/async_client.py | 1 + .../vertex_rag_service/transports/rest.py | 120 +++++++++ .../services/vizier_service/async_client.py | 1 + .../vizier_service/transports/rest.py | 240 ++++++++++++++++++ .../aiplatform_v1beta1/types/__init__.py | 2 + .../types/vertex_rag_data.py | 80 ++++++ .../aiplatform_v1/test_dataset_service.py | 36 +-- .../test_deployment_resource_pool_service.py | 12 +- .../aiplatform_v1/test_endpoint_service.py | 6 +- ...test_feature_online_store_admin_service.py | 18 +- .../test_feature_registry_service.py | 12 +- .../test_featurestore_service.py | 24 +- .../test_gen_ai_tuning_service.py | 6 +- .../test_index_endpoint_service.py | 6 +- .../gapic/aiplatform_v1/test_index_service.py | 6 +- .../gapic/aiplatform_v1/test_job_service.py | 48 ++-- .../aiplatform_v1/test_metadata_service.py | 30 +-- .../aiplatform_v1/test_migration_service.py | 32 +-- .../gapic/aiplatform_v1/test_model_service.py | 24 +- .../aiplatform_v1/test_notebook_service.py | 12 +- .../test_persistent_resource_service.py | 6 +- .../aiplatform_v1/test_pipeline_service.py | 12 +- .../aiplatform_v1/test_schedule_service.py | 6 +- .../test_specialist_pool_service.py | 6 +- .../aiplatform_v1/test_tensorboard_service.py | 30 +-- .../aiplatform_v1/test_vizier_service.py | 12 +- .../test_dataset_service.py | 36 +-- .../test_deployment_resource_pool_service.py | 12 +- .../test_endpoint_service.py | 6 +- .../test_extension_registry_service.py | 6 +- ...test_feature_online_store_admin_service.py | 18 +- .../test_feature_registry_service.py | 12 +- .../test_featurestore_service.py | 24 +- .../test_gen_ai_cache_service.py | 6 +- .../test_gen_ai_tuning_service.py | 6 +- .../test_index_endpoint_service.py | 6 +- .../aiplatform_v1beta1/test_index_service.py | 6 +- .../aiplatform_v1beta1/test_job_service.py | 48 ++-- .../test_metadata_service.py | 30 +-- .../test_migration_service.py | 32 +-- .../test_model_garden_service.py | 6 +- .../test_model_monitoring_service.py | 24 +- .../aiplatform_v1beta1/test_model_service.py | 24 +- .../test_notebook_service.py | 18 +- 
.../test_persistent_resource_service.py | 6 +- .../test_pipeline_service.py | 12 +- .../test_reasoning_engine_service.py | 6 +- .../test_schedule_service.py | 6 +- .../test_specialist_pool_service.py | 6 +- .../test_tensorboard_service.py | 30 +-- .../test_vertex_rag_data_service.py | 71 +++++- .../aiplatform_v1beta1/test_vizier_service.py | 12 +- 148 files changed, 7607 insertions(+), 395 deletions(-) diff --git a/google/cloud/aiplatform_v1/services/dataset_service/async_client.py b/google/cloud/aiplatform_v1/services/dataset_service/async_client.py index 86b0982f95..d7f0ac0249 100644 --- a/google/cloud/aiplatform_v1/services/dataset_service/async_client.py +++ b/google/cloud/aiplatform_v1/services/dataset_service/async_client.py @@ -38,6 +38,7 @@ from google.auth import credentials as ga_credentials # type: ignore from google.oauth2 import service_account # type: ignore + try: OptionalRetry = Union[retries.AsyncRetry, gapic_v1.method._MethodDefault, None] except AttributeError: # pragma: NO COVER diff --git a/google/cloud/aiplatform_v1/services/deployment_resource_pool_service/async_client.py b/google/cloud/aiplatform_v1/services/deployment_resource_pool_service/async_client.py index 163b5927d9..4ef0999937 100644 --- a/google/cloud/aiplatform_v1/services/deployment_resource_pool_service/async_client.py +++ b/google/cloud/aiplatform_v1/services/deployment_resource_pool_service/async_client.py @@ -38,6 +38,7 @@ from google.auth import credentials as ga_credentials # type: ignore from google.oauth2 import service_account # type: ignore + try: OptionalRetry = Union[retries.AsyncRetry, gapic_v1.method._MethodDefault, None] except AttributeError: # pragma: NO COVER diff --git a/google/cloud/aiplatform_v1/services/endpoint_service/async_client.py b/google/cloud/aiplatform_v1/services/endpoint_service/async_client.py index 4c0b82c0e8..9bdb448960 100644 --- a/google/cloud/aiplatform_v1/services/endpoint_service/async_client.py +++ b/google/cloud/aiplatform_v1/services/endpoint_service/async_client.py @@ -38,6 +38,7 @@ from google.auth import credentials as ga_credentials # type: ignore from google.oauth2 import service_account # type: ignore + try: OptionalRetry = Union[retries.AsyncRetry, gapic_v1.method._MethodDefault, None] except AttributeError: # pragma: NO COVER diff --git a/google/cloud/aiplatform_v1/services/feature_online_store_admin_service/async_client.py b/google/cloud/aiplatform_v1/services/feature_online_store_admin_service/async_client.py index 2080c1fefd..90ef0e9423 100644 --- a/google/cloud/aiplatform_v1/services/feature_online_store_admin_service/async_client.py +++ b/google/cloud/aiplatform_v1/services/feature_online_store_admin_service/async_client.py @@ -38,6 +38,7 @@ from google.auth import credentials as ga_credentials # type: ignore from google.oauth2 import service_account # type: ignore + try: OptionalRetry = Union[retries.AsyncRetry, gapic_v1.method._MethodDefault, None] except AttributeError: # pragma: NO COVER diff --git a/google/cloud/aiplatform_v1/services/feature_online_store_service/async_client.py b/google/cloud/aiplatform_v1/services/feature_online_store_service/async_client.py index 24c2058ee3..7341ea3433 100644 --- a/google/cloud/aiplatform_v1/services/feature_online_store_service/async_client.py +++ b/google/cloud/aiplatform_v1/services/feature_online_store_service/async_client.py @@ -38,6 +38,7 @@ from google.auth import credentials as ga_credentials # type: ignore from google.oauth2 import service_account # type: ignore + try: OptionalRetry = 
Union[retries.AsyncRetry, gapic_v1.method._MethodDefault, None] except AttributeError: # pragma: NO COVER diff --git a/google/cloud/aiplatform_v1/services/feature_registry_service/async_client.py b/google/cloud/aiplatform_v1/services/feature_registry_service/async_client.py index 2a2e28dc1d..6dcf41be95 100644 --- a/google/cloud/aiplatform_v1/services/feature_registry_service/async_client.py +++ b/google/cloud/aiplatform_v1/services/feature_registry_service/async_client.py @@ -38,6 +38,7 @@ from google.auth import credentials as ga_credentials # type: ignore from google.oauth2 import service_account # type: ignore + try: OptionalRetry = Union[retries.AsyncRetry, gapic_v1.method._MethodDefault, None] except AttributeError: # pragma: NO COVER diff --git a/google/cloud/aiplatform_v1/services/featurestore_online_serving_service/async_client.py b/google/cloud/aiplatform_v1/services/featurestore_online_serving_service/async_client.py index 0f71befed6..6211b6ccb0 100644 --- a/google/cloud/aiplatform_v1/services/featurestore_online_serving_service/async_client.py +++ b/google/cloud/aiplatform_v1/services/featurestore_online_serving_service/async_client.py @@ -40,6 +40,7 @@ from google.auth import credentials as ga_credentials # type: ignore from google.oauth2 import service_account # type: ignore + try: OptionalRetry = Union[retries.AsyncRetry, gapic_v1.method._MethodDefault, None] except AttributeError: # pragma: NO COVER diff --git a/google/cloud/aiplatform_v1/services/featurestore_service/async_client.py b/google/cloud/aiplatform_v1/services/featurestore_service/async_client.py index aed331944a..31058f1cc2 100644 --- a/google/cloud/aiplatform_v1/services/featurestore_service/async_client.py +++ b/google/cloud/aiplatform_v1/services/featurestore_service/async_client.py @@ -38,6 +38,7 @@ from google.auth import credentials as ga_credentials # type: ignore from google.oauth2 import service_account # type: ignore + try: OptionalRetry = Union[retries.AsyncRetry, gapic_v1.method._MethodDefault, None] except AttributeError: # pragma: NO COVER diff --git a/google/cloud/aiplatform_v1/services/gen_ai_tuning_service/async_client.py b/google/cloud/aiplatform_v1/services/gen_ai_tuning_service/async_client.py index 9fb81b9ddd..3cdb20ae38 100644 --- a/google/cloud/aiplatform_v1/services/gen_ai_tuning_service/async_client.py +++ b/google/cloud/aiplatform_v1/services/gen_ai_tuning_service/async_client.py @@ -38,6 +38,7 @@ from google.auth import credentials as ga_credentials # type: ignore from google.oauth2 import service_account # type: ignore + try: OptionalRetry = Union[retries.AsyncRetry, gapic_v1.method._MethodDefault, None] except AttributeError: # pragma: NO COVER diff --git a/google/cloud/aiplatform_v1/services/index_endpoint_service/async_client.py b/google/cloud/aiplatform_v1/services/index_endpoint_service/async_client.py index cfe8d69444..bee3bf01c5 100644 --- a/google/cloud/aiplatform_v1/services/index_endpoint_service/async_client.py +++ b/google/cloud/aiplatform_v1/services/index_endpoint_service/async_client.py @@ -38,6 +38,7 @@ from google.auth import credentials as ga_credentials # type: ignore from google.oauth2 import service_account # type: ignore + try: OptionalRetry = Union[retries.AsyncRetry, gapic_v1.method._MethodDefault, None] except AttributeError: # pragma: NO COVER diff --git a/google/cloud/aiplatform_v1/services/index_service/async_client.py b/google/cloud/aiplatform_v1/services/index_service/async_client.py index 3268898ed6..a3c19ac0d6 100644 --- 
a/google/cloud/aiplatform_v1/services/index_service/async_client.py +++ b/google/cloud/aiplatform_v1/services/index_service/async_client.py @@ -38,6 +38,7 @@ from google.auth import credentials as ga_credentials # type: ignore from google.oauth2 import service_account # type: ignore + try: OptionalRetry = Union[retries.AsyncRetry, gapic_v1.method._MethodDefault, None] except AttributeError: # pragma: NO COVER diff --git a/google/cloud/aiplatform_v1/services/job_service/async_client.py b/google/cloud/aiplatform_v1/services/job_service/async_client.py index 0c75fff3ee..73001fddda 100644 --- a/google/cloud/aiplatform_v1/services/job_service/async_client.py +++ b/google/cloud/aiplatform_v1/services/job_service/async_client.py @@ -38,6 +38,7 @@ from google.auth import credentials as ga_credentials # type: ignore from google.oauth2 import service_account # type: ignore + try: OptionalRetry = Union[retries.AsyncRetry, gapic_v1.method._MethodDefault, None] except AttributeError: # pragma: NO COVER diff --git a/google/cloud/aiplatform_v1/services/llm_utility_service/async_client.py b/google/cloud/aiplatform_v1/services/llm_utility_service/async_client.py index 7b8c862a6d..5ff7db905d 100644 --- a/google/cloud/aiplatform_v1/services/llm_utility_service/async_client.py +++ b/google/cloud/aiplatform_v1/services/llm_utility_service/async_client.py @@ -38,6 +38,7 @@ from google.auth import credentials as ga_credentials # type: ignore from google.oauth2 import service_account # type: ignore + try: OptionalRetry = Union[retries.AsyncRetry, gapic_v1.method._MethodDefault, None] except AttributeError: # pragma: NO COVER diff --git a/google/cloud/aiplatform_v1/services/match_service/async_client.py b/google/cloud/aiplatform_v1/services/match_service/async_client.py index c430407600..b7e14780e9 100644 --- a/google/cloud/aiplatform_v1/services/match_service/async_client.py +++ b/google/cloud/aiplatform_v1/services/match_service/async_client.py @@ -38,6 +38,7 @@ from google.auth import credentials as ga_credentials # type: ignore from google.oauth2 import service_account # type: ignore + try: OptionalRetry = Union[retries.AsyncRetry, gapic_v1.method._MethodDefault, None] except AttributeError: # pragma: NO COVER diff --git a/google/cloud/aiplatform_v1/services/metadata_service/async_client.py b/google/cloud/aiplatform_v1/services/metadata_service/async_client.py index a777b013c7..7e99d5a43c 100644 --- a/google/cloud/aiplatform_v1/services/metadata_service/async_client.py +++ b/google/cloud/aiplatform_v1/services/metadata_service/async_client.py @@ -38,6 +38,7 @@ from google.auth import credentials as ga_credentials # type: ignore from google.oauth2 import service_account # type: ignore + try: OptionalRetry = Union[retries.AsyncRetry, gapic_v1.method._MethodDefault, None] except AttributeError: # pragma: NO COVER diff --git a/google/cloud/aiplatform_v1/services/migration_service/async_client.py b/google/cloud/aiplatform_v1/services/migration_service/async_client.py index 8b52abe4d5..53c16b2d6b 100644 --- a/google/cloud/aiplatform_v1/services/migration_service/async_client.py +++ b/google/cloud/aiplatform_v1/services/migration_service/async_client.py @@ -38,6 +38,7 @@ from google.auth import credentials as ga_credentials # type: ignore from google.oauth2 import service_account # type: ignore + try: OptionalRetry = Union[retries.AsyncRetry, gapic_v1.method._MethodDefault, None] except AttributeError: # pragma: NO COVER diff --git a/google/cloud/aiplatform_v1/services/migration_service/client.py 
b/google/cloud/aiplatform_v1/services/migration_service/client.py index 5e2c922f28..1c57f84498 100644 --- a/google/cloud/aiplatform_v1/services/migration_service/client.py +++ b/google/cloud/aiplatform_v1/services/migration_service/client.py @@ -238,40 +238,40 @@ def parse_dataset_path(path: str) -> Dict[str, str]: @staticmethod def dataset_path( project: str, + location: str, dataset: str, ) -> str: """Returns a fully-qualified dataset string.""" - return "projects/{project}/datasets/{dataset}".format( + return "projects/{project}/locations/{location}/datasets/{dataset}".format( project=project, + location=location, dataset=dataset, ) @staticmethod def parse_dataset_path(path: str) -> Dict[str, str]: """Parses a dataset path into its component segments.""" - m = re.match(r"^projects/(?P.+?)/datasets/(?P.+?)$", path) + m = re.match( + r"^projects/(?P.+?)/locations/(?P.+?)/datasets/(?P.+?)$", + path, + ) return m.groupdict() if m else {} @staticmethod def dataset_path( project: str, - location: str, dataset: str, ) -> str: """Returns a fully-qualified dataset string.""" - return "projects/{project}/locations/{location}/datasets/{dataset}".format( + return "projects/{project}/datasets/{dataset}".format( project=project, - location=location, dataset=dataset, ) @staticmethod def parse_dataset_path(path: str) -> Dict[str, str]: """Parses a dataset path into its component segments.""" - m = re.match( - r"^projects/(?P.+?)/locations/(?P.+?)/datasets/(?P.+?)$", - path, - ) + m = re.match(r"^projects/(?P.+?)/datasets/(?P.+?)$", path) return m.groupdict() if m else {} @staticmethod diff --git a/google/cloud/aiplatform_v1/services/model_garden_service/async_client.py b/google/cloud/aiplatform_v1/services/model_garden_service/async_client.py index a1f531e997..948f294445 100644 --- a/google/cloud/aiplatform_v1/services/model_garden_service/async_client.py +++ b/google/cloud/aiplatform_v1/services/model_garden_service/async_client.py @@ -38,6 +38,7 @@ from google.auth import credentials as ga_credentials # type: ignore from google.oauth2 import service_account # type: ignore + try: OptionalRetry = Union[retries.AsyncRetry, gapic_v1.method._MethodDefault, None] except AttributeError: # pragma: NO COVER diff --git a/google/cloud/aiplatform_v1/services/model_service/async_client.py b/google/cloud/aiplatform_v1/services/model_service/async_client.py index 666b1fa688..dfb5473d6a 100644 --- a/google/cloud/aiplatform_v1/services/model_service/async_client.py +++ b/google/cloud/aiplatform_v1/services/model_service/async_client.py @@ -38,6 +38,7 @@ from google.auth import credentials as ga_credentials # type: ignore from google.oauth2 import service_account # type: ignore + try: OptionalRetry = Union[retries.AsyncRetry, gapic_v1.method._MethodDefault, None] except AttributeError: # pragma: NO COVER diff --git a/google/cloud/aiplatform_v1/services/notebook_service/async_client.py b/google/cloud/aiplatform_v1/services/notebook_service/async_client.py index e8137910c1..5d93704cd8 100644 --- a/google/cloud/aiplatform_v1/services/notebook_service/async_client.py +++ b/google/cloud/aiplatform_v1/services/notebook_service/async_client.py @@ -38,6 +38,7 @@ from google.auth import credentials as ga_credentials # type: ignore from google.oauth2 import service_account # type: ignore + try: OptionalRetry = Union[retries.AsyncRetry, gapic_v1.method._MethodDefault, None] except AttributeError: # pragma: NO COVER diff --git a/google/cloud/aiplatform_v1/services/persistent_resource_service/async_client.py 
b/google/cloud/aiplatform_v1/services/persistent_resource_service/async_client.py index 586d39e546..ee1eaad72d 100644 --- a/google/cloud/aiplatform_v1/services/persistent_resource_service/async_client.py +++ b/google/cloud/aiplatform_v1/services/persistent_resource_service/async_client.py @@ -38,6 +38,7 @@ from google.auth import credentials as ga_credentials # type: ignore from google.oauth2 import service_account # type: ignore + try: OptionalRetry = Union[retries.AsyncRetry, gapic_v1.method._MethodDefault, None] except AttributeError: # pragma: NO COVER diff --git a/google/cloud/aiplatform_v1/services/pipeline_service/async_client.py b/google/cloud/aiplatform_v1/services/pipeline_service/async_client.py index defade15c3..1c544acba9 100644 --- a/google/cloud/aiplatform_v1/services/pipeline_service/async_client.py +++ b/google/cloud/aiplatform_v1/services/pipeline_service/async_client.py @@ -38,6 +38,7 @@ from google.auth import credentials as ga_credentials # type: ignore from google.oauth2 import service_account # type: ignore + try: OptionalRetry = Union[retries.AsyncRetry, gapic_v1.method._MethodDefault, None] except AttributeError: # pragma: NO COVER diff --git a/google/cloud/aiplatform_v1/services/prediction_service/async_client.py b/google/cloud/aiplatform_v1/services/prediction_service/async_client.py index 3dfb274a09..374a77ef70 100644 --- a/google/cloud/aiplatform_v1/services/prediction_service/async_client.py +++ b/google/cloud/aiplatform_v1/services/prediction_service/async_client.py @@ -41,6 +41,7 @@ from google.auth import credentials as ga_credentials # type: ignore from google.oauth2 import service_account # type: ignore + try: OptionalRetry = Union[retries.AsyncRetry, gapic_v1.method._MethodDefault, None] except AttributeError: # pragma: NO COVER diff --git a/google/cloud/aiplatform_v1/services/schedule_service/async_client.py b/google/cloud/aiplatform_v1/services/schedule_service/async_client.py index 2e639782e9..5a98016f9b 100644 --- a/google/cloud/aiplatform_v1/services/schedule_service/async_client.py +++ b/google/cloud/aiplatform_v1/services/schedule_service/async_client.py @@ -38,6 +38,7 @@ from google.auth import credentials as ga_credentials # type: ignore from google.oauth2 import service_account # type: ignore + try: OptionalRetry = Union[retries.AsyncRetry, gapic_v1.method._MethodDefault, None] except AttributeError: # pragma: NO COVER diff --git a/google/cloud/aiplatform_v1/services/specialist_pool_service/async_client.py b/google/cloud/aiplatform_v1/services/specialist_pool_service/async_client.py index 809211b5a2..9a006fe6dd 100644 --- a/google/cloud/aiplatform_v1/services/specialist_pool_service/async_client.py +++ b/google/cloud/aiplatform_v1/services/specialist_pool_service/async_client.py @@ -38,6 +38,7 @@ from google.auth import credentials as ga_credentials # type: ignore from google.oauth2 import service_account # type: ignore + try: OptionalRetry = Union[retries.AsyncRetry, gapic_v1.method._MethodDefault, None] except AttributeError: # pragma: NO COVER diff --git a/google/cloud/aiplatform_v1/services/tensorboard_service/async_client.py b/google/cloud/aiplatform_v1/services/tensorboard_service/async_client.py index 8647c749ff..8bea6e17e2 100644 --- a/google/cloud/aiplatform_v1/services/tensorboard_service/async_client.py +++ b/google/cloud/aiplatform_v1/services/tensorboard_service/async_client.py @@ -40,6 +40,7 @@ from google.auth import credentials as ga_credentials # type: ignore from google.oauth2 import service_account # type: ignore + try: 
OptionalRetry = Union[retries.AsyncRetry, gapic_v1.method._MethodDefault, None] except AttributeError: # pragma: NO COVER diff --git a/google/cloud/aiplatform_v1/services/vizier_service/async_client.py b/google/cloud/aiplatform_v1/services/vizier_service/async_client.py index 38219516a0..ecc79f0d7c 100644 --- a/google/cloud/aiplatform_v1/services/vizier_service/async_client.py +++ b/google/cloud/aiplatform_v1/services/vizier_service/async_client.py @@ -38,6 +38,7 @@ from google.auth import credentials as ga_credentials # type: ignore from google.oauth2 import service_account # type: ignore + try: OptionalRetry = Union[retries.AsyncRetry, gapic_v1.method._MethodDefault, None] except AttributeError: # pragma: NO COVER diff --git a/google/cloud/aiplatform_v1beta1/__init__.py b/google/cloud/aiplatform_v1beta1/__init__.py index a608a2b635..ecbba56261 100644 --- a/google/cloud/aiplatform_v1beta1/__init__.py +++ b/google/cloud/aiplatform_v1beta1/__init__.py @@ -979,6 +979,7 @@ from .types.value import Value from .types.vertex_rag_data import ImportRagFilesConfig from .types.vertex_rag_data import RagCorpus +from .types.vertex_rag_data import RagEmbeddingModelConfig from .types.vertex_rag_data import RagFile from .types.vertex_rag_data import RagFileChunkingConfig from .types.vertex_rag_data import UploadRagFileConfig @@ -1747,6 +1748,7 @@ "QuestionAnsweringRelevanceSpec", "RagContexts", "RagCorpus", + "RagEmbeddingModelConfig", "RagFile", "RagFileChunkingConfig", "RagQuery", diff --git a/google/cloud/aiplatform_v1beta1/services/dataset_service/async_client.py b/google/cloud/aiplatform_v1beta1/services/dataset_service/async_client.py index 704f102748..d6fd8ec83f 100644 --- a/google/cloud/aiplatform_v1beta1/services/dataset_service/async_client.py +++ b/google/cloud/aiplatform_v1beta1/services/dataset_service/async_client.py @@ -38,6 +38,7 @@ from google.auth import credentials as ga_credentials # type: ignore from google.oauth2 import service_account # type: ignore + try: OptionalRetry = Union[retries.AsyncRetry, gapic_v1.method._MethodDefault, None] except AttributeError: # pragma: NO COVER diff --git a/google/cloud/aiplatform_v1beta1/services/dataset_service/transports/rest.py b/google/cloud/aiplatform_v1beta1/services/dataset_service/transports/rest.py index 71ad19ba53..eb8b690287 100644 --- a/google/cloud/aiplatform_v1beta1/services/dataset_service/transports/rest.py +++ b/google/cloud/aiplatform_v1beta1/services/dataset_service/transports/rest.py @@ -1133,6 +1133,18 @@ def operations_client(self) -> operations_v1.AbstractOperationsClient: "method": "post", "uri": "/ui/{name=projects/*/locations/*/models/*/evaluations/*/operations/*}:cancel", }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/notebookExecutionJobs/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/notebookRuntimes/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/notebookRuntimeTemplates/*/operations/*}:cancel", + }, { "method": "post", "uri": "/ui/{name=projects/*/locations/*/persistentResources/*/operations/*}:cancel", @@ -1301,6 +1313,18 @@ def operations_client(self) -> operations_v1.AbstractOperationsClient: "method": "post", "uri": "/v1beta1/{name=projects/*/locations/*/models/*/evaluations/*/operations/*}:cancel", }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/notebookExecutionJobs/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": 
"/v1beta1/{name=projects/*/locations/*/notebookRuntimes/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/notebookRuntimeTemplates/*/operations/*}:cancel", + }, { "method": "post", "uri": "/v1beta1/{name=projects/*/locations/*/persistentResources/*/operations/*}:cancel", @@ -1479,6 +1503,18 @@ def operations_client(self) -> operations_v1.AbstractOperationsClient: "method": "delete", "uri": "/ui/{name=projects/*/locations/*/models/*/evaluations/*/operations/*}", }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/notebookExecutionJobs/*/operations/*}", + }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/notebookRuntimes/*/operations/*}", + }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/notebookRuntimeTemplates/*/operations/*}", + }, { "method": "delete", "uri": "/ui/{name=projects/*/locations/*/persistentResources/*/operations/*}", @@ -1667,6 +1703,18 @@ def operations_client(self) -> operations_v1.AbstractOperationsClient: "method": "delete", "uri": "/v1beta1/{name=projects/*/locations/*/models/*/evaluations/*/operations/*}", }, + { + "method": "delete", + "uri": "/v1beta1/{name=projects/*/locations/*/notebookExecutionJobs/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1beta1/{name=projects/*/locations/*/notebookRuntimes/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1beta1/{name=projects/*/locations/*/notebookRuntimeTemplates/*/operations/*}", + }, { "method": "delete", "uri": "/v1beta1/{name=projects/*/locations/*/persistentResources/*/operations/*}", @@ -1873,6 +1921,18 @@ def operations_client(self) -> operations_v1.AbstractOperationsClient: "method": "get", "uri": "/ui/{name=projects/*/locations/*/models/*/evaluations/*/operations/*}", }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/notebookExecutionJobs/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/notebookRuntimes/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/notebookRuntimeTemplates/*/operations/*}", + }, { "method": "get", "uri": "/ui/{name=projects/*/locations/*/persistentResources/*/operations/*}", @@ -2061,6 +2121,18 @@ def operations_client(self) -> operations_v1.AbstractOperationsClient: "method": "get", "uri": "/v1beta1/{name=projects/*/locations/*/models/*/evaluations/*/operations/*}", }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/notebookExecutionJobs/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/notebookRuntimes/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/notebookRuntimeTemplates/*/operations/*}", + }, { "method": "get", "uri": "/v1beta1/{name=projects/*/locations/*/persistentResources/*/operations/*}", @@ -2263,6 +2335,18 @@ def operations_client(self) -> operations_v1.AbstractOperationsClient: "method": "get", "uri": "/ui/{name=projects/*/locations/*/models/*/evaluations/*}/operations", }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/notebookExecutionJobs/*}/operations", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/notebookRuntimes/*}/operations", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/notebookRuntimeTemplates/*}/operations", + }, { "method": "get", "uri": "/ui/{name=projects/*/locations/*/studies/*}/operations", @@ -2451,6 +2535,18 @@ def operations_client(self) -> 
operations_v1.AbstractOperationsClient: "method": "get", "uri": "/v1beta1/{name=projects/*/locations/*/models/*/evaluations/*}/operations", }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/notebookExecutionJobs/*}/operations", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/notebookRuntimes/*}/operations", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/notebookRuntimeTemplates/*}/operations", + }, { "method": "get", "uri": "/v1beta1/{name=projects/*/locations/*/persistentResources/*}/operations", @@ -2653,6 +2749,18 @@ def operations_client(self) -> operations_v1.AbstractOperationsClient: "method": "post", "uri": "/ui/{name=projects/*/locations/*/models/*/evaluations/*/operations/*}:wait", }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/notebookExecutionJobs/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/notebookRuntimes/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/notebookRuntimeTemplates/*/operations/*}:wait", + }, { "method": "post", "uri": "/ui/{name=projects/*/locations/*/studies/*/operations/*}:wait", @@ -2841,6 +2949,18 @@ def operations_client(self) -> operations_v1.AbstractOperationsClient: "method": "post", "uri": "/v1beta1/{name=projects/*/locations/*/models/*/evaluations/*/operations/*}:wait", }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/notebookExecutionJobs/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/notebookRuntimes/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/notebookRuntimeTemplates/*/operations/*}:wait", + }, { "method": "post", "uri": "/v1beta1/{name=projects/*/locations/*/persistentResources/*/operations/*}:wait", @@ -5485,6 +5605,18 @@ def __call__( "method": "post", "uri": "/ui/{name=projects/*/locations/*/models/*/evaluations/*/operations/*}:cancel", }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/notebookExecutionJobs/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/notebookRuntimes/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/notebookRuntimeTemplates/*/operations/*}:cancel", + }, { "method": "post", "uri": "/ui/{name=projects/*/locations/*/persistentResources/*/operations/*}:cancel", @@ -5653,6 +5785,18 @@ def __call__( "method": "post", "uri": "/v1beta1/{name=projects/*/locations/*/models/*/evaluations/*/operations/*}:cancel", }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/notebookExecutionJobs/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/notebookRuntimes/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/notebookRuntimeTemplates/*/operations/*}:cancel", + }, { "method": "post", "uri": "/v1beta1/{name=projects/*/locations/*/persistentResources/*/operations/*}:cancel", @@ -5888,6 +6032,18 @@ def __call__( "method": "delete", "uri": "/ui/{name=projects/*/locations/*/models/*/evaluations/*/operations/*}", }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/notebookExecutionJobs/*/operations/*}", + }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/notebookRuntimes/*/operations/*}", + }, + { + "method": "delete", + "uri": 
"/ui/{name=projects/*/locations/*/notebookRuntimeTemplates/*/operations/*}", + }, { "method": "delete", "uri": "/ui/{name=projects/*/locations/*/persistentResources/*/operations/*}", @@ -6076,6 +6232,18 @@ def __call__( "method": "delete", "uri": "/v1beta1/{name=projects/*/locations/*/models/*/evaluations/*/operations/*}", }, + { + "method": "delete", + "uri": "/v1beta1/{name=projects/*/locations/*/notebookExecutionJobs/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1beta1/{name=projects/*/locations/*/notebookRuntimes/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1beta1/{name=projects/*/locations/*/notebookRuntimeTemplates/*/operations/*}", + }, { "method": "delete", "uri": "/v1beta1/{name=projects/*/locations/*/persistentResources/*/operations/*}", @@ -6342,6 +6510,18 @@ def __call__( "method": "get", "uri": "/ui/{name=projects/*/locations/*/models/*/evaluations/*/operations/*}", }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/notebookExecutionJobs/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/notebookRuntimes/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/notebookRuntimeTemplates/*/operations/*}", + }, { "method": "get", "uri": "/ui/{name=projects/*/locations/*/persistentResources/*/operations/*}", @@ -6530,6 +6710,18 @@ def __call__( "method": "get", "uri": "/v1beta1/{name=projects/*/locations/*/models/*/evaluations/*/operations/*}", }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/notebookExecutionJobs/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/notebookRuntimes/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/notebookRuntimeTemplates/*/operations/*}", + }, { "method": "get", "uri": "/v1beta1/{name=projects/*/locations/*/persistentResources/*/operations/*}", @@ -6793,6 +6985,18 @@ def __call__( "method": "get", "uri": "/ui/{name=projects/*/locations/*/models/*/evaluations/*}/operations", }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/notebookExecutionJobs/*}/operations", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/notebookRuntimes/*}/operations", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/notebookRuntimeTemplates/*}/operations", + }, { "method": "get", "uri": "/ui/{name=projects/*/locations/*/studies/*}/operations", @@ -6981,6 +7185,18 @@ def __call__( "method": "get", "uri": "/v1beta1/{name=projects/*/locations/*/models/*/evaluations/*}/operations", }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/notebookExecutionJobs/*}/operations", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/notebookRuntimes/*}/operations", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/notebookRuntimeTemplates/*}/operations", + }, { "method": "get", "uri": "/v1beta1/{name=projects/*/locations/*/persistentResources/*}/operations", @@ -7244,6 +7460,18 @@ def __call__( "method": "post", "uri": "/ui/{name=projects/*/locations/*/models/*/evaluations/*/operations/*}:wait", }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/notebookExecutionJobs/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/notebookRuntimes/*/operations/*}:wait", + }, + { + "method": "post", + "uri": 
"/ui/{name=projects/*/locations/*/notebookRuntimeTemplates/*/operations/*}:wait", + }, { "method": "post", "uri": "/ui/{name=projects/*/locations/*/studies/*/operations/*}:wait", @@ -7432,6 +7660,18 @@ def __call__( "method": "post", "uri": "/v1beta1/{name=projects/*/locations/*/models/*/evaluations/*/operations/*}:wait", }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/notebookExecutionJobs/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/notebookRuntimes/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/notebookRuntimeTemplates/*/operations/*}:wait", + }, { "method": "post", "uri": "/v1beta1/{name=projects/*/locations/*/persistentResources/*/operations/*}:wait", diff --git a/google/cloud/aiplatform_v1beta1/services/deployment_resource_pool_service/async_client.py b/google/cloud/aiplatform_v1beta1/services/deployment_resource_pool_service/async_client.py index cb98171b76..e51ee0d8d7 100644 --- a/google/cloud/aiplatform_v1beta1/services/deployment_resource_pool_service/async_client.py +++ b/google/cloud/aiplatform_v1beta1/services/deployment_resource_pool_service/async_client.py @@ -38,6 +38,7 @@ from google.auth import credentials as ga_credentials # type: ignore from google.oauth2 import service_account # type: ignore + try: OptionalRetry = Union[retries.AsyncRetry, gapic_v1.method._MethodDefault, None] except AttributeError: # pragma: NO COVER diff --git a/google/cloud/aiplatform_v1beta1/services/deployment_resource_pool_service/transports/rest.py b/google/cloud/aiplatform_v1beta1/services/deployment_resource_pool_service/transports/rest.py index 29964f348c..3b1898b0d0 100644 --- a/google/cloud/aiplatform_v1beta1/services/deployment_resource_pool_service/transports/rest.py +++ b/google/cloud/aiplatform_v1beta1/services/deployment_resource_pool_service/transports/rest.py @@ -718,6 +718,18 @@ def operations_client(self) -> operations_v1.AbstractOperationsClient: "method": "post", "uri": "/ui/{name=projects/*/locations/*/models/*/evaluations/*/operations/*}:cancel", }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/notebookExecutionJobs/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/notebookRuntimes/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/notebookRuntimeTemplates/*/operations/*}:cancel", + }, { "method": "post", "uri": "/ui/{name=projects/*/locations/*/persistentResources/*/operations/*}:cancel", @@ -886,6 +898,18 @@ def operations_client(self) -> operations_v1.AbstractOperationsClient: "method": "post", "uri": "/v1beta1/{name=projects/*/locations/*/models/*/evaluations/*/operations/*}:cancel", }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/notebookExecutionJobs/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/notebookRuntimes/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/notebookRuntimeTemplates/*/operations/*}:cancel", + }, { "method": "post", "uri": "/v1beta1/{name=projects/*/locations/*/persistentResources/*/operations/*}:cancel", @@ -1064,6 +1088,18 @@ def operations_client(self) -> operations_v1.AbstractOperationsClient: "method": "delete", "uri": "/ui/{name=projects/*/locations/*/models/*/evaluations/*/operations/*}", }, + { + "method": "delete", + "uri": 
"/ui/{name=projects/*/locations/*/notebookExecutionJobs/*/operations/*}", + }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/notebookRuntimes/*/operations/*}", + }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/notebookRuntimeTemplates/*/operations/*}", + }, { "method": "delete", "uri": "/ui/{name=projects/*/locations/*/persistentResources/*/operations/*}", @@ -1252,6 +1288,18 @@ def operations_client(self) -> operations_v1.AbstractOperationsClient: "method": "delete", "uri": "/v1beta1/{name=projects/*/locations/*/models/*/evaluations/*/operations/*}", }, + { + "method": "delete", + "uri": "/v1beta1/{name=projects/*/locations/*/notebookExecutionJobs/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1beta1/{name=projects/*/locations/*/notebookRuntimes/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1beta1/{name=projects/*/locations/*/notebookRuntimeTemplates/*/operations/*}", + }, { "method": "delete", "uri": "/v1beta1/{name=projects/*/locations/*/persistentResources/*/operations/*}", @@ -1458,6 +1506,18 @@ def operations_client(self) -> operations_v1.AbstractOperationsClient: "method": "get", "uri": "/ui/{name=projects/*/locations/*/models/*/evaluations/*/operations/*}", }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/notebookExecutionJobs/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/notebookRuntimes/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/notebookRuntimeTemplates/*/operations/*}", + }, { "method": "get", "uri": "/ui/{name=projects/*/locations/*/persistentResources/*/operations/*}", @@ -1646,6 +1706,18 @@ def operations_client(self) -> operations_v1.AbstractOperationsClient: "method": "get", "uri": "/v1beta1/{name=projects/*/locations/*/models/*/evaluations/*/operations/*}", }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/notebookExecutionJobs/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/notebookRuntimes/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/notebookRuntimeTemplates/*/operations/*}", + }, { "method": "get", "uri": "/v1beta1/{name=projects/*/locations/*/persistentResources/*/operations/*}", @@ -1848,6 +1920,18 @@ def operations_client(self) -> operations_v1.AbstractOperationsClient: "method": "get", "uri": "/ui/{name=projects/*/locations/*/models/*/evaluations/*}/operations", }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/notebookExecutionJobs/*}/operations", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/notebookRuntimes/*}/operations", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/notebookRuntimeTemplates/*}/operations", + }, { "method": "get", "uri": "/ui/{name=projects/*/locations/*/studies/*}/operations", @@ -2036,6 +2120,18 @@ def operations_client(self) -> operations_v1.AbstractOperationsClient: "method": "get", "uri": "/v1beta1/{name=projects/*/locations/*/models/*/evaluations/*}/operations", }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/notebookExecutionJobs/*}/operations", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/notebookRuntimes/*}/operations", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/notebookRuntimeTemplates/*}/operations", + }, { "method": "get", "uri": 
"/v1beta1/{name=projects/*/locations/*/persistentResources/*}/operations", @@ -2238,6 +2334,18 @@ def operations_client(self) -> operations_v1.AbstractOperationsClient: "method": "post", "uri": "/ui/{name=projects/*/locations/*/models/*/evaluations/*/operations/*}:wait", }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/notebookExecutionJobs/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/notebookRuntimes/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/notebookRuntimeTemplates/*/operations/*}:wait", + }, { "method": "post", "uri": "/ui/{name=projects/*/locations/*/studies/*/operations/*}:wait", @@ -2426,6 +2534,18 @@ def operations_client(self) -> operations_v1.AbstractOperationsClient: "method": "post", "uri": "/v1beta1/{name=projects/*/locations/*/models/*/evaluations/*/operations/*}:wait", }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/notebookExecutionJobs/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/notebookRuntimes/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/notebookRuntimeTemplates/*/operations/*}:wait", + }, { "method": "post", "uri": "/v1beta1/{name=projects/*/locations/*/persistentResources/*/operations/*}:wait", @@ -3731,6 +3851,18 @@ def __call__( "method": "post", "uri": "/ui/{name=projects/*/locations/*/models/*/evaluations/*/operations/*}:cancel", }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/notebookExecutionJobs/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/notebookRuntimes/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/notebookRuntimeTemplates/*/operations/*}:cancel", + }, { "method": "post", "uri": "/ui/{name=projects/*/locations/*/persistentResources/*/operations/*}:cancel", @@ -3899,6 +4031,18 @@ def __call__( "method": "post", "uri": "/v1beta1/{name=projects/*/locations/*/models/*/evaluations/*/operations/*}:cancel", }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/notebookExecutionJobs/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/notebookRuntimes/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/notebookRuntimeTemplates/*/operations/*}:cancel", + }, { "method": "post", "uri": "/v1beta1/{name=projects/*/locations/*/persistentResources/*/operations/*}:cancel", @@ -4134,6 +4278,18 @@ def __call__( "method": "delete", "uri": "/ui/{name=projects/*/locations/*/models/*/evaluations/*/operations/*}", }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/notebookExecutionJobs/*/operations/*}", + }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/notebookRuntimes/*/operations/*}", + }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/notebookRuntimeTemplates/*/operations/*}", + }, { "method": "delete", "uri": "/ui/{name=projects/*/locations/*/persistentResources/*/operations/*}", @@ -4322,6 +4478,18 @@ def __call__( "method": "delete", "uri": "/v1beta1/{name=projects/*/locations/*/models/*/evaluations/*/operations/*}", }, + { + "method": "delete", + "uri": "/v1beta1/{name=projects/*/locations/*/notebookExecutionJobs/*/operations/*}", + }, + { + "method": "delete", + "uri": 
"/v1beta1/{name=projects/*/locations/*/notebookRuntimes/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1beta1/{name=projects/*/locations/*/notebookRuntimeTemplates/*/operations/*}", + }, { "method": "delete", "uri": "/v1beta1/{name=projects/*/locations/*/persistentResources/*/operations/*}", @@ -4588,6 +4756,18 @@ def __call__( "method": "get", "uri": "/ui/{name=projects/*/locations/*/models/*/evaluations/*/operations/*}", }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/notebookExecutionJobs/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/notebookRuntimes/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/notebookRuntimeTemplates/*/operations/*}", + }, { "method": "get", "uri": "/ui/{name=projects/*/locations/*/persistentResources/*/operations/*}", @@ -4776,6 +4956,18 @@ def __call__( "method": "get", "uri": "/v1beta1/{name=projects/*/locations/*/models/*/evaluations/*/operations/*}", }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/notebookExecutionJobs/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/notebookRuntimes/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/notebookRuntimeTemplates/*/operations/*}", + }, { "method": "get", "uri": "/v1beta1/{name=projects/*/locations/*/persistentResources/*/operations/*}", @@ -5039,6 +5231,18 @@ def __call__( "method": "get", "uri": "/ui/{name=projects/*/locations/*/models/*/evaluations/*}/operations", }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/notebookExecutionJobs/*}/operations", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/notebookRuntimes/*}/operations", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/notebookRuntimeTemplates/*}/operations", + }, { "method": "get", "uri": "/ui/{name=projects/*/locations/*/studies/*}/operations", @@ -5227,6 +5431,18 @@ def __call__( "method": "get", "uri": "/v1beta1/{name=projects/*/locations/*/models/*/evaluations/*}/operations", }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/notebookExecutionJobs/*}/operations", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/notebookRuntimes/*}/operations", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/notebookRuntimeTemplates/*}/operations", + }, { "method": "get", "uri": "/v1beta1/{name=projects/*/locations/*/persistentResources/*}/operations", @@ -5490,6 +5706,18 @@ def __call__( "method": "post", "uri": "/ui/{name=projects/*/locations/*/models/*/evaluations/*/operations/*}:wait", }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/notebookExecutionJobs/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/notebookRuntimes/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/notebookRuntimeTemplates/*/operations/*}:wait", + }, { "method": "post", "uri": "/ui/{name=projects/*/locations/*/studies/*/operations/*}:wait", @@ -5678,6 +5906,18 @@ def __call__( "method": "post", "uri": "/v1beta1/{name=projects/*/locations/*/models/*/evaluations/*/operations/*}:wait", }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/notebookExecutionJobs/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/notebookRuntimes/*/operations/*}:wait", 
+ }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/notebookRuntimeTemplates/*/operations/*}:wait", + }, { "method": "post", "uri": "/v1beta1/{name=projects/*/locations/*/persistentResources/*/operations/*}:wait", diff --git a/google/cloud/aiplatform_v1beta1/services/endpoint_service/async_client.py b/google/cloud/aiplatform_v1beta1/services/endpoint_service/async_client.py index 7af3b920b3..28a29497e5 100644 --- a/google/cloud/aiplatform_v1beta1/services/endpoint_service/async_client.py +++ b/google/cloud/aiplatform_v1beta1/services/endpoint_service/async_client.py @@ -38,6 +38,7 @@ from google.auth import credentials as ga_credentials # type: ignore from google.oauth2 import service_account # type: ignore + try: OptionalRetry = Union[retries.AsyncRetry, gapic_v1.method._MethodDefault, None] except AttributeError: # pragma: NO COVER diff --git a/google/cloud/aiplatform_v1beta1/services/endpoint_service/transports/rest.py b/google/cloud/aiplatform_v1beta1/services/endpoint_service/transports/rest.py index c54faa4e34..da5a333b13 100644 --- a/google/cloud/aiplatform_v1beta1/services/endpoint_service/transports/rest.py +++ b/google/cloud/aiplatform_v1beta1/services/endpoint_service/transports/rest.py @@ -790,6 +790,18 @@ def operations_client(self) -> operations_v1.AbstractOperationsClient: "method": "post", "uri": "/ui/{name=projects/*/locations/*/models/*/evaluations/*/operations/*}:cancel", }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/notebookExecutionJobs/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/notebookRuntimes/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/notebookRuntimeTemplates/*/operations/*}:cancel", + }, { "method": "post", "uri": "/ui/{name=projects/*/locations/*/persistentResources/*/operations/*}:cancel", @@ -958,6 +970,18 @@ def operations_client(self) -> operations_v1.AbstractOperationsClient: "method": "post", "uri": "/v1beta1/{name=projects/*/locations/*/models/*/evaluations/*/operations/*}:cancel", }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/notebookExecutionJobs/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/notebookRuntimes/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/notebookRuntimeTemplates/*/operations/*}:cancel", + }, { "method": "post", "uri": "/v1beta1/{name=projects/*/locations/*/persistentResources/*/operations/*}:cancel", @@ -1136,6 +1160,18 @@ def operations_client(self) -> operations_v1.AbstractOperationsClient: "method": "delete", "uri": "/ui/{name=projects/*/locations/*/models/*/evaluations/*/operations/*}", }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/notebookExecutionJobs/*/operations/*}", + }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/notebookRuntimes/*/operations/*}", + }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/notebookRuntimeTemplates/*/operations/*}", + }, { "method": "delete", "uri": "/ui/{name=projects/*/locations/*/persistentResources/*/operations/*}", @@ -1324,6 +1360,18 @@ def operations_client(self) -> operations_v1.AbstractOperationsClient: "method": "delete", "uri": "/v1beta1/{name=projects/*/locations/*/models/*/evaluations/*/operations/*}", }, + { + "method": "delete", + "uri": "/v1beta1/{name=projects/*/locations/*/notebookExecutionJobs/*/operations/*}", + }, + { + 
"method": "delete", + "uri": "/v1beta1/{name=projects/*/locations/*/notebookRuntimes/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1beta1/{name=projects/*/locations/*/notebookRuntimeTemplates/*/operations/*}", + }, { "method": "delete", "uri": "/v1beta1/{name=projects/*/locations/*/persistentResources/*/operations/*}", @@ -1530,6 +1578,18 @@ def operations_client(self) -> operations_v1.AbstractOperationsClient: "method": "get", "uri": "/ui/{name=projects/*/locations/*/models/*/evaluations/*/operations/*}", }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/notebookExecutionJobs/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/notebookRuntimes/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/notebookRuntimeTemplates/*/operations/*}", + }, { "method": "get", "uri": "/ui/{name=projects/*/locations/*/persistentResources/*/operations/*}", @@ -1718,6 +1778,18 @@ def operations_client(self) -> operations_v1.AbstractOperationsClient: "method": "get", "uri": "/v1beta1/{name=projects/*/locations/*/models/*/evaluations/*/operations/*}", }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/notebookExecutionJobs/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/notebookRuntimes/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/notebookRuntimeTemplates/*/operations/*}", + }, { "method": "get", "uri": "/v1beta1/{name=projects/*/locations/*/persistentResources/*/operations/*}", @@ -1920,6 +1992,18 @@ def operations_client(self) -> operations_v1.AbstractOperationsClient: "method": "get", "uri": "/ui/{name=projects/*/locations/*/models/*/evaluations/*}/operations", }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/notebookExecutionJobs/*}/operations", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/notebookRuntimes/*}/operations", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/notebookRuntimeTemplates/*}/operations", + }, { "method": "get", "uri": "/ui/{name=projects/*/locations/*/studies/*}/operations", @@ -2108,6 +2192,18 @@ def operations_client(self) -> operations_v1.AbstractOperationsClient: "method": "get", "uri": "/v1beta1/{name=projects/*/locations/*/models/*/evaluations/*}/operations", }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/notebookExecutionJobs/*}/operations", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/notebookRuntimes/*}/operations", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/notebookRuntimeTemplates/*}/operations", + }, { "method": "get", "uri": "/v1beta1/{name=projects/*/locations/*/persistentResources/*}/operations", @@ -2310,6 +2406,18 @@ def operations_client(self) -> operations_v1.AbstractOperationsClient: "method": "post", "uri": "/ui/{name=projects/*/locations/*/models/*/evaluations/*/operations/*}:wait", }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/notebookExecutionJobs/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/notebookRuntimes/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/notebookRuntimeTemplates/*/operations/*}:wait", + }, { "method": "post", "uri": "/ui/{name=projects/*/locations/*/studies/*/operations/*}:wait", @@ -2498,6 +2606,18 @@ def operations_client(self) -> 
operations_v1.AbstractOperationsClient: "method": "post", "uri": "/v1beta1/{name=projects/*/locations/*/models/*/evaluations/*/operations/*}:wait", }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/notebookExecutionJobs/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/notebookRuntimes/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/notebookRuntimeTemplates/*/operations/*}:wait", + }, { "method": "post", "uri": "/v1beta1/{name=projects/*/locations/*/persistentResources/*/operations/*}:wait", @@ -4063,6 +4183,18 @@ def __call__( "method": "post", "uri": "/ui/{name=projects/*/locations/*/models/*/evaluations/*/operations/*}:cancel", }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/notebookExecutionJobs/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/notebookRuntimes/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/notebookRuntimeTemplates/*/operations/*}:cancel", + }, { "method": "post", "uri": "/ui/{name=projects/*/locations/*/persistentResources/*/operations/*}:cancel", @@ -4231,6 +4363,18 @@ def __call__( "method": "post", "uri": "/v1beta1/{name=projects/*/locations/*/models/*/evaluations/*/operations/*}:cancel", }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/notebookExecutionJobs/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/notebookRuntimes/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/notebookRuntimeTemplates/*/operations/*}:cancel", + }, { "method": "post", "uri": "/v1beta1/{name=projects/*/locations/*/persistentResources/*/operations/*}:cancel", @@ -4466,6 +4610,18 @@ def __call__( "method": "delete", "uri": "/ui/{name=projects/*/locations/*/models/*/evaluations/*/operations/*}", }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/notebookExecutionJobs/*/operations/*}", + }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/notebookRuntimes/*/operations/*}", + }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/notebookRuntimeTemplates/*/operations/*}", + }, { "method": "delete", "uri": "/ui/{name=projects/*/locations/*/persistentResources/*/operations/*}", @@ -4654,6 +4810,18 @@ def __call__( "method": "delete", "uri": "/v1beta1/{name=projects/*/locations/*/models/*/evaluations/*/operations/*}", }, + { + "method": "delete", + "uri": "/v1beta1/{name=projects/*/locations/*/notebookExecutionJobs/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1beta1/{name=projects/*/locations/*/notebookRuntimes/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1beta1/{name=projects/*/locations/*/notebookRuntimeTemplates/*/operations/*}", + }, { "method": "delete", "uri": "/v1beta1/{name=projects/*/locations/*/persistentResources/*/operations/*}", @@ -4920,6 +5088,18 @@ def __call__( "method": "get", "uri": "/ui/{name=projects/*/locations/*/models/*/evaluations/*/operations/*}", }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/notebookExecutionJobs/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/notebookRuntimes/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/notebookRuntimeTemplates/*/operations/*}", + }, { "method": "get", "uri": 
"/ui/{name=projects/*/locations/*/persistentResources/*/operations/*}", @@ -5108,6 +5288,18 @@ def __call__( "method": "get", "uri": "/v1beta1/{name=projects/*/locations/*/models/*/evaluations/*/operations/*}", }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/notebookExecutionJobs/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/notebookRuntimes/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/notebookRuntimeTemplates/*/operations/*}", + }, { "method": "get", "uri": "/v1beta1/{name=projects/*/locations/*/persistentResources/*/operations/*}", @@ -5371,6 +5563,18 @@ def __call__( "method": "get", "uri": "/ui/{name=projects/*/locations/*/models/*/evaluations/*}/operations", }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/notebookExecutionJobs/*}/operations", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/notebookRuntimes/*}/operations", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/notebookRuntimeTemplates/*}/operations", + }, { "method": "get", "uri": "/ui/{name=projects/*/locations/*/studies/*}/operations", @@ -5559,6 +5763,18 @@ def __call__( "method": "get", "uri": "/v1beta1/{name=projects/*/locations/*/models/*/evaluations/*}/operations", }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/notebookExecutionJobs/*}/operations", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/notebookRuntimes/*}/operations", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/notebookRuntimeTemplates/*}/operations", + }, { "method": "get", "uri": "/v1beta1/{name=projects/*/locations/*/persistentResources/*}/operations", @@ -5822,6 +6038,18 @@ def __call__( "method": "post", "uri": "/ui/{name=projects/*/locations/*/models/*/evaluations/*/operations/*}:wait", }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/notebookExecutionJobs/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/notebookRuntimes/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/notebookRuntimeTemplates/*/operations/*}:wait", + }, { "method": "post", "uri": "/ui/{name=projects/*/locations/*/studies/*/operations/*}:wait", @@ -6010,6 +6238,18 @@ def __call__( "method": "post", "uri": "/v1beta1/{name=projects/*/locations/*/models/*/evaluations/*/operations/*}:wait", }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/notebookExecutionJobs/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/notebookRuntimes/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/notebookRuntimeTemplates/*/operations/*}:wait", + }, { "method": "post", "uri": "/v1beta1/{name=projects/*/locations/*/persistentResources/*/operations/*}:wait", diff --git a/google/cloud/aiplatform_v1beta1/services/evaluation_service/async_client.py b/google/cloud/aiplatform_v1beta1/services/evaluation_service/async_client.py index 8b4f588e58..912f2086f1 100644 --- a/google/cloud/aiplatform_v1beta1/services/evaluation_service/async_client.py +++ b/google/cloud/aiplatform_v1beta1/services/evaluation_service/async_client.py @@ -38,6 +38,7 @@ from google.auth import credentials as ga_credentials # type: ignore from google.oauth2 import service_account # type: ignore + try: OptionalRetry = Union[retries.AsyncRetry, 
gapic_v1.method._MethodDefault, None] except AttributeError: # pragma: NO COVER diff --git a/google/cloud/aiplatform_v1beta1/services/evaluation_service/transports/rest.py b/google/cloud/aiplatform_v1beta1/services/evaluation_service/transports/rest.py index eb0b0f4592..5402276e46 100644 --- a/google/cloud/aiplatform_v1beta1/services/evaluation_service/transports/rest.py +++ b/google/cloud/aiplatform_v1beta1/services/evaluation_service/transports/rest.py @@ -1224,6 +1224,18 @@ def __call__( "method": "post", "uri": "/ui/{name=projects/*/locations/*/models/*/evaluations/*/operations/*}:cancel", }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/notebookExecutionJobs/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/notebookRuntimes/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/notebookRuntimeTemplates/*/operations/*}:cancel", + }, { "method": "post", "uri": "/ui/{name=projects/*/locations/*/persistentResources/*/operations/*}:cancel", @@ -1392,6 +1404,18 @@ def __call__( "method": "post", "uri": "/v1beta1/{name=projects/*/locations/*/models/*/evaluations/*/operations/*}:cancel", }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/notebookExecutionJobs/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/notebookRuntimes/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/notebookRuntimeTemplates/*/operations/*}:cancel", + }, { "method": "post", "uri": "/v1beta1/{name=projects/*/locations/*/persistentResources/*/operations/*}:cancel", @@ -1627,6 +1651,18 @@ def __call__( "method": "delete", "uri": "/ui/{name=projects/*/locations/*/models/*/evaluations/*/operations/*}", }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/notebookExecutionJobs/*/operations/*}", + }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/notebookRuntimes/*/operations/*}", + }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/notebookRuntimeTemplates/*/operations/*}", + }, { "method": "delete", "uri": "/ui/{name=projects/*/locations/*/persistentResources/*/operations/*}", @@ -1815,6 +1851,18 @@ def __call__( "method": "delete", "uri": "/v1beta1/{name=projects/*/locations/*/models/*/evaluations/*/operations/*}", }, + { + "method": "delete", + "uri": "/v1beta1/{name=projects/*/locations/*/notebookExecutionJobs/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1beta1/{name=projects/*/locations/*/notebookRuntimes/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1beta1/{name=projects/*/locations/*/notebookRuntimeTemplates/*/operations/*}", + }, { "method": "delete", "uri": "/v1beta1/{name=projects/*/locations/*/persistentResources/*/operations/*}", @@ -2081,6 +2129,18 @@ def __call__( "method": "get", "uri": "/ui/{name=projects/*/locations/*/models/*/evaluations/*/operations/*}", }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/notebookExecutionJobs/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/notebookRuntimes/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/notebookRuntimeTemplates/*/operations/*}", + }, { "method": "get", "uri": "/ui/{name=projects/*/locations/*/persistentResources/*/operations/*}", @@ -2269,6 +2329,18 @@ def __call__( "method": "get", "uri": 
"/v1beta1/{name=projects/*/locations/*/models/*/evaluations/*/operations/*}", }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/notebookExecutionJobs/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/notebookRuntimes/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/notebookRuntimeTemplates/*/operations/*}", + }, { "method": "get", "uri": "/v1beta1/{name=projects/*/locations/*/persistentResources/*/operations/*}", @@ -2532,6 +2604,18 @@ def __call__( "method": "get", "uri": "/ui/{name=projects/*/locations/*/models/*/evaluations/*}/operations", }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/notebookExecutionJobs/*}/operations", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/notebookRuntimes/*}/operations", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/notebookRuntimeTemplates/*}/operations", + }, { "method": "get", "uri": "/ui/{name=projects/*/locations/*/studies/*}/operations", @@ -2720,6 +2804,18 @@ def __call__( "method": "get", "uri": "/v1beta1/{name=projects/*/locations/*/models/*/evaluations/*}/operations", }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/notebookExecutionJobs/*}/operations", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/notebookRuntimes/*}/operations", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/notebookRuntimeTemplates/*}/operations", + }, { "method": "get", "uri": "/v1beta1/{name=projects/*/locations/*/persistentResources/*}/operations", @@ -2983,6 +3079,18 @@ def __call__( "method": "post", "uri": "/ui/{name=projects/*/locations/*/models/*/evaluations/*/operations/*}:wait", }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/notebookExecutionJobs/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/notebookRuntimes/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/notebookRuntimeTemplates/*/operations/*}:wait", + }, { "method": "post", "uri": "/ui/{name=projects/*/locations/*/studies/*/operations/*}:wait", @@ -3171,6 +3279,18 @@ def __call__( "method": "post", "uri": "/v1beta1/{name=projects/*/locations/*/models/*/evaluations/*/operations/*}:wait", }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/notebookExecutionJobs/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/notebookRuntimes/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/notebookRuntimeTemplates/*/operations/*}:wait", + }, { "method": "post", "uri": "/v1beta1/{name=projects/*/locations/*/persistentResources/*/operations/*}:wait", diff --git a/google/cloud/aiplatform_v1beta1/services/extension_execution_service/async_client.py b/google/cloud/aiplatform_v1beta1/services/extension_execution_service/async_client.py index d429d3449b..1196e0c767 100644 --- a/google/cloud/aiplatform_v1beta1/services/extension_execution_service/async_client.py +++ b/google/cloud/aiplatform_v1beta1/services/extension_execution_service/async_client.py @@ -38,6 +38,7 @@ from google.auth import credentials as ga_credentials # type: ignore from google.oauth2 import service_account # type: ignore + try: OptionalRetry = Union[retries.AsyncRetry, gapic_v1.method._MethodDefault, None] except AttributeError: # pragma: NO COVER diff --git 
a/google/cloud/aiplatform_v1beta1/services/extension_execution_service/transports/rest.py b/google/cloud/aiplatform_v1beta1/services/extension_execution_service/transports/rest.py index b0ae1b8bb2..04e041e8fa 100644 --- a/google/cloud/aiplatform_v1beta1/services/extension_execution_service/transports/rest.py +++ b/google/cloud/aiplatform_v1beta1/services/extension_execution_service/transports/rest.py @@ -1363,6 +1363,18 @@ def __call__( "method": "post", "uri": "/ui/{name=projects/*/locations/*/models/*/evaluations/*/operations/*}:cancel", }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/notebookExecutionJobs/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/notebookRuntimes/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/notebookRuntimeTemplates/*/operations/*}:cancel", + }, { "method": "post", "uri": "/ui/{name=projects/*/locations/*/persistentResources/*/operations/*}:cancel", @@ -1531,6 +1543,18 @@ def __call__( "method": "post", "uri": "/v1beta1/{name=projects/*/locations/*/models/*/evaluations/*/operations/*}:cancel", }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/notebookExecutionJobs/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/notebookRuntimes/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/notebookRuntimeTemplates/*/operations/*}:cancel", + }, { "method": "post", "uri": "/v1beta1/{name=projects/*/locations/*/persistentResources/*/operations/*}:cancel", @@ -1766,6 +1790,18 @@ def __call__( "method": "delete", "uri": "/ui/{name=projects/*/locations/*/models/*/evaluations/*/operations/*}", }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/notebookExecutionJobs/*/operations/*}", + }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/notebookRuntimes/*/operations/*}", + }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/notebookRuntimeTemplates/*/operations/*}", + }, { "method": "delete", "uri": "/ui/{name=projects/*/locations/*/persistentResources/*/operations/*}", @@ -1954,6 +1990,18 @@ def __call__( "method": "delete", "uri": "/v1beta1/{name=projects/*/locations/*/models/*/evaluations/*/operations/*}", }, + { + "method": "delete", + "uri": "/v1beta1/{name=projects/*/locations/*/notebookExecutionJobs/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1beta1/{name=projects/*/locations/*/notebookRuntimes/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1beta1/{name=projects/*/locations/*/notebookRuntimeTemplates/*/operations/*}", + }, { "method": "delete", "uri": "/v1beta1/{name=projects/*/locations/*/persistentResources/*/operations/*}", @@ -2220,6 +2268,18 @@ def __call__( "method": "get", "uri": "/ui/{name=projects/*/locations/*/models/*/evaluations/*/operations/*}", }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/notebookExecutionJobs/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/notebookRuntimes/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/notebookRuntimeTemplates/*/operations/*}", + }, { "method": "get", "uri": "/ui/{name=projects/*/locations/*/persistentResources/*/operations/*}", @@ -2408,6 +2468,18 @@ def __call__( "method": "get", "uri": "/v1beta1/{name=projects/*/locations/*/models/*/evaluations/*/operations/*}", }, + { + "method": 
"get", + "uri": "/v1beta1/{name=projects/*/locations/*/notebookExecutionJobs/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/notebookRuntimes/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/notebookRuntimeTemplates/*/operations/*}", + }, { "method": "get", "uri": "/v1beta1/{name=projects/*/locations/*/persistentResources/*/operations/*}", @@ -2671,6 +2743,18 @@ def __call__( "method": "get", "uri": "/ui/{name=projects/*/locations/*/models/*/evaluations/*}/operations", }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/notebookExecutionJobs/*}/operations", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/notebookRuntimes/*}/operations", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/notebookRuntimeTemplates/*}/operations", + }, { "method": "get", "uri": "/ui/{name=projects/*/locations/*/studies/*}/operations", @@ -2859,6 +2943,18 @@ def __call__( "method": "get", "uri": "/v1beta1/{name=projects/*/locations/*/models/*/evaluations/*}/operations", }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/notebookExecutionJobs/*}/operations", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/notebookRuntimes/*}/operations", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/notebookRuntimeTemplates/*}/operations", + }, { "method": "get", "uri": "/v1beta1/{name=projects/*/locations/*/persistentResources/*}/operations", @@ -3122,6 +3218,18 @@ def __call__( "method": "post", "uri": "/ui/{name=projects/*/locations/*/models/*/evaluations/*/operations/*}:wait", }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/notebookExecutionJobs/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/notebookRuntimes/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/notebookRuntimeTemplates/*/operations/*}:wait", + }, { "method": "post", "uri": "/ui/{name=projects/*/locations/*/studies/*/operations/*}:wait", @@ -3310,6 +3418,18 @@ def __call__( "method": "post", "uri": "/v1beta1/{name=projects/*/locations/*/models/*/evaluations/*/operations/*}:wait", }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/notebookExecutionJobs/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/notebookRuntimes/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/notebookRuntimeTemplates/*/operations/*}:wait", + }, { "method": "post", "uri": "/v1beta1/{name=projects/*/locations/*/persistentResources/*/operations/*}:wait", diff --git a/google/cloud/aiplatform_v1beta1/services/extension_registry_service/async_client.py b/google/cloud/aiplatform_v1beta1/services/extension_registry_service/async_client.py index f364b0ac7e..8c442b726b 100644 --- a/google/cloud/aiplatform_v1beta1/services/extension_registry_service/async_client.py +++ b/google/cloud/aiplatform_v1beta1/services/extension_registry_service/async_client.py @@ -38,6 +38,7 @@ from google.auth import credentials as ga_credentials # type: ignore from google.oauth2 import service_account # type: ignore + try: OptionalRetry = Union[retries.AsyncRetry, gapic_v1.method._MethodDefault, None] except AttributeError: # pragma: NO COVER diff --git a/google/cloud/aiplatform_v1beta1/services/extension_registry_service/transports/rest.py 
b/google/cloud/aiplatform_v1beta1/services/extension_registry_service/transports/rest.py index 0aba670b8d..6de9414494 100644 --- a/google/cloud/aiplatform_v1beta1/services/extension_registry_service/transports/rest.py +++ b/google/cloud/aiplatform_v1beta1/services/extension_registry_service/transports/rest.py @@ -707,6 +707,18 @@ def operations_client(self) -> operations_v1.AbstractOperationsClient: "method": "post", "uri": "/ui/{name=projects/*/locations/*/models/*/evaluations/*/operations/*}:cancel", }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/notebookExecutionJobs/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/notebookRuntimes/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/notebookRuntimeTemplates/*/operations/*}:cancel", + }, { "method": "post", "uri": "/ui/{name=projects/*/locations/*/persistentResources/*/operations/*}:cancel", @@ -875,6 +887,18 @@ def operations_client(self) -> operations_v1.AbstractOperationsClient: "method": "post", "uri": "/v1beta1/{name=projects/*/locations/*/models/*/evaluations/*/operations/*}:cancel", }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/notebookExecutionJobs/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/notebookRuntimes/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/notebookRuntimeTemplates/*/operations/*}:cancel", + }, { "method": "post", "uri": "/v1beta1/{name=projects/*/locations/*/persistentResources/*/operations/*}:cancel", @@ -1053,6 +1077,18 @@ def operations_client(self) -> operations_v1.AbstractOperationsClient: "method": "delete", "uri": "/ui/{name=projects/*/locations/*/models/*/evaluations/*/operations/*}", }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/notebookExecutionJobs/*/operations/*}", + }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/notebookRuntimes/*/operations/*}", + }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/notebookRuntimeTemplates/*/operations/*}", + }, { "method": "delete", "uri": "/ui/{name=projects/*/locations/*/persistentResources/*/operations/*}", @@ -1241,6 +1277,18 @@ def operations_client(self) -> operations_v1.AbstractOperationsClient: "method": "delete", "uri": "/v1beta1/{name=projects/*/locations/*/models/*/evaluations/*/operations/*}", }, + { + "method": "delete", + "uri": "/v1beta1/{name=projects/*/locations/*/notebookExecutionJobs/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1beta1/{name=projects/*/locations/*/notebookRuntimes/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1beta1/{name=projects/*/locations/*/notebookRuntimeTemplates/*/operations/*}", + }, { "method": "delete", "uri": "/v1beta1/{name=projects/*/locations/*/persistentResources/*/operations/*}", @@ -1447,6 +1495,18 @@ def operations_client(self) -> operations_v1.AbstractOperationsClient: "method": "get", "uri": "/ui/{name=projects/*/locations/*/models/*/evaluations/*/operations/*}", }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/notebookExecutionJobs/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/notebookRuntimes/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/notebookRuntimeTemplates/*/operations/*}", + }, { "method": "get", "uri": 
"/ui/{name=projects/*/locations/*/persistentResources/*/operations/*}", @@ -1635,6 +1695,18 @@ def operations_client(self) -> operations_v1.AbstractOperationsClient: "method": "get", "uri": "/v1beta1/{name=projects/*/locations/*/models/*/evaluations/*/operations/*}", }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/notebookExecutionJobs/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/notebookRuntimes/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/notebookRuntimeTemplates/*/operations/*}", + }, { "method": "get", "uri": "/v1beta1/{name=projects/*/locations/*/persistentResources/*/operations/*}", @@ -1837,6 +1909,18 @@ def operations_client(self) -> operations_v1.AbstractOperationsClient: "method": "get", "uri": "/ui/{name=projects/*/locations/*/models/*/evaluations/*}/operations", }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/notebookExecutionJobs/*}/operations", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/notebookRuntimes/*}/operations", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/notebookRuntimeTemplates/*}/operations", + }, { "method": "get", "uri": "/ui/{name=projects/*/locations/*/studies/*}/operations", @@ -2025,6 +2109,18 @@ def operations_client(self) -> operations_v1.AbstractOperationsClient: "method": "get", "uri": "/v1beta1/{name=projects/*/locations/*/models/*/evaluations/*}/operations", }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/notebookExecutionJobs/*}/operations", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/notebookRuntimes/*}/operations", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/notebookRuntimeTemplates/*}/operations", + }, { "method": "get", "uri": "/v1beta1/{name=projects/*/locations/*/persistentResources/*}/operations", @@ -2227,6 +2323,18 @@ def operations_client(self) -> operations_v1.AbstractOperationsClient: "method": "post", "uri": "/ui/{name=projects/*/locations/*/models/*/evaluations/*/operations/*}:wait", }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/notebookExecutionJobs/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/notebookRuntimes/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/notebookRuntimeTemplates/*/operations/*}:wait", + }, { "method": "post", "uri": "/ui/{name=projects/*/locations/*/studies/*/operations/*}:wait", @@ -2415,6 +2523,18 @@ def operations_client(self) -> operations_v1.AbstractOperationsClient: "method": "post", "uri": "/v1beta1/{name=projects/*/locations/*/models/*/evaluations/*/operations/*}:wait", }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/notebookExecutionJobs/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/notebookRuntimes/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/notebookRuntimeTemplates/*/operations/*}:wait", + }, { "method": "post", "uri": "/v1beta1/{name=projects/*/locations/*/persistentResources/*/operations/*}:wait", @@ -3691,6 +3811,18 @@ def __call__( "method": "post", "uri": "/ui/{name=projects/*/locations/*/models/*/evaluations/*/operations/*}:cancel", }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/notebookExecutionJobs/*/operations/*}:cancel", + }, + { + 
"method": "post", + "uri": "/ui/{name=projects/*/locations/*/notebookRuntimes/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/notebookRuntimeTemplates/*/operations/*}:cancel", + }, { "method": "post", "uri": "/ui/{name=projects/*/locations/*/persistentResources/*/operations/*}:cancel", @@ -3859,6 +3991,18 @@ def __call__( "method": "post", "uri": "/v1beta1/{name=projects/*/locations/*/models/*/evaluations/*/operations/*}:cancel", }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/notebookExecutionJobs/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/notebookRuntimes/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/notebookRuntimeTemplates/*/operations/*}:cancel", + }, { "method": "post", "uri": "/v1beta1/{name=projects/*/locations/*/persistentResources/*/operations/*}:cancel", @@ -4094,6 +4238,18 @@ def __call__( "method": "delete", "uri": "/ui/{name=projects/*/locations/*/models/*/evaluations/*/operations/*}", }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/notebookExecutionJobs/*/operations/*}", + }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/notebookRuntimes/*/operations/*}", + }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/notebookRuntimeTemplates/*/operations/*}", + }, { "method": "delete", "uri": "/ui/{name=projects/*/locations/*/persistentResources/*/operations/*}", @@ -4282,6 +4438,18 @@ def __call__( "method": "delete", "uri": "/v1beta1/{name=projects/*/locations/*/models/*/evaluations/*/operations/*}", }, + { + "method": "delete", + "uri": "/v1beta1/{name=projects/*/locations/*/notebookExecutionJobs/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1beta1/{name=projects/*/locations/*/notebookRuntimes/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1beta1/{name=projects/*/locations/*/notebookRuntimeTemplates/*/operations/*}", + }, { "method": "delete", "uri": "/v1beta1/{name=projects/*/locations/*/persistentResources/*/operations/*}", @@ -4548,6 +4716,18 @@ def __call__( "method": "get", "uri": "/ui/{name=projects/*/locations/*/models/*/evaluations/*/operations/*}", }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/notebookExecutionJobs/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/notebookRuntimes/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/notebookRuntimeTemplates/*/operations/*}", + }, { "method": "get", "uri": "/ui/{name=projects/*/locations/*/persistentResources/*/operations/*}", @@ -4736,6 +4916,18 @@ def __call__( "method": "get", "uri": "/v1beta1/{name=projects/*/locations/*/models/*/evaluations/*/operations/*}", }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/notebookExecutionJobs/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/notebookRuntimes/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/notebookRuntimeTemplates/*/operations/*}", + }, { "method": "get", "uri": "/v1beta1/{name=projects/*/locations/*/persistentResources/*/operations/*}", @@ -4999,6 +5191,18 @@ def __call__( "method": "get", "uri": "/ui/{name=projects/*/locations/*/models/*/evaluations/*}/operations", }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/notebookExecutionJobs/*}/operations", + }, 
+ { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/notebookRuntimes/*}/operations", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/notebookRuntimeTemplates/*}/operations", + }, { "method": "get", "uri": "/ui/{name=projects/*/locations/*/studies/*}/operations", @@ -5187,6 +5391,18 @@ def __call__( "method": "get", "uri": "/v1beta1/{name=projects/*/locations/*/models/*/evaluations/*}/operations", }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/notebookExecutionJobs/*}/operations", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/notebookRuntimes/*}/operations", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/notebookRuntimeTemplates/*}/operations", + }, { "method": "get", "uri": "/v1beta1/{name=projects/*/locations/*/persistentResources/*}/operations", @@ -5450,6 +5666,18 @@ def __call__( "method": "post", "uri": "/ui/{name=projects/*/locations/*/models/*/evaluations/*/operations/*}:wait", }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/notebookExecutionJobs/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/notebookRuntimes/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/notebookRuntimeTemplates/*/operations/*}:wait", + }, { "method": "post", "uri": "/ui/{name=projects/*/locations/*/studies/*/operations/*}:wait", @@ -5638,6 +5866,18 @@ def __call__( "method": "post", "uri": "/v1beta1/{name=projects/*/locations/*/models/*/evaluations/*/operations/*}:wait", }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/notebookExecutionJobs/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/notebookRuntimes/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/notebookRuntimeTemplates/*/operations/*}:wait", + }, { "method": "post", "uri": "/v1beta1/{name=projects/*/locations/*/persistentResources/*/operations/*}:wait", diff --git a/google/cloud/aiplatform_v1beta1/services/feature_online_store_admin_service/async_client.py b/google/cloud/aiplatform_v1beta1/services/feature_online_store_admin_service/async_client.py index 9f51e47d01..f1e8656031 100644 --- a/google/cloud/aiplatform_v1beta1/services/feature_online_store_admin_service/async_client.py +++ b/google/cloud/aiplatform_v1beta1/services/feature_online_store_admin_service/async_client.py @@ -38,6 +38,7 @@ from google.auth import credentials as ga_credentials # type: ignore from google.oauth2 import service_account # type: ignore + try: OptionalRetry = Union[retries.AsyncRetry, gapic_v1.method._MethodDefault, None] except AttributeError: # pragma: NO COVER diff --git a/google/cloud/aiplatform_v1beta1/services/feature_online_store_admin_service/transports/rest.py b/google/cloud/aiplatform_v1beta1/services/feature_online_store_admin_service/transports/rest.py index 6c62b4e0d1..75848c0bb5 100644 --- a/google/cloud/aiplatform_v1beta1/services/feature_online_store_admin_service/transports/rest.py +++ b/google/cloud/aiplatform_v1beta1/services/feature_online_store_admin_service/transports/rest.py @@ -993,6 +993,18 @@ def operations_client(self) -> operations_v1.AbstractOperationsClient: "method": "post", "uri": "/ui/{name=projects/*/locations/*/models/*/evaluations/*/operations/*}:cancel", }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/notebookExecutionJobs/*/operations/*}:cancel", + }, + 
{ + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/notebookRuntimes/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/notebookRuntimeTemplates/*/operations/*}:cancel", + }, { "method": "post", "uri": "/ui/{name=projects/*/locations/*/persistentResources/*/operations/*}:cancel", @@ -1161,6 +1173,18 @@ def operations_client(self) -> operations_v1.AbstractOperationsClient: "method": "post", "uri": "/v1beta1/{name=projects/*/locations/*/models/*/evaluations/*/operations/*}:cancel", }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/notebookExecutionJobs/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/notebookRuntimes/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/notebookRuntimeTemplates/*/operations/*}:cancel", + }, { "method": "post", "uri": "/v1beta1/{name=projects/*/locations/*/persistentResources/*/operations/*}:cancel", @@ -1339,6 +1363,18 @@ def operations_client(self) -> operations_v1.AbstractOperationsClient: "method": "delete", "uri": "/ui/{name=projects/*/locations/*/models/*/evaluations/*/operations/*}", }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/notebookExecutionJobs/*/operations/*}", + }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/notebookRuntimes/*/operations/*}", + }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/notebookRuntimeTemplates/*/operations/*}", + }, { "method": "delete", "uri": "/ui/{name=projects/*/locations/*/persistentResources/*/operations/*}", @@ -1527,6 +1563,18 @@ def operations_client(self) -> operations_v1.AbstractOperationsClient: "method": "delete", "uri": "/v1beta1/{name=projects/*/locations/*/models/*/evaluations/*/operations/*}", }, + { + "method": "delete", + "uri": "/v1beta1/{name=projects/*/locations/*/notebookExecutionJobs/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1beta1/{name=projects/*/locations/*/notebookRuntimes/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1beta1/{name=projects/*/locations/*/notebookRuntimeTemplates/*/operations/*}", + }, { "method": "delete", "uri": "/v1beta1/{name=projects/*/locations/*/persistentResources/*/operations/*}", @@ -1733,6 +1781,18 @@ def operations_client(self) -> operations_v1.AbstractOperationsClient: "method": "get", "uri": "/ui/{name=projects/*/locations/*/models/*/evaluations/*/operations/*}", }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/notebookExecutionJobs/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/notebookRuntimes/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/notebookRuntimeTemplates/*/operations/*}", + }, { "method": "get", "uri": "/ui/{name=projects/*/locations/*/persistentResources/*/operations/*}", @@ -1921,6 +1981,18 @@ def operations_client(self) -> operations_v1.AbstractOperationsClient: "method": "get", "uri": "/v1beta1/{name=projects/*/locations/*/models/*/evaluations/*/operations/*}", }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/notebookExecutionJobs/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/notebookRuntimes/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/notebookRuntimeTemplates/*/operations/*}", + }, { "method": "get", "uri": 
"/v1beta1/{name=projects/*/locations/*/persistentResources/*/operations/*}", @@ -2123,6 +2195,18 @@ def operations_client(self) -> operations_v1.AbstractOperationsClient: "method": "get", "uri": "/ui/{name=projects/*/locations/*/models/*/evaluations/*}/operations", }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/notebookExecutionJobs/*}/operations", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/notebookRuntimes/*}/operations", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/notebookRuntimeTemplates/*}/operations", + }, { "method": "get", "uri": "/ui/{name=projects/*/locations/*/studies/*}/operations", @@ -2311,6 +2395,18 @@ def operations_client(self) -> operations_v1.AbstractOperationsClient: "method": "get", "uri": "/v1beta1/{name=projects/*/locations/*/models/*/evaluations/*}/operations", }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/notebookExecutionJobs/*}/operations", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/notebookRuntimes/*}/operations", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/notebookRuntimeTemplates/*}/operations", + }, { "method": "get", "uri": "/v1beta1/{name=projects/*/locations/*/persistentResources/*}/operations", @@ -2513,6 +2609,18 @@ def operations_client(self) -> operations_v1.AbstractOperationsClient: "method": "post", "uri": "/ui/{name=projects/*/locations/*/models/*/evaluations/*/operations/*}:wait", }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/notebookExecutionJobs/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/notebookRuntimes/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/notebookRuntimeTemplates/*/operations/*}:wait", + }, { "method": "post", "uri": "/ui/{name=projects/*/locations/*/studies/*/operations/*}:wait", @@ -2701,6 +2809,18 @@ def operations_client(self) -> operations_v1.AbstractOperationsClient: "method": "post", "uri": "/v1beta1/{name=projects/*/locations/*/models/*/evaluations/*/operations/*}:wait", }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/notebookExecutionJobs/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/notebookRuntimes/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/notebookRuntimeTemplates/*/operations/*}:wait", + }, { "method": "post", "uri": "/v1beta1/{name=projects/*/locations/*/persistentResources/*/operations/*}:wait", @@ -4852,6 +4972,18 @@ def __call__( "method": "post", "uri": "/ui/{name=projects/*/locations/*/models/*/evaluations/*/operations/*}:cancel", }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/notebookExecutionJobs/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/notebookRuntimes/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/notebookRuntimeTemplates/*/operations/*}:cancel", + }, { "method": "post", "uri": "/ui/{name=projects/*/locations/*/persistentResources/*/operations/*}:cancel", @@ -5020,6 +5152,18 @@ def __call__( "method": "post", "uri": "/v1beta1/{name=projects/*/locations/*/models/*/evaluations/*/operations/*}:cancel", }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/notebookExecutionJobs/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": 
"/v1beta1/{name=projects/*/locations/*/notebookRuntimes/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/notebookRuntimeTemplates/*/operations/*}:cancel", + }, { "method": "post", "uri": "/v1beta1/{name=projects/*/locations/*/persistentResources/*/operations/*}:cancel", @@ -5255,6 +5399,18 @@ def __call__( "method": "delete", "uri": "/ui/{name=projects/*/locations/*/models/*/evaluations/*/operations/*}", }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/notebookExecutionJobs/*/operations/*}", + }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/notebookRuntimes/*/operations/*}", + }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/notebookRuntimeTemplates/*/operations/*}", + }, { "method": "delete", "uri": "/ui/{name=projects/*/locations/*/persistentResources/*/operations/*}", @@ -5443,6 +5599,18 @@ def __call__( "method": "delete", "uri": "/v1beta1/{name=projects/*/locations/*/models/*/evaluations/*/operations/*}", }, + { + "method": "delete", + "uri": "/v1beta1/{name=projects/*/locations/*/notebookExecutionJobs/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1beta1/{name=projects/*/locations/*/notebookRuntimes/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1beta1/{name=projects/*/locations/*/notebookRuntimeTemplates/*/operations/*}", + }, { "method": "delete", "uri": "/v1beta1/{name=projects/*/locations/*/persistentResources/*/operations/*}", @@ -5709,6 +5877,18 @@ def __call__( "method": "get", "uri": "/ui/{name=projects/*/locations/*/models/*/evaluations/*/operations/*}", }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/notebookExecutionJobs/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/notebookRuntimes/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/notebookRuntimeTemplates/*/operations/*}", + }, { "method": "get", "uri": "/ui/{name=projects/*/locations/*/persistentResources/*/operations/*}", @@ -5897,6 +6077,18 @@ def __call__( "method": "get", "uri": "/v1beta1/{name=projects/*/locations/*/models/*/evaluations/*/operations/*}", }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/notebookExecutionJobs/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/notebookRuntimes/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/notebookRuntimeTemplates/*/operations/*}", + }, { "method": "get", "uri": "/v1beta1/{name=projects/*/locations/*/persistentResources/*/operations/*}", @@ -6160,6 +6352,18 @@ def __call__( "method": "get", "uri": "/ui/{name=projects/*/locations/*/models/*/evaluations/*}/operations", }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/notebookExecutionJobs/*}/operations", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/notebookRuntimes/*}/operations", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/notebookRuntimeTemplates/*}/operations", + }, { "method": "get", "uri": "/ui/{name=projects/*/locations/*/studies/*}/operations", @@ -6348,6 +6552,18 @@ def __call__( "method": "get", "uri": "/v1beta1/{name=projects/*/locations/*/models/*/evaluations/*}/operations", }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/notebookExecutionJobs/*}/operations", + }, + { + "method": "get", + "uri": 
"/v1beta1/{name=projects/*/locations/*/notebookRuntimes/*}/operations", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/notebookRuntimeTemplates/*}/operations", + }, { "method": "get", "uri": "/v1beta1/{name=projects/*/locations/*/persistentResources/*}/operations", @@ -6611,6 +6827,18 @@ def __call__( "method": "post", "uri": "/ui/{name=projects/*/locations/*/models/*/evaluations/*/operations/*}:wait", }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/notebookExecutionJobs/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/notebookRuntimes/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/notebookRuntimeTemplates/*/operations/*}:wait", + }, { "method": "post", "uri": "/ui/{name=projects/*/locations/*/studies/*/operations/*}:wait", @@ -6799,6 +7027,18 @@ def __call__( "method": "post", "uri": "/v1beta1/{name=projects/*/locations/*/models/*/evaluations/*/operations/*}:wait", }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/notebookExecutionJobs/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/notebookRuntimes/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/notebookRuntimeTemplates/*/operations/*}:wait", + }, { "method": "post", "uri": "/v1beta1/{name=projects/*/locations/*/persistentResources/*/operations/*}:wait", diff --git a/google/cloud/aiplatform_v1beta1/services/feature_online_store_service/async_client.py b/google/cloud/aiplatform_v1beta1/services/feature_online_store_service/async_client.py index e65876ad28..b457b168eb 100644 --- a/google/cloud/aiplatform_v1beta1/services/feature_online_store_service/async_client.py +++ b/google/cloud/aiplatform_v1beta1/services/feature_online_store_service/async_client.py @@ -41,6 +41,7 @@ from google.auth import credentials as ga_credentials # type: ignore from google.oauth2 import service_account # type: ignore + try: OptionalRetry = Union[retries.AsyncRetry, gapic_v1.method._MethodDefault, None] except AttributeError: # pragma: NO COVER diff --git a/google/cloud/aiplatform_v1beta1/services/feature_online_store_service/transports/rest.py b/google/cloud/aiplatform_v1beta1/services/feature_online_store_service/transports/rest.py index cf7363233f..8711c71d73 100644 --- a/google/cloud/aiplatform_v1beta1/services/feature_online_store_service/transports/rest.py +++ b/google/cloud/aiplatform_v1beta1/services/feature_online_store_service/transports/rest.py @@ -1402,6 +1402,18 @@ def __call__( "method": "post", "uri": "/ui/{name=projects/*/locations/*/models/*/evaluations/*/operations/*}:cancel", }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/notebookExecutionJobs/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/notebookRuntimes/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/notebookRuntimeTemplates/*/operations/*}:cancel", + }, { "method": "post", "uri": "/ui/{name=projects/*/locations/*/persistentResources/*/operations/*}:cancel", @@ -1570,6 +1582,18 @@ def __call__( "method": "post", "uri": "/v1beta1/{name=projects/*/locations/*/models/*/evaluations/*/operations/*}:cancel", }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/notebookExecutionJobs/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": 
"/v1beta1/{name=projects/*/locations/*/notebookRuntimes/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/notebookRuntimeTemplates/*/operations/*}:cancel", + }, { "method": "post", "uri": "/v1beta1/{name=projects/*/locations/*/persistentResources/*/operations/*}:cancel", @@ -1805,6 +1829,18 @@ def __call__( "method": "delete", "uri": "/ui/{name=projects/*/locations/*/models/*/evaluations/*/operations/*}", }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/notebookExecutionJobs/*/operations/*}", + }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/notebookRuntimes/*/operations/*}", + }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/notebookRuntimeTemplates/*/operations/*}", + }, { "method": "delete", "uri": "/ui/{name=projects/*/locations/*/persistentResources/*/operations/*}", @@ -1993,6 +2029,18 @@ def __call__( "method": "delete", "uri": "/v1beta1/{name=projects/*/locations/*/models/*/evaluations/*/operations/*}", }, + { + "method": "delete", + "uri": "/v1beta1/{name=projects/*/locations/*/notebookExecutionJobs/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1beta1/{name=projects/*/locations/*/notebookRuntimes/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1beta1/{name=projects/*/locations/*/notebookRuntimeTemplates/*/operations/*}", + }, { "method": "delete", "uri": "/v1beta1/{name=projects/*/locations/*/persistentResources/*/operations/*}", @@ -2259,6 +2307,18 @@ def __call__( "method": "get", "uri": "/ui/{name=projects/*/locations/*/models/*/evaluations/*/operations/*}", }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/notebookExecutionJobs/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/notebookRuntimes/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/notebookRuntimeTemplates/*/operations/*}", + }, { "method": "get", "uri": "/ui/{name=projects/*/locations/*/persistentResources/*/operations/*}", @@ -2447,6 +2507,18 @@ def __call__( "method": "get", "uri": "/v1beta1/{name=projects/*/locations/*/models/*/evaluations/*/operations/*}", }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/notebookExecutionJobs/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/notebookRuntimes/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/notebookRuntimeTemplates/*/operations/*}", + }, { "method": "get", "uri": "/v1beta1/{name=projects/*/locations/*/persistentResources/*/operations/*}", @@ -2710,6 +2782,18 @@ def __call__( "method": "get", "uri": "/ui/{name=projects/*/locations/*/models/*/evaluations/*}/operations", }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/notebookExecutionJobs/*}/operations", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/notebookRuntimes/*}/operations", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/notebookRuntimeTemplates/*}/operations", + }, { "method": "get", "uri": "/ui/{name=projects/*/locations/*/studies/*}/operations", @@ -2898,6 +2982,18 @@ def __call__( "method": "get", "uri": "/v1beta1/{name=projects/*/locations/*/models/*/evaluations/*}/operations", }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/notebookExecutionJobs/*}/operations", + }, + { + "method": "get", + "uri": 
"/v1beta1/{name=projects/*/locations/*/notebookRuntimes/*}/operations", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/notebookRuntimeTemplates/*}/operations", + }, { "method": "get", "uri": "/v1beta1/{name=projects/*/locations/*/persistentResources/*}/operations", @@ -3161,6 +3257,18 @@ def __call__( "method": "post", "uri": "/ui/{name=projects/*/locations/*/models/*/evaluations/*/operations/*}:wait", }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/notebookExecutionJobs/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/notebookRuntimes/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/notebookRuntimeTemplates/*/operations/*}:wait", + }, { "method": "post", "uri": "/ui/{name=projects/*/locations/*/studies/*/operations/*}:wait", @@ -3349,6 +3457,18 @@ def __call__( "method": "post", "uri": "/v1beta1/{name=projects/*/locations/*/models/*/evaluations/*/operations/*}:wait", }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/notebookExecutionJobs/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/notebookRuntimes/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/notebookRuntimeTemplates/*/operations/*}:wait", + }, { "method": "post", "uri": "/v1beta1/{name=projects/*/locations/*/persistentResources/*/operations/*}:wait", diff --git a/google/cloud/aiplatform_v1beta1/services/feature_registry_service/async_client.py b/google/cloud/aiplatform_v1beta1/services/feature_registry_service/async_client.py index 9990545bea..08ee762639 100644 --- a/google/cloud/aiplatform_v1beta1/services/feature_registry_service/async_client.py +++ b/google/cloud/aiplatform_v1beta1/services/feature_registry_service/async_client.py @@ -38,6 +38,7 @@ from google.auth import credentials as ga_credentials # type: ignore from google.oauth2 import service_account # type: ignore + try: OptionalRetry = Union[retries.AsyncRetry, gapic_v1.method._MethodDefault, None] except AttributeError: # pragma: NO COVER diff --git a/google/cloud/aiplatform_v1beta1/services/feature_registry_service/transports/rest.py b/google/cloud/aiplatform_v1beta1/services/feature_registry_service/transports/rest.py index 3d812d34ff..46ea9cf151 100644 --- a/google/cloud/aiplatform_v1beta1/services/feature_registry_service/transports/rest.py +++ b/google/cloud/aiplatform_v1beta1/services/feature_registry_service/transports/rest.py @@ -864,6 +864,18 @@ def operations_client(self) -> operations_v1.AbstractOperationsClient: "method": "post", "uri": "/ui/{name=projects/*/locations/*/models/*/evaluations/*/operations/*}:cancel", }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/notebookExecutionJobs/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/notebookRuntimes/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/notebookRuntimeTemplates/*/operations/*}:cancel", + }, { "method": "post", "uri": "/ui/{name=projects/*/locations/*/persistentResources/*/operations/*}:cancel", @@ -1032,6 +1044,18 @@ def operations_client(self) -> operations_v1.AbstractOperationsClient: "method": "post", "uri": "/v1beta1/{name=projects/*/locations/*/models/*/evaluations/*/operations/*}:cancel", }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/notebookExecutionJobs/*/operations/*}:cancel", 
+ }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/notebookRuntimes/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/notebookRuntimeTemplates/*/operations/*}:cancel", + }, { "method": "post", "uri": "/v1beta1/{name=projects/*/locations/*/persistentResources/*/operations/*}:cancel", @@ -1210,6 +1234,18 @@ def operations_client(self) -> operations_v1.AbstractOperationsClient: "method": "delete", "uri": "/ui/{name=projects/*/locations/*/models/*/evaluations/*/operations/*}", }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/notebookExecutionJobs/*/operations/*}", + }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/notebookRuntimes/*/operations/*}", + }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/notebookRuntimeTemplates/*/operations/*}", + }, { "method": "delete", "uri": "/ui/{name=projects/*/locations/*/persistentResources/*/operations/*}", @@ -1398,6 +1434,18 @@ def operations_client(self) -> operations_v1.AbstractOperationsClient: "method": "delete", "uri": "/v1beta1/{name=projects/*/locations/*/models/*/evaluations/*/operations/*}", }, + { + "method": "delete", + "uri": "/v1beta1/{name=projects/*/locations/*/notebookExecutionJobs/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1beta1/{name=projects/*/locations/*/notebookRuntimes/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1beta1/{name=projects/*/locations/*/notebookRuntimeTemplates/*/operations/*}", + }, { "method": "delete", "uri": "/v1beta1/{name=projects/*/locations/*/persistentResources/*/operations/*}", @@ -1604,6 +1652,18 @@ def operations_client(self) -> operations_v1.AbstractOperationsClient: "method": "get", "uri": "/ui/{name=projects/*/locations/*/models/*/evaluations/*/operations/*}", }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/notebookExecutionJobs/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/notebookRuntimes/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/notebookRuntimeTemplates/*/operations/*}", + }, { "method": "get", "uri": "/ui/{name=projects/*/locations/*/persistentResources/*/operations/*}", @@ -1792,6 +1852,18 @@ def operations_client(self) -> operations_v1.AbstractOperationsClient: "method": "get", "uri": "/v1beta1/{name=projects/*/locations/*/models/*/evaluations/*/operations/*}", }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/notebookExecutionJobs/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/notebookRuntimes/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/notebookRuntimeTemplates/*/operations/*}", + }, { "method": "get", "uri": "/v1beta1/{name=projects/*/locations/*/persistentResources/*/operations/*}", @@ -1994,6 +2066,18 @@ def operations_client(self) -> operations_v1.AbstractOperationsClient: "method": "get", "uri": "/ui/{name=projects/*/locations/*/models/*/evaluations/*}/operations", }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/notebookExecutionJobs/*}/operations", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/notebookRuntimes/*}/operations", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/notebookRuntimeTemplates/*}/operations", + }, { "method": "get", "uri": "/ui/{name=projects/*/locations/*/studies/*}/operations", @@ -2182,6 +2266,18 
@@ def operations_client(self) -> operations_v1.AbstractOperationsClient: "method": "get", "uri": "/v1beta1/{name=projects/*/locations/*/models/*/evaluations/*}/operations", }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/notebookExecutionJobs/*}/operations", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/notebookRuntimes/*}/operations", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/notebookRuntimeTemplates/*}/operations", + }, { "method": "get", "uri": "/v1beta1/{name=projects/*/locations/*/persistentResources/*}/operations", @@ -2384,6 +2480,18 @@ def operations_client(self) -> operations_v1.AbstractOperationsClient: "method": "post", "uri": "/ui/{name=projects/*/locations/*/models/*/evaluations/*/operations/*}:wait", }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/notebookExecutionJobs/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/notebookRuntimes/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/notebookRuntimeTemplates/*/operations/*}:wait", + }, { "method": "post", "uri": "/ui/{name=projects/*/locations/*/studies/*/operations/*}:wait", @@ -2572,6 +2680,18 @@ def operations_client(self) -> operations_v1.AbstractOperationsClient: "method": "post", "uri": "/v1beta1/{name=projects/*/locations/*/models/*/evaluations/*/operations/*}:wait", }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/notebookExecutionJobs/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/notebookRuntimes/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/notebookRuntimeTemplates/*/operations/*}:wait", + }, { "method": "post", "uri": "/v1beta1/{name=projects/*/locations/*/persistentResources/*/operations/*}:wait", @@ -4352,6 +4472,18 @@ def __call__( "method": "post", "uri": "/ui/{name=projects/*/locations/*/models/*/evaluations/*/operations/*}:cancel", }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/notebookExecutionJobs/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/notebookRuntimes/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/notebookRuntimeTemplates/*/operations/*}:cancel", + }, { "method": "post", "uri": "/ui/{name=projects/*/locations/*/persistentResources/*/operations/*}:cancel", @@ -4520,6 +4652,18 @@ def __call__( "method": "post", "uri": "/v1beta1/{name=projects/*/locations/*/models/*/evaluations/*/operations/*}:cancel", }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/notebookExecutionJobs/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/notebookRuntimes/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/notebookRuntimeTemplates/*/operations/*}:cancel", + }, { "method": "post", "uri": "/v1beta1/{name=projects/*/locations/*/persistentResources/*/operations/*}:cancel", @@ -4755,6 +4899,18 @@ def __call__( "method": "delete", "uri": "/ui/{name=projects/*/locations/*/models/*/evaluations/*/operations/*}", }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/notebookExecutionJobs/*/operations/*}", + }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/notebookRuntimes/*/operations/*}", + }, + { + "method": 
"delete", + "uri": "/ui/{name=projects/*/locations/*/notebookRuntimeTemplates/*/operations/*}", + }, { "method": "delete", "uri": "/ui/{name=projects/*/locations/*/persistentResources/*/operations/*}", @@ -4943,6 +5099,18 @@ def __call__( "method": "delete", "uri": "/v1beta1/{name=projects/*/locations/*/models/*/evaluations/*/operations/*}", }, + { + "method": "delete", + "uri": "/v1beta1/{name=projects/*/locations/*/notebookExecutionJobs/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1beta1/{name=projects/*/locations/*/notebookRuntimes/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1beta1/{name=projects/*/locations/*/notebookRuntimeTemplates/*/operations/*}", + }, { "method": "delete", "uri": "/v1beta1/{name=projects/*/locations/*/persistentResources/*/operations/*}", @@ -5209,6 +5377,18 @@ def __call__( "method": "get", "uri": "/ui/{name=projects/*/locations/*/models/*/evaluations/*/operations/*}", }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/notebookExecutionJobs/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/notebookRuntimes/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/notebookRuntimeTemplates/*/operations/*}", + }, { "method": "get", "uri": "/ui/{name=projects/*/locations/*/persistentResources/*/operations/*}", @@ -5397,6 +5577,18 @@ def __call__( "method": "get", "uri": "/v1beta1/{name=projects/*/locations/*/models/*/evaluations/*/operations/*}", }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/notebookExecutionJobs/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/notebookRuntimes/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/notebookRuntimeTemplates/*/operations/*}", + }, { "method": "get", "uri": "/v1beta1/{name=projects/*/locations/*/persistentResources/*/operations/*}", @@ -5660,6 +5852,18 @@ def __call__( "method": "get", "uri": "/ui/{name=projects/*/locations/*/models/*/evaluations/*}/operations", }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/notebookExecutionJobs/*}/operations", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/notebookRuntimes/*}/operations", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/notebookRuntimeTemplates/*}/operations", + }, { "method": "get", "uri": "/ui/{name=projects/*/locations/*/studies/*}/operations", @@ -5848,6 +6052,18 @@ def __call__( "method": "get", "uri": "/v1beta1/{name=projects/*/locations/*/models/*/evaluations/*}/operations", }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/notebookExecutionJobs/*}/operations", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/notebookRuntimes/*}/operations", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/notebookRuntimeTemplates/*}/operations", + }, { "method": "get", "uri": "/v1beta1/{name=projects/*/locations/*/persistentResources/*}/operations", @@ -6111,6 +6327,18 @@ def __call__( "method": "post", "uri": "/ui/{name=projects/*/locations/*/models/*/evaluations/*/operations/*}:wait", }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/notebookExecutionJobs/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/notebookRuntimes/*/operations/*}:wait", + }, + { + "method": "post", + "uri": 
"/ui/{name=projects/*/locations/*/notebookRuntimeTemplates/*/operations/*}:wait", + }, { "method": "post", "uri": "/ui/{name=projects/*/locations/*/studies/*/operations/*}:wait", @@ -6299,6 +6527,18 @@ def __call__( "method": "post", "uri": "/v1beta1/{name=projects/*/locations/*/models/*/evaluations/*/operations/*}:wait", }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/notebookExecutionJobs/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/notebookRuntimes/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/notebookRuntimeTemplates/*/operations/*}:wait", + }, { "method": "post", "uri": "/v1beta1/{name=projects/*/locations/*/persistentResources/*/operations/*}:wait", diff --git a/google/cloud/aiplatform_v1beta1/services/featurestore_online_serving_service/async_client.py b/google/cloud/aiplatform_v1beta1/services/featurestore_online_serving_service/async_client.py index 28fa218975..7af487ac54 100644 --- a/google/cloud/aiplatform_v1beta1/services/featurestore_online_serving_service/async_client.py +++ b/google/cloud/aiplatform_v1beta1/services/featurestore_online_serving_service/async_client.py @@ -40,6 +40,7 @@ from google.auth import credentials as ga_credentials # type: ignore from google.oauth2 import service_account # type: ignore + try: OptionalRetry = Union[retries.AsyncRetry, gapic_v1.method._MethodDefault, None] except AttributeError: # pragma: NO COVER diff --git a/google/cloud/aiplatform_v1beta1/services/featurestore_online_serving_service/transports/rest.py b/google/cloud/aiplatform_v1beta1/services/featurestore_online_serving_service/transports/rest.py index bd9f3a002e..8747e39783 100644 --- a/google/cloud/aiplatform_v1beta1/services/featurestore_online_serving_service/transports/rest.py +++ b/google/cloud/aiplatform_v1beta1/services/featurestore_online_serving_service/transports/rest.py @@ -1517,6 +1517,18 @@ def __call__( "method": "post", "uri": "/ui/{name=projects/*/locations/*/models/*/evaluations/*/operations/*}:cancel", }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/notebookExecutionJobs/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/notebookRuntimes/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/notebookRuntimeTemplates/*/operations/*}:cancel", + }, { "method": "post", "uri": "/ui/{name=projects/*/locations/*/persistentResources/*/operations/*}:cancel", @@ -1685,6 +1697,18 @@ def __call__( "method": "post", "uri": "/v1beta1/{name=projects/*/locations/*/models/*/evaluations/*/operations/*}:cancel", }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/notebookExecutionJobs/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/notebookRuntimes/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/notebookRuntimeTemplates/*/operations/*}:cancel", + }, { "method": "post", "uri": "/v1beta1/{name=projects/*/locations/*/persistentResources/*/operations/*}:cancel", @@ -1920,6 +1944,18 @@ def __call__( "method": "delete", "uri": "/ui/{name=projects/*/locations/*/models/*/evaluations/*/operations/*}", }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/notebookExecutionJobs/*/operations/*}", + }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/notebookRuntimes/*/operations/*}", + }, 
+ { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/notebookRuntimeTemplates/*/operations/*}", + }, { "method": "delete", "uri": "/ui/{name=projects/*/locations/*/persistentResources/*/operations/*}", @@ -2108,6 +2144,18 @@ def __call__( "method": "delete", "uri": "/v1beta1/{name=projects/*/locations/*/models/*/evaluations/*/operations/*}", }, + { + "method": "delete", + "uri": "/v1beta1/{name=projects/*/locations/*/notebookExecutionJobs/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1beta1/{name=projects/*/locations/*/notebookRuntimes/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1beta1/{name=projects/*/locations/*/notebookRuntimeTemplates/*/operations/*}", + }, { "method": "delete", "uri": "/v1beta1/{name=projects/*/locations/*/persistentResources/*/operations/*}", @@ -2374,6 +2422,18 @@ def __call__( "method": "get", "uri": "/ui/{name=projects/*/locations/*/models/*/evaluations/*/operations/*}", }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/notebookExecutionJobs/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/notebookRuntimes/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/notebookRuntimeTemplates/*/operations/*}", + }, { "method": "get", "uri": "/ui/{name=projects/*/locations/*/persistentResources/*/operations/*}", @@ -2562,6 +2622,18 @@ def __call__( "method": "get", "uri": "/v1beta1/{name=projects/*/locations/*/models/*/evaluations/*/operations/*}", }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/notebookExecutionJobs/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/notebookRuntimes/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/notebookRuntimeTemplates/*/operations/*}", + }, { "method": "get", "uri": "/v1beta1/{name=projects/*/locations/*/persistentResources/*/operations/*}", @@ -2825,6 +2897,18 @@ def __call__( "method": "get", "uri": "/ui/{name=projects/*/locations/*/models/*/evaluations/*}/operations", }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/notebookExecutionJobs/*}/operations", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/notebookRuntimes/*}/operations", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/notebookRuntimeTemplates/*}/operations", + }, { "method": "get", "uri": "/ui/{name=projects/*/locations/*/studies/*}/operations", @@ -3013,6 +3097,18 @@ def __call__( "method": "get", "uri": "/v1beta1/{name=projects/*/locations/*/models/*/evaluations/*}/operations", }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/notebookExecutionJobs/*}/operations", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/notebookRuntimes/*}/operations", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/notebookRuntimeTemplates/*}/operations", + }, { "method": "get", "uri": "/v1beta1/{name=projects/*/locations/*/persistentResources/*}/operations", @@ -3276,6 +3372,18 @@ def __call__( "method": "post", "uri": "/ui/{name=projects/*/locations/*/models/*/evaluations/*/operations/*}:wait", }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/notebookExecutionJobs/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/notebookRuntimes/*/operations/*}:wait", + }, + { + "method": "post", + "uri": 
"/ui/{name=projects/*/locations/*/notebookRuntimeTemplates/*/operations/*}:wait", + }, { "method": "post", "uri": "/ui/{name=projects/*/locations/*/studies/*/operations/*}:wait", @@ -3464,6 +3572,18 @@ def __call__( "method": "post", "uri": "/v1beta1/{name=projects/*/locations/*/models/*/evaluations/*/operations/*}:wait", }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/notebookExecutionJobs/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/notebookRuntimes/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/notebookRuntimeTemplates/*/operations/*}:wait", + }, { "method": "post", "uri": "/v1beta1/{name=projects/*/locations/*/persistentResources/*/operations/*}:wait", diff --git a/google/cloud/aiplatform_v1beta1/services/featurestore_service/async_client.py b/google/cloud/aiplatform_v1beta1/services/featurestore_service/async_client.py index 4bec282b54..cdf6d852c5 100644 --- a/google/cloud/aiplatform_v1beta1/services/featurestore_service/async_client.py +++ b/google/cloud/aiplatform_v1beta1/services/featurestore_service/async_client.py @@ -38,6 +38,7 @@ from google.auth import credentials as ga_credentials # type: ignore from google.oauth2 import service_account # type: ignore + try: OptionalRetry = Union[retries.AsyncRetry, gapic_v1.method._MethodDefault, None] except AttributeError: # pragma: NO COVER diff --git a/google/cloud/aiplatform_v1beta1/services/featurestore_service/transports/rest.py b/google/cloud/aiplatform_v1beta1/services/featurestore_service/transports/rest.py index 813b1d3e45..0488f7f28b 100644 --- a/google/cloud/aiplatform_v1beta1/services/featurestore_service/transports/rest.py +++ b/google/cloud/aiplatform_v1beta1/services/featurestore_service/transports/rest.py @@ -1213,6 +1213,18 @@ def operations_client(self) -> operations_v1.AbstractOperationsClient: "method": "post", "uri": "/ui/{name=projects/*/locations/*/models/*/evaluations/*/operations/*}:cancel", }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/notebookExecutionJobs/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/notebookRuntimes/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/notebookRuntimeTemplates/*/operations/*}:cancel", + }, { "method": "post", "uri": "/ui/{name=projects/*/locations/*/persistentResources/*/operations/*}:cancel", @@ -1381,6 +1393,18 @@ def operations_client(self) -> operations_v1.AbstractOperationsClient: "method": "post", "uri": "/v1beta1/{name=projects/*/locations/*/models/*/evaluations/*/operations/*}:cancel", }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/notebookExecutionJobs/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/notebookRuntimes/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/notebookRuntimeTemplates/*/operations/*}:cancel", + }, { "method": "post", "uri": "/v1beta1/{name=projects/*/locations/*/persistentResources/*/operations/*}:cancel", @@ -1559,6 +1583,18 @@ def operations_client(self) -> operations_v1.AbstractOperationsClient: "method": "delete", "uri": "/ui/{name=projects/*/locations/*/models/*/evaluations/*/operations/*}", }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/notebookExecutionJobs/*/operations/*}", + }, + { + "method": "delete", + "uri": 
"/ui/{name=projects/*/locations/*/notebookRuntimes/*/operations/*}", + }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/notebookRuntimeTemplates/*/operations/*}", + }, { "method": "delete", "uri": "/ui/{name=projects/*/locations/*/persistentResources/*/operations/*}", @@ -1747,6 +1783,18 @@ def operations_client(self) -> operations_v1.AbstractOperationsClient: "method": "delete", "uri": "/v1beta1/{name=projects/*/locations/*/models/*/evaluations/*/operations/*}", }, + { + "method": "delete", + "uri": "/v1beta1/{name=projects/*/locations/*/notebookExecutionJobs/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1beta1/{name=projects/*/locations/*/notebookRuntimes/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1beta1/{name=projects/*/locations/*/notebookRuntimeTemplates/*/operations/*}", + }, { "method": "delete", "uri": "/v1beta1/{name=projects/*/locations/*/persistentResources/*/operations/*}", @@ -1953,6 +2001,18 @@ def operations_client(self) -> operations_v1.AbstractOperationsClient: "method": "get", "uri": "/ui/{name=projects/*/locations/*/models/*/evaluations/*/operations/*}", }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/notebookExecutionJobs/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/notebookRuntimes/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/notebookRuntimeTemplates/*/operations/*}", + }, { "method": "get", "uri": "/ui/{name=projects/*/locations/*/persistentResources/*/operations/*}", @@ -2141,6 +2201,18 @@ def operations_client(self) -> operations_v1.AbstractOperationsClient: "method": "get", "uri": "/v1beta1/{name=projects/*/locations/*/models/*/evaluations/*/operations/*}", }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/notebookExecutionJobs/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/notebookRuntimes/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/notebookRuntimeTemplates/*/operations/*}", + }, { "method": "get", "uri": "/v1beta1/{name=projects/*/locations/*/persistentResources/*/operations/*}", @@ -2343,6 +2415,18 @@ def operations_client(self) -> operations_v1.AbstractOperationsClient: "method": "get", "uri": "/ui/{name=projects/*/locations/*/models/*/evaluations/*}/operations", }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/notebookExecutionJobs/*}/operations", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/notebookRuntimes/*}/operations", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/notebookRuntimeTemplates/*}/operations", + }, { "method": "get", "uri": "/ui/{name=projects/*/locations/*/studies/*}/operations", @@ -2531,6 +2615,18 @@ def operations_client(self) -> operations_v1.AbstractOperationsClient: "method": "get", "uri": "/v1beta1/{name=projects/*/locations/*/models/*/evaluations/*}/operations", }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/notebookExecutionJobs/*}/operations", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/notebookRuntimes/*}/operations", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/notebookRuntimeTemplates/*}/operations", + }, { "method": "get", "uri": "/v1beta1/{name=projects/*/locations/*/persistentResources/*}/operations", @@ -2733,6 +2829,18 @@ def operations_client(self) -> 
operations_v1.AbstractOperationsClient: "method": "post", "uri": "/ui/{name=projects/*/locations/*/models/*/evaluations/*/operations/*}:wait", }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/notebookExecutionJobs/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/notebookRuntimes/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/notebookRuntimeTemplates/*/operations/*}:wait", + }, { "method": "post", "uri": "/ui/{name=projects/*/locations/*/studies/*/operations/*}:wait", @@ -2921,6 +3029,18 @@ def operations_client(self) -> operations_v1.AbstractOperationsClient: "method": "post", "uri": "/v1beta1/{name=projects/*/locations/*/models/*/evaluations/*/operations/*}:wait", }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/notebookExecutionJobs/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/notebookRuntimes/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/notebookRuntimeTemplates/*/operations/*}:wait", + }, { "method": "post", "uri": "/v1beta1/{name=projects/*/locations/*/persistentResources/*/operations/*}:wait", @@ -5832,6 +5952,18 @@ def __call__( "method": "post", "uri": "/ui/{name=projects/*/locations/*/models/*/evaluations/*/operations/*}:cancel", }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/notebookExecutionJobs/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/notebookRuntimes/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/notebookRuntimeTemplates/*/operations/*}:cancel", + }, { "method": "post", "uri": "/ui/{name=projects/*/locations/*/persistentResources/*/operations/*}:cancel", @@ -6000,6 +6132,18 @@ def __call__( "method": "post", "uri": "/v1beta1/{name=projects/*/locations/*/models/*/evaluations/*/operations/*}:cancel", }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/notebookExecutionJobs/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/notebookRuntimes/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/notebookRuntimeTemplates/*/operations/*}:cancel", + }, { "method": "post", "uri": "/v1beta1/{name=projects/*/locations/*/persistentResources/*/operations/*}:cancel", @@ -6235,6 +6379,18 @@ def __call__( "method": "delete", "uri": "/ui/{name=projects/*/locations/*/models/*/evaluations/*/operations/*}", }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/notebookExecutionJobs/*/operations/*}", + }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/notebookRuntimes/*/operations/*}", + }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/notebookRuntimeTemplates/*/operations/*}", + }, { "method": "delete", "uri": "/ui/{name=projects/*/locations/*/persistentResources/*/operations/*}", @@ -6423,6 +6579,18 @@ def __call__( "method": "delete", "uri": "/v1beta1/{name=projects/*/locations/*/models/*/evaluations/*/operations/*}", }, + { + "method": "delete", + "uri": "/v1beta1/{name=projects/*/locations/*/notebookExecutionJobs/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1beta1/{name=projects/*/locations/*/notebookRuntimes/*/operations/*}", + }, + { + "method": "delete", + "uri": 
"/v1beta1/{name=projects/*/locations/*/notebookRuntimeTemplates/*/operations/*}", + }, { "method": "delete", "uri": "/v1beta1/{name=projects/*/locations/*/persistentResources/*/operations/*}", @@ -6689,6 +6857,18 @@ def __call__( "method": "get", "uri": "/ui/{name=projects/*/locations/*/models/*/evaluations/*/operations/*}", }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/notebookExecutionJobs/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/notebookRuntimes/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/notebookRuntimeTemplates/*/operations/*}", + }, { "method": "get", "uri": "/ui/{name=projects/*/locations/*/persistentResources/*/operations/*}", @@ -6877,6 +7057,18 @@ def __call__( "method": "get", "uri": "/v1beta1/{name=projects/*/locations/*/models/*/evaluations/*/operations/*}", }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/notebookExecutionJobs/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/notebookRuntimes/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/notebookRuntimeTemplates/*/operations/*}", + }, { "method": "get", "uri": "/v1beta1/{name=projects/*/locations/*/persistentResources/*/operations/*}", @@ -7140,6 +7332,18 @@ def __call__( "method": "get", "uri": "/ui/{name=projects/*/locations/*/models/*/evaluations/*}/operations", }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/notebookExecutionJobs/*}/operations", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/notebookRuntimes/*}/operations", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/notebookRuntimeTemplates/*}/operations", + }, { "method": "get", "uri": "/ui/{name=projects/*/locations/*/studies/*}/operations", @@ -7328,6 +7532,18 @@ def __call__( "method": "get", "uri": "/v1beta1/{name=projects/*/locations/*/models/*/evaluations/*}/operations", }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/notebookExecutionJobs/*}/operations", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/notebookRuntimes/*}/operations", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/notebookRuntimeTemplates/*}/operations", + }, { "method": "get", "uri": "/v1beta1/{name=projects/*/locations/*/persistentResources/*}/operations", @@ -7591,6 +7807,18 @@ def __call__( "method": "post", "uri": "/ui/{name=projects/*/locations/*/models/*/evaluations/*/operations/*}:wait", }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/notebookExecutionJobs/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/notebookRuntimes/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/notebookRuntimeTemplates/*/operations/*}:wait", + }, { "method": "post", "uri": "/ui/{name=projects/*/locations/*/studies/*/operations/*}:wait", @@ -7779,6 +8007,18 @@ def __call__( "method": "post", "uri": "/v1beta1/{name=projects/*/locations/*/models/*/evaluations/*/operations/*}:wait", }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/notebookExecutionJobs/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/notebookRuntimes/*/operations/*}:wait", + }, + { + "method": "post", + "uri": 
"/v1beta1/{name=projects/*/locations/*/notebookRuntimeTemplates/*/operations/*}:wait", + }, { "method": "post", "uri": "/v1beta1/{name=projects/*/locations/*/persistentResources/*/operations/*}:wait", diff --git a/google/cloud/aiplatform_v1beta1/services/gen_ai_cache_service/async_client.py b/google/cloud/aiplatform_v1beta1/services/gen_ai_cache_service/async_client.py index 6fd5e569eb..ead0ddfc58 100644 --- a/google/cloud/aiplatform_v1beta1/services/gen_ai_cache_service/async_client.py +++ b/google/cloud/aiplatform_v1beta1/services/gen_ai_cache_service/async_client.py @@ -38,6 +38,7 @@ from google.auth import credentials as ga_credentials # type: ignore from google.oauth2 import service_account # type: ignore + try: OptionalRetry = Union[retries.AsyncRetry, gapic_v1.method._MethodDefault, None] except AttributeError: # pragma: NO COVER diff --git a/google/cloud/aiplatform_v1beta1/services/gen_ai_cache_service/transports/rest.py b/google/cloud/aiplatform_v1beta1/services/gen_ai_cache_service/transports/rest.py index feaf5b6d3c..cf63362920 100644 --- a/google/cloud/aiplatform_v1beta1/services/gen_ai_cache_service/transports/rest.py +++ b/google/cloud/aiplatform_v1beta1/services/gen_ai_cache_service/transports/rest.py @@ -1734,6 +1734,18 @@ def __call__( "method": "post", "uri": "/ui/{name=projects/*/locations/*/models/*/evaluations/*/operations/*}:cancel", }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/notebookExecutionJobs/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/notebookRuntimes/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/notebookRuntimeTemplates/*/operations/*}:cancel", + }, { "method": "post", "uri": "/ui/{name=projects/*/locations/*/persistentResources/*/operations/*}:cancel", @@ -1902,6 +1914,18 @@ def __call__( "method": "post", "uri": "/v1beta1/{name=projects/*/locations/*/models/*/evaluations/*/operations/*}:cancel", }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/notebookExecutionJobs/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/notebookRuntimes/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/notebookRuntimeTemplates/*/operations/*}:cancel", + }, { "method": "post", "uri": "/v1beta1/{name=projects/*/locations/*/persistentResources/*/operations/*}:cancel", @@ -2137,6 +2161,18 @@ def __call__( "method": "delete", "uri": "/ui/{name=projects/*/locations/*/models/*/evaluations/*/operations/*}", }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/notebookExecutionJobs/*/operations/*}", + }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/notebookRuntimes/*/operations/*}", + }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/notebookRuntimeTemplates/*/operations/*}", + }, { "method": "delete", "uri": "/ui/{name=projects/*/locations/*/persistentResources/*/operations/*}", @@ -2325,6 +2361,18 @@ def __call__( "method": "delete", "uri": "/v1beta1/{name=projects/*/locations/*/models/*/evaluations/*/operations/*}", }, + { + "method": "delete", + "uri": "/v1beta1/{name=projects/*/locations/*/notebookExecutionJobs/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1beta1/{name=projects/*/locations/*/notebookRuntimes/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1beta1/{name=projects/*/locations/*/notebookRuntimeTemplates/*/operations/*}", + }, 
{ "method": "delete", "uri": "/v1beta1/{name=projects/*/locations/*/persistentResources/*/operations/*}", @@ -2591,6 +2639,18 @@ def __call__( "method": "get", "uri": "/ui/{name=projects/*/locations/*/models/*/evaluations/*/operations/*}", }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/notebookExecutionJobs/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/notebookRuntimes/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/notebookRuntimeTemplates/*/operations/*}", + }, { "method": "get", "uri": "/ui/{name=projects/*/locations/*/persistentResources/*/operations/*}", @@ -2779,6 +2839,18 @@ def __call__( "method": "get", "uri": "/v1beta1/{name=projects/*/locations/*/models/*/evaluations/*/operations/*}", }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/notebookExecutionJobs/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/notebookRuntimes/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/notebookRuntimeTemplates/*/operations/*}", + }, { "method": "get", "uri": "/v1beta1/{name=projects/*/locations/*/persistentResources/*/operations/*}", @@ -3042,6 +3114,18 @@ def __call__( "method": "get", "uri": "/ui/{name=projects/*/locations/*/models/*/evaluations/*}/operations", }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/notebookExecutionJobs/*}/operations", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/notebookRuntimes/*}/operations", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/notebookRuntimeTemplates/*}/operations", + }, { "method": "get", "uri": "/ui/{name=projects/*/locations/*/studies/*}/operations", @@ -3230,6 +3314,18 @@ def __call__( "method": "get", "uri": "/v1beta1/{name=projects/*/locations/*/models/*/evaluations/*}/operations", }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/notebookExecutionJobs/*}/operations", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/notebookRuntimes/*}/operations", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/notebookRuntimeTemplates/*}/operations", + }, { "method": "get", "uri": "/v1beta1/{name=projects/*/locations/*/persistentResources/*}/operations", @@ -3493,6 +3589,18 @@ def __call__( "method": "post", "uri": "/ui/{name=projects/*/locations/*/models/*/evaluations/*/operations/*}:wait", }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/notebookExecutionJobs/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/notebookRuntimes/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/notebookRuntimeTemplates/*/operations/*}:wait", + }, { "method": "post", "uri": "/ui/{name=projects/*/locations/*/studies/*/operations/*}:wait", @@ -3681,6 +3789,18 @@ def __call__( "method": "post", "uri": "/v1beta1/{name=projects/*/locations/*/models/*/evaluations/*/operations/*}:wait", }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/notebookExecutionJobs/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/notebookRuntimes/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/notebookRuntimeTemplates/*/operations/*}:wait", + }, { "method": "post", "uri": 
"/v1beta1/{name=projects/*/locations/*/persistentResources/*/operations/*}:wait", diff --git a/google/cloud/aiplatform_v1beta1/services/gen_ai_tuning_service/async_client.py b/google/cloud/aiplatform_v1beta1/services/gen_ai_tuning_service/async_client.py index 5d5452b25a..6a14f8694c 100644 --- a/google/cloud/aiplatform_v1beta1/services/gen_ai_tuning_service/async_client.py +++ b/google/cloud/aiplatform_v1beta1/services/gen_ai_tuning_service/async_client.py @@ -38,6 +38,7 @@ from google.auth import credentials as ga_credentials # type: ignore from google.oauth2 import service_account # type: ignore + try: OptionalRetry = Union[retries.AsyncRetry, gapic_v1.method._MethodDefault, None] except AttributeError: # pragma: NO COVER diff --git a/google/cloud/aiplatform_v1beta1/services/gen_ai_tuning_service/transports/rest.py b/google/cloud/aiplatform_v1beta1/services/gen_ai_tuning_service/transports/rest.py index 7f8bbc0b28..633ff6f118 100644 --- a/google/cloud/aiplatform_v1beta1/services/gen_ai_tuning_service/transports/rest.py +++ b/google/cloud/aiplatform_v1beta1/services/gen_ai_tuning_service/transports/rest.py @@ -1586,6 +1586,18 @@ def __call__( "method": "post", "uri": "/ui/{name=projects/*/locations/*/models/*/evaluations/*/operations/*}:cancel", }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/notebookExecutionJobs/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/notebookRuntimes/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/notebookRuntimeTemplates/*/operations/*}:cancel", + }, { "method": "post", "uri": "/ui/{name=projects/*/locations/*/persistentResources/*/operations/*}:cancel", @@ -1754,6 +1766,18 @@ def __call__( "method": "post", "uri": "/v1beta1/{name=projects/*/locations/*/models/*/evaluations/*/operations/*}:cancel", }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/notebookExecutionJobs/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/notebookRuntimes/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/notebookRuntimeTemplates/*/operations/*}:cancel", + }, { "method": "post", "uri": "/v1beta1/{name=projects/*/locations/*/persistentResources/*/operations/*}:cancel", @@ -1989,6 +2013,18 @@ def __call__( "method": "delete", "uri": "/ui/{name=projects/*/locations/*/models/*/evaluations/*/operations/*}", }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/notebookExecutionJobs/*/operations/*}", + }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/notebookRuntimes/*/operations/*}", + }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/notebookRuntimeTemplates/*/operations/*}", + }, { "method": "delete", "uri": "/ui/{name=projects/*/locations/*/persistentResources/*/operations/*}", @@ -2177,6 +2213,18 @@ def __call__( "method": "delete", "uri": "/v1beta1/{name=projects/*/locations/*/models/*/evaluations/*/operations/*}", }, + { + "method": "delete", + "uri": "/v1beta1/{name=projects/*/locations/*/notebookExecutionJobs/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1beta1/{name=projects/*/locations/*/notebookRuntimes/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1beta1/{name=projects/*/locations/*/notebookRuntimeTemplates/*/operations/*}", + }, { "method": "delete", "uri": "/v1beta1/{name=projects/*/locations/*/persistentResources/*/operations/*}", @@ 
-2443,6 +2491,18 @@ def __call__( "method": "get", "uri": "/ui/{name=projects/*/locations/*/models/*/evaluations/*/operations/*}", }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/notebookExecutionJobs/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/notebookRuntimes/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/notebookRuntimeTemplates/*/operations/*}", + }, { "method": "get", "uri": "/ui/{name=projects/*/locations/*/persistentResources/*/operations/*}", @@ -2631,6 +2691,18 @@ def __call__( "method": "get", "uri": "/v1beta1/{name=projects/*/locations/*/models/*/evaluations/*/operations/*}", }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/notebookExecutionJobs/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/notebookRuntimes/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/notebookRuntimeTemplates/*/operations/*}", + }, { "method": "get", "uri": "/v1beta1/{name=projects/*/locations/*/persistentResources/*/operations/*}", @@ -2894,6 +2966,18 @@ def __call__( "method": "get", "uri": "/ui/{name=projects/*/locations/*/models/*/evaluations/*}/operations", }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/notebookExecutionJobs/*}/operations", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/notebookRuntimes/*}/operations", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/notebookRuntimeTemplates/*}/operations", + }, { "method": "get", "uri": "/ui/{name=projects/*/locations/*/studies/*}/operations", @@ -3082,6 +3166,18 @@ def __call__( "method": "get", "uri": "/v1beta1/{name=projects/*/locations/*/models/*/evaluations/*}/operations", }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/notebookExecutionJobs/*}/operations", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/notebookRuntimes/*}/operations", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/notebookRuntimeTemplates/*}/operations", + }, { "method": "get", "uri": "/v1beta1/{name=projects/*/locations/*/persistentResources/*}/operations", @@ -3345,6 +3441,18 @@ def __call__( "method": "post", "uri": "/ui/{name=projects/*/locations/*/models/*/evaluations/*/operations/*}:wait", }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/notebookExecutionJobs/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/notebookRuntimes/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/notebookRuntimeTemplates/*/operations/*}:wait", + }, { "method": "post", "uri": "/ui/{name=projects/*/locations/*/studies/*/operations/*}:wait", @@ -3533,6 +3641,18 @@ def __call__( "method": "post", "uri": "/v1beta1/{name=projects/*/locations/*/models/*/evaluations/*/operations/*}:wait", }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/notebookExecutionJobs/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/notebookRuntimes/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/notebookRuntimeTemplates/*/operations/*}:wait", + }, { "method": "post", "uri": "/v1beta1/{name=projects/*/locations/*/persistentResources/*/operations/*}:wait", diff --git 
a/google/cloud/aiplatform_v1beta1/services/index_endpoint_service/async_client.py b/google/cloud/aiplatform_v1beta1/services/index_endpoint_service/async_client.py index 268bd9bca8..18cadbd1ca 100644 --- a/google/cloud/aiplatform_v1beta1/services/index_endpoint_service/async_client.py +++ b/google/cloud/aiplatform_v1beta1/services/index_endpoint_service/async_client.py @@ -38,6 +38,7 @@ from google.auth import credentials as ga_credentials # type: ignore from google.oauth2 import service_account # type: ignore + try: OptionalRetry = Union[retries.AsyncRetry, gapic_v1.method._MethodDefault, None] except AttributeError: # pragma: NO COVER diff --git a/google/cloud/aiplatform_v1beta1/services/index_endpoint_service/transports/rest.py b/google/cloud/aiplatform_v1beta1/services/index_endpoint_service/transports/rest.py index 0100dc9c57..3d188f0cc2 100644 --- a/google/cloud/aiplatform_v1beta1/services/index_endpoint_service/transports/rest.py +++ b/google/cloud/aiplatform_v1beta1/services/index_endpoint_service/transports/rest.py @@ -804,6 +804,18 @@ def operations_client(self) -> operations_v1.AbstractOperationsClient: "method": "post", "uri": "/ui/{name=projects/*/locations/*/models/*/evaluations/*/operations/*}:cancel", }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/notebookExecutionJobs/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/notebookRuntimes/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/notebookRuntimeTemplates/*/operations/*}:cancel", + }, { "method": "post", "uri": "/ui/{name=projects/*/locations/*/persistentResources/*/operations/*}:cancel", @@ -972,6 +984,18 @@ def operations_client(self) -> operations_v1.AbstractOperationsClient: "method": "post", "uri": "/v1beta1/{name=projects/*/locations/*/models/*/evaluations/*/operations/*}:cancel", }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/notebookExecutionJobs/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/notebookRuntimes/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/notebookRuntimeTemplates/*/operations/*}:cancel", + }, { "method": "post", "uri": "/v1beta1/{name=projects/*/locations/*/persistentResources/*/operations/*}:cancel", @@ -1150,6 +1174,18 @@ def operations_client(self) -> operations_v1.AbstractOperationsClient: "method": "delete", "uri": "/ui/{name=projects/*/locations/*/models/*/evaluations/*/operations/*}", }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/notebookExecutionJobs/*/operations/*}", + }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/notebookRuntimes/*/operations/*}", + }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/notebookRuntimeTemplates/*/operations/*}", + }, { "method": "delete", "uri": "/ui/{name=projects/*/locations/*/persistentResources/*/operations/*}", @@ -1338,6 +1374,18 @@ def operations_client(self) -> operations_v1.AbstractOperationsClient: "method": "delete", "uri": "/v1beta1/{name=projects/*/locations/*/models/*/evaluations/*/operations/*}", }, + { + "method": "delete", + "uri": "/v1beta1/{name=projects/*/locations/*/notebookExecutionJobs/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1beta1/{name=projects/*/locations/*/notebookRuntimes/*/operations/*}", + }, + { + "method": "delete", + "uri": 
"/v1beta1/{name=projects/*/locations/*/notebookRuntimeTemplates/*/operations/*}", + }, { "method": "delete", "uri": "/v1beta1/{name=projects/*/locations/*/persistentResources/*/operations/*}", @@ -1544,6 +1592,18 @@ def operations_client(self) -> operations_v1.AbstractOperationsClient: "method": "get", "uri": "/ui/{name=projects/*/locations/*/models/*/evaluations/*/operations/*}", }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/notebookExecutionJobs/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/notebookRuntimes/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/notebookRuntimeTemplates/*/operations/*}", + }, { "method": "get", "uri": "/ui/{name=projects/*/locations/*/persistentResources/*/operations/*}", @@ -1732,6 +1792,18 @@ def operations_client(self) -> operations_v1.AbstractOperationsClient: "method": "get", "uri": "/v1beta1/{name=projects/*/locations/*/models/*/evaluations/*/operations/*}", }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/notebookExecutionJobs/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/notebookRuntimes/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/notebookRuntimeTemplates/*/operations/*}", + }, { "method": "get", "uri": "/v1beta1/{name=projects/*/locations/*/persistentResources/*/operations/*}", @@ -1934,6 +2006,18 @@ def operations_client(self) -> operations_v1.AbstractOperationsClient: "method": "get", "uri": "/ui/{name=projects/*/locations/*/models/*/evaluations/*}/operations", }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/notebookExecutionJobs/*}/operations", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/notebookRuntimes/*}/operations", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/notebookRuntimeTemplates/*}/operations", + }, { "method": "get", "uri": "/ui/{name=projects/*/locations/*/studies/*}/operations", @@ -2122,6 +2206,18 @@ def operations_client(self) -> operations_v1.AbstractOperationsClient: "method": "get", "uri": "/v1beta1/{name=projects/*/locations/*/models/*/evaluations/*}/operations", }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/notebookExecutionJobs/*}/operations", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/notebookRuntimes/*}/operations", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/notebookRuntimeTemplates/*}/operations", + }, { "method": "get", "uri": "/v1beta1/{name=projects/*/locations/*/persistentResources/*}/operations", @@ -2324,6 +2420,18 @@ def operations_client(self) -> operations_v1.AbstractOperationsClient: "method": "post", "uri": "/ui/{name=projects/*/locations/*/models/*/evaluations/*/operations/*}:wait", }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/notebookExecutionJobs/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/notebookRuntimes/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/notebookRuntimeTemplates/*/operations/*}:wait", + }, { "method": "post", "uri": "/ui/{name=projects/*/locations/*/studies/*/operations/*}:wait", @@ -2512,6 +2620,18 @@ def operations_client(self) -> operations_v1.AbstractOperationsClient: "method": "post", "uri": "/v1beta1/{name=projects/*/locations/*/models/*/evaluations/*/operations/*}:wait", }, + { + 
"method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/notebookExecutionJobs/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/notebookRuntimes/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/notebookRuntimeTemplates/*/operations/*}:wait", + }, { "method": "post", "uri": "/v1beta1/{name=projects/*/locations/*/persistentResources/*/operations/*}:wait", @@ -4101,6 +4221,18 @@ def __call__( "method": "post", "uri": "/ui/{name=projects/*/locations/*/models/*/evaluations/*/operations/*}:cancel", }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/notebookExecutionJobs/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/notebookRuntimes/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/notebookRuntimeTemplates/*/operations/*}:cancel", + }, { "method": "post", "uri": "/ui/{name=projects/*/locations/*/persistentResources/*/operations/*}:cancel", @@ -4269,6 +4401,18 @@ def __call__( "method": "post", "uri": "/v1beta1/{name=projects/*/locations/*/models/*/evaluations/*/operations/*}:cancel", }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/notebookExecutionJobs/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/notebookRuntimes/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/notebookRuntimeTemplates/*/operations/*}:cancel", + }, { "method": "post", "uri": "/v1beta1/{name=projects/*/locations/*/persistentResources/*/operations/*}:cancel", @@ -4504,6 +4648,18 @@ def __call__( "method": "delete", "uri": "/ui/{name=projects/*/locations/*/models/*/evaluations/*/operations/*}", }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/notebookExecutionJobs/*/operations/*}", + }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/notebookRuntimes/*/operations/*}", + }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/notebookRuntimeTemplates/*/operations/*}", + }, { "method": "delete", "uri": "/ui/{name=projects/*/locations/*/persistentResources/*/operations/*}", @@ -4692,6 +4848,18 @@ def __call__( "method": "delete", "uri": "/v1beta1/{name=projects/*/locations/*/models/*/evaluations/*/operations/*}", }, + { + "method": "delete", + "uri": "/v1beta1/{name=projects/*/locations/*/notebookExecutionJobs/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1beta1/{name=projects/*/locations/*/notebookRuntimes/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1beta1/{name=projects/*/locations/*/notebookRuntimeTemplates/*/operations/*}", + }, { "method": "delete", "uri": "/v1beta1/{name=projects/*/locations/*/persistentResources/*/operations/*}", @@ -4958,6 +5126,18 @@ def __call__( "method": "get", "uri": "/ui/{name=projects/*/locations/*/models/*/evaluations/*/operations/*}", }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/notebookExecutionJobs/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/notebookRuntimes/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/notebookRuntimeTemplates/*/operations/*}", + }, { "method": "get", "uri": "/ui/{name=projects/*/locations/*/persistentResources/*/operations/*}", @@ -5146,6 +5326,18 @@ def __call__( "method": "get", "uri": 
"/v1beta1/{name=projects/*/locations/*/models/*/evaluations/*/operations/*}", }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/notebookExecutionJobs/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/notebookRuntimes/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/notebookRuntimeTemplates/*/operations/*}", + }, { "method": "get", "uri": "/v1beta1/{name=projects/*/locations/*/persistentResources/*/operations/*}", @@ -5409,6 +5601,18 @@ def __call__( "method": "get", "uri": "/ui/{name=projects/*/locations/*/models/*/evaluations/*}/operations", }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/notebookExecutionJobs/*}/operations", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/notebookRuntimes/*}/operations", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/notebookRuntimeTemplates/*}/operations", + }, { "method": "get", "uri": "/ui/{name=projects/*/locations/*/studies/*}/operations", @@ -5597,6 +5801,18 @@ def __call__( "method": "get", "uri": "/v1beta1/{name=projects/*/locations/*/models/*/evaluations/*}/operations", }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/notebookExecutionJobs/*}/operations", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/notebookRuntimes/*}/operations", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/notebookRuntimeTemplates/*}/operations", + }, { "method": "get", "uri": "/v1beta1/{name=projects/*/locations/*/persistentResources/*}/operations", @@ -5860,6 +6076,18 @@ def __call__( "method": "post", "uri": "/ui/{name=projects/*/locations/*/models/*/evaluations/*/operations/*}:wait", }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/notebookExecutionJobs/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/notebookRuntimes/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/notebookRuntimeTemplates/*/operations/*}:wait", + }, { "method": "post", "uri": "/ui/{name=projects/*/locations/*/studies/*/operations/*}:wait", @@ -6048,6 +6276,18 @@ def __call__( "method": "post", "uri": "/v1beta1/{name=projects/*/locations/*/models/*/evaluations/*/operations/*}:wait", }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/notebookExecutionJobs/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/notebookRuntimes/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/notebookRuntimeTemplates/*/operations/*}:wait", + }, { "method": "post", "uri": "/v1beta1/{name=projects/*/locations/*/persistentResources/*/operations/*}:wait", diff --git a/google/cloud/aiplatform_v1beta1/services/index_service/async_client.py b/google/cloud/aiplatform_v1beta1/services/index_service/async_client.py index 58617fa0ef..389f3807f1 100644 --- a/google/cloud/aiplatform_v1beta1/services/index_service/async_client.py +++ b/google/cloud/aiplatform_v1beta1/services/index_service/async_client.py @@ -38,6 +38,7 @@ from google.auth import credentials as ga_credentials # type: ignore from google.oauth2 import service_account # type: ignore + try: OptionalRetry = Union[retries.AsyncRetry, gapic_v1.method._MethodDefault, None] except AttributeError: # pragma: NO COVER diff --git 
a/google/cloud/aiplatform_v1beta1/services/index_service/transports/rest.py b/google/cloud/aiplatform_v1beta1/services/index_service/transports/rest.py index c612ab16d6..b347df3a85 100644 --- a/google/cloud/aiplatform_v1beta1/services/index_service/transports/rest.py +++ b/google/cloud/aiplatform_v1beta1/services/index_service/transports/rest.py @@ -756,6 +756,18 @@ def operations_client(self) -> operations_v1.AbstractOperationsClient: "method": "post", "uri": "/ui/{name=projects/*/locations/*/models/*/evaluations/*/operations/*}:cancel", }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/notebookExecutionJobs/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/notebookRuntimes/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/notebookRuntimeTemplates/*/operations/*}:cancel", + }, { "method": "post", "uri": "/ui/{name=projects/*/locations/*/persistentResources/*/operations/*}:cancel", @@ -924,6 +936,18 @@ def operations_client(self) -> operations_v1.AbstractOperationsClient: "method": "post", "uri": "/v1beta1/{name=projects/*/locations/*/models/*/evaluations/*/operations/*}:cancel", }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/notebookExecutionJobs/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/notebookRuntimes/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/notebookRuntimeTemplates/*/operations/*}:cancel", + }, { "method": "post", "uri": "/v1beta1/{name=projects/*/locations/*/persistentResources/*/operations/*}:cancel", @@ -1102,6 +1126,18 @@ def operations_client(self) -> operations_v1.AbstractOperationsClient: "method": "delete", "uri": "/ui/{name=projects/*/locations/*/models/*/evaluations/*/operations/*}", }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/notebookExecutionJobs/*/operations/*}", + }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/notebookRuntimes/*/operations/*}", + }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/notebookRuntimeTemplates/*/operations/*}", + }, { "method": "delete", "uri": "/ui/{name=projects/*/locations/*/persistentResources/*/operations/*}", @@ -1290,6 +1326,18 @@ def operations_client(self) -> operations_v1.AbstractOperationsClient: "method": "delete", "uri": "/v1beta1/{name=projects/*/locations/*/models/*/evaluations/*/operations/*}", }, + { + "method": "delete", + "uri": "/v1beta1/{name=projects/*/locations/*/notebookExecutionJobs/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1beta1/{name=projects/*/locations/*/notebookRuntimes/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1beta1/{name=projects/*/locations/*/notebookRuntimeTemplates/*/operations/*}", + }, { "method": "delete", "uri": "/v1beta1/{name=projects/*/locations/*/persistentResources/*/operations/*}", @@ -1496,6 +1544,18 @@ def operations_client(self) -> operations_v1.AbstractOperationsClient: "method": "get", "uri": "/ui/{name=projects/*/locations/*/models/*/evaluations/*/operations/*}", }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/notebookExecutionJobs/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/notebookRuntimes/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/notebookRuntimeTemplates/*/operations/*}", + }, { "method": "get", "uri": 
"/ui/{name=projects/*/locations/*/persistentResources/*/operations/*}", @@ -1684,6 +1744,18 @@ def operations_client(self) -> operations_v1.AbstractOperationsClient: "method": "get", "uri": "/v1beta1/{name=projects/*/locations/*/models/*/evaluations/*/operations/*}", }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/notebookExecutionJobs/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/notebookRuntimes/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/notebookRuntimeTemplates/*/operations/*}", + }, { "method": "get", "uri": "/v1beta1/{name=projects/*/locations/*/persistentResources/*/operations/*}", @@ -1886,6 +1958,18 @@ def operations_client(self) -> operations_v1.AbstractOperationsClient: "method": "get", "uri": "/ui/{name=projects/*/locations/*/models/*/evaluations/*}/operations", }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/notebookExecutionJobs/*}/operations", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/notebookRuntimes/*}/operations", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/notebookRuntimeTemplates/*}/operations", + }, { "method": "get", "uri": "/ui/{name=projects/*/locations/*/studies/*}/operations", @@ -2074,6 +2158,18 @@ def operations_client(self) -> operations_v1.AbstractOperationsClient: "method": "get", "uri": "/v1beta1/{name=projects/*/locations/*/models/*/evaluations/*}/operations", }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/notebookExecutionJobs/*}/operations", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/notebookRuntimes/*}/operations", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/notebookRuntimeTemplates/*}/operations", + }, { "method": "get", "uri": "/v1beta1/{name=projects/*/locations/*/persistentResources/*}/operations", @@ -2276,6 +2372,18 @@ def operations_client(self) -> operations_v1.AbstractOperationsClient: "method": "post", "uri": "/ui/{name=projects/*/locations/*/models/*/evaluations/*/operations/*}:wait", }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/notebookExecutionJobs/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/notebookRuntimes/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/notebookRuntimeTemplates/*/operations/*}:wait", + }, { "method": "post", "uri": "/ui/{name=projects/*/locations/*/studies/*/operations/*}:wait", @@ -2464,6 +2572,18 @@ def operations_client(self) -> operations_v1.AbstractOperationsClient: "method": "post", "uri": "/v1beta1/{name=projects/*/locations/*/models/*/evaluations/*/operations/*}:wait", }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/notebookExecutionJobs/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/notebookRuntimes/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/notebookRuntimeTemplates/*/operations/*}:wait", + }, { "method": "post", "uri": "/v1beta1/{name=projects/*/locations/*/persistentResources/*/operations/*}:wait", @@ -3930,6 +4050,18 @@ def __call__( "method": "post", "uri": "/ui/{name=projects/*/locations/*/models/*/evaluations/*/operations/*}:cancel", }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/notebookExecutionJobs/*/operations/*}:cancel", + }, + { + 
"method": "post", + "uri": "/ui/{name=projects/*/locations/*/notebookRuntimes/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/notebookRuntimeTemplates/*/operations/*}:cancel", + }, { "method": "post", "uri": "/ui/{name=projects/*/locations/*/persistentResources/*/operations/*}:cancel", @@ -4098,6 +4230,18 @@ def __call__( "method": "post", "uri": "/v1beta1/{name=projects/*/locations/*/models/*/evaluations/*/operations/*}:cancel", }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/notebookExecutionJobs/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/notebookRuntimes/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/notebookRuntimeTemplates/*/operations/*}:cancel", + }, { "method": "post", "uri": "/v1beta1/{name=projects/*/locations/*/persistentResources/*/operations/*}:cancel", @@ -4333,6 +4477,18 @@ def __call__( "method": "delete", "uri": "/ui/{name=projects/*/locations/*/models/*/evaluations/*/operations/*}", }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/notebookExecutionJobs/*/operations/*}", + }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/notebookRuntimes/*/operations/*}", + }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/notebookRuntimeTemplates/*/operations/*}", + }, { "method": "delete", "uri": "/ui/{name=projects/*/locations/*/persistentResources/*/operations/*}", @@ -4521,6 +4677,18 @@ def __call__( "method": "delete", "uri": "/v1beta1/{name=projects/*/locations/*/models/*/evaluations/*/operations/*}", }, + { + "method": "delete", + "uri": "/v1beta1/{name=projects/*/locations/*/notebookExecutionJobs/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1beta1/{name=projects/*/locations/*/notebookRuntimes/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1beta1/{name=projects/*/locations/*/notebookRuntimeTemplates/*/operations/*}", + }, { "method": "delete", "uri": "/v1beta1/{name=projects/*/locations/*/persistentResources/*/operations/*}", @@ -4787,6 +4955,18 @@ def __call__( "method": "get", "uri": "/ui/{name=projects/*/locations/*/models/*/evaluations/*/operations/*}", }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/notebookExecutionJobs/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/notebookRuntimes/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/notebookRuntimeTemplates/*/operations/*}", + }, { "method": "get", "uri": "/ui/{name=projects/*/locations/*/persistentResources/*/operations/*}", @@ -4975,6 +5155,18 @@ def __call__( "method": "get", "uri": "/v1beta1/{name=projects/*/locations/*/models/*/evaluations/*/operations/*}", }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/notebookExecutionJobs/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/notebookRuntimes/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/notebookRuntimeTemplates/*/operations/*}", + }, { "method": "get", "uri": "/v1beta1/{name=projects/*/locations/*/persistentResources/*/operations/*}", @@ -5238,6 +5430,18 @@ def __call__( "method": "get", "uri": "/ui/{name=projects/*/locations/*/models/*/evaluations/*}/operations", }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/notebookExecutionJobs/*}/operations", + }, 
+ { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/notebookRuntimes/*}/operations", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/notebookRuntimeTemplates/*}/operations", + }, { "method": "get", "uri": "/ui/{name=projects/*/locations/*/studies/*}/operations", @@ -5426,6 +5630,18 @@ def __call__( "method": "get", "uri": "/v1beta1/{name=projects/*/locations/*/models/*/evaluations/*}/operations", }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/notebookExecutionJobs/*}/operations", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/notebookRuntimes/*}/operations", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/notebookRuntimeTemplates/*}/operations", + }, { "method": "get", "uri": "/v1beta1/{name=projects/*/locations/*/persistentResources/*}/operations", @@ -5689,6 +5905,18 @@ def __call__( "method": "post", "uri": "/ui/{name=projects/*/locations/*/models/*/evaluations/*/operations/*}:wait", }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/notebookExecutionJobs/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/notebookRuntimes/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/notebookRuntimeTemplates/*/operations/*}:wait", + }, { "method": "post", "uri": "/ui/{name=projects/*/locations/*/studies/*/operations/*}:wait", @@ -5877,6 +6105,18 @@ def __call__( "method": "post", "uri": "/v1beta1/{name=projects/*/locations/*/models/*/evaluations/*/operations/*}:wait", }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/notebookExecutionJobs/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/notebookRuntimes/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/notebookRuntimeTemplates/*/operations/*}:wait", + }, { "method": "post", "uri": "/v1beta1/{name=projects/*/locations/*/persistentResources/*/operations/*}:wait", diff --git a/google/cloud/aiplatform_v1beta1/services/job_service/async_client.py b/google/cloud/aiplatform_v1beta1/services/job_service/async_client.py index a225f26a2f..758b4446a7 100644 --- a/google/cloud/aiplatform_v1beta1/services/job_service/async_client.py +++ b/google/cloud/aiplatform_v1beta1/services/job_service/async_client.py @@ -38,6 +38,7 @@ from google.auth import credentials as ga_credentials # type: ignore from google.oauth2 import service_account # type: ignore + try: OptionalRetry = Union[retries.AsyncRetry, gapic_v1.method._MethodDefault, None] except AttributeError: # pragma: NO COVER diff --git a/google/cloud/aiplatform_v1beta1/services/job_service/transports/rest.py b/google/cloud/aiplatform_v1beta1/services/job_service/transports/rest.py index 928f54b644..e40a54de38 100644 --- a/google/cloud/aiplatform_v1beta1/services/job_service/transports/rest.py +++ b/google/cloud/aiplatform_v1beta1/services/job_service/transports/rest.py @@ -1562,6 +1562,18 @@ def operations_client(self) -> operations_v1.AbstractOperationsClient: "method": "post", "uri": "/ui/{name=projects/*/locations/*/models/*/evaluations/*/operations/*}:cancel", }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/notebookExecutionJobs/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/notebookRuntimes/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": 
"/ui/{name=projects/*/locations/*/notebookRuntimeTemplates/*/operations/*}:cancel", + }, { "method": "post", "uri": "/ui/{name=projects/*/locations/*/persistentResources/*/operations/*}:cancel", @@ -1730,6 +1742,18 @@ def operations_client(self) -> operations_v1.AbstractOperationsClient: "method": "post", "uri": "/v1beta1/{name=projects/*/locations/*/models/*/evaluations/*/operations/*}:cancel", }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/notebookExecutionJobs/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/notebookRuntimes/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/notebookRuntimeTemplates/*/operations/*}:cancel", + }, { "method": "post", "uri": "/v1beta1/{name=projects/*/locations/*/persistentResources/*/operations/*}:cancel", @@ -1908,6 +1932,18 @@ def operations_client(self) -> operations_v1.AbstractOperationsClient: "method": "delete", "uri": "/ui/{name=projects/*/locations/*/models/*/evaluations/*/operations/*}", }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/notebookExecutionJobs/*/operations/*}", + }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/notebookRuntimes/*/operations/*}", + }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/notebookRuntimeTemplates/*/operations/*}", + }, { "method": "delete", "uri": "/ui/{name=projects/*/locations/*/persistentResources/*/operations/*}", @@ -2096,6 +2132,18 @@ def operations_client(self) -> operations_v1.AbstractOperationsClient: "method": "delete", "uri": "/v1beta1/{name=projects/*/locations/*/models/*/evaluations/*/operations/*}", }, + { + "method": "delete", + "uri": "/v1beta1/{name=projects/*/locations/*/notebookExecutionJobs/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1beta1/{name=projects/*/locations/*/notebookRuntimes/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1beta1/{name=projects/*/locations/*/notebookRuntimeTemplates/*/operations/*}", + }, { "method": "delete", "uri": "/v1beta1/{name=projects/*/locations/*/persistentResources/*/operations/*}", @@ -2302,6 +2350,18 @@ def operations_client(self) -> operations_v1.AbstractOperationsClient: "method": "get", "uri": "/ui/{name=projects/*/locations/*/models/*/evaluations/*/operations/*}", }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/notebookExecutionJobs/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/notebookRuntimes/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/notebookRuntimeTemplates/*/operations/*}", + }, { "method": "get", "uri": "/ui/{name=projects/*/locations/*/persistentResources/*/operations/*}", @@ -2490,6 +2550,18 @@ def operations_client(self) -> operations_v1.AbstractOperationsClient: "method": "get", "uri": "/v1beta1/{name=projects/*/locations/*/models/*/evaluations/*/operations/*}", }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/notebookExecutionJobs/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/notebookRuntimes/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/notebookRuntimeTemplates/*/operations/*}", + }, { "method": "get", "uri": "/v1beta1/{name=projects/*/locations/*/persistentResources/*/operations/*}", @@ -2692,6 +2764,18 @@ def operations_client(self) -> operations_v1.AbstractOperationsClient: 
"method": "get", "uri": "/ui/{name=projects/*/locations/*/models/*/evaluations/*}/operations", }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/notebookExecutionJobs/*}/operations", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/notebookRuntimes/*}/operations", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/notebookRuntimeTemplates/*}/operations", + }, { "method": "get", "uri": "/ui/{name=projects/*/locations/*/studies/*}/operations", @@ -2880,6 +2964,18 @@ def operations_client(self) -> operations_v1.AbstractOperationsClient: "method": "get", "uri": "/v1beta1/{name=projects/*/locations/*/models/*/evaluations/*}/operations", }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/notebookExecutionJobs/*}/operations", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/notebookRuntimes/*}/operations", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/notebookRuntimeTemplates/*}/operations", + }, { "method": "get", "uri": "/v1beta1/{name=projects/*/locations/*/persistentResources/*}/operations", @@ -3082,6 +3178,18 @@ def operations_client(self) -> operations_v1.AbstractOperationsClient: "method": "post", "uri": "/ui/{name=projects/*/locations/*/models/*/evaluations/*/operations/*}:wait", }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/notebookExecutionJobs/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/notebookRuntimes/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/notebookRuntimeTemplates/*/operations/*}:wait", + }, { "method": "post", "uri": "/ui/{name=projects/*/locations/*/studies/*/operations/*}:wait", @@ -3270,6 +3378,18 @@ def operations_client(self) -> operations_v1.AbstractOperationsClient: "method": "post", "uri": "/v1beta1/{name=projects/*/locations/*/models/*/evaluations/*/operations/*}:wait", }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/notebookExecutionJobs/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/notebookRuntimes/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/notebookRuntimeTemplates/*/operations/*}:wait", + }, { "method": "post", "uri": "/v1beta1/{name=projects/*/locations/*/persistentResources/*/operations/*}:wait", @@ -7553,6 +7673,18 @@ def __call__( "method": "post", "uri": "/ui/{name=projects/*/locations/*/models/*/evaluations/*/operations/*}:cancel", }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/notebookExecutionJobs/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/notebookRuntimes/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/notebookRuntimeTemplates/*/operations/*}:cancel", + }, { "method": "post", "uri": "/ui/{name=projects/*/locations/*/persistentResources/*/operations/*}:cancel", @@ -7721,6 +7853,18 @@ def __call__( "method": "post", "uri": "/v1beta1/{name=projects/*/locations/*/models/*/evaluations/*/operations/*}:cancel", }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/notebookExecutionJobs/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/notebookRuntimes/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": 
"/v1beta1/{name=projects/*/locations/*/notebookRuntimeTemplates/*/operations/*}:cancel", + }, { "method": "post", "uri": "/v1beta1/{name=projects/*/locations/*/persistentResources/*/operations/*}:cancel", @@ -7956,6 +8100,18 @@ def __call__( "method": "delete", "uri": "/ui/{name=projects/*/locations/*/models/*/evaluations/*/operations/*}", }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/notebookExecutionJobs/*/operations/*}", + }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/notebookRuntimes/*/operations/*}", + }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/notebookRuntimeTemplates/*/operations/*}", + }, { "method": "delete", "uri": "/ui/{name=projects/*/locations/*/persistentResources/*/operations/*}", @@ -8144,6 +8300,18 @@ def __call__( "method": "delete", "uri": "/v1beta1/{name=projects/*/locations/*/models/*/evaluations/*/operations/*}", }, + { + "method": "delete", + "uri": "/v1beta1/{name=projects/*/locations/*/notebookExecutionJobs/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1beta1/{name=projects/*/locations/*/notebookRuntimes/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1beta1/{name=projects/*/locations/*/notebookRuntimeTemplates/*/operations/*}", + }, { "method": "delete", "uri": "/v1beta1/{name=projects/*/locations/*/persistentResources/*/operations/*}", @@ -8410,6 +8578,18 @@ def __call__( "method": "get", "uri": "/ui/{name=projects/*/locations/*/models/*/evaluations/*/operations/*}", }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/notebookExecutionJobs/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/notebookRuntimes/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/notebookRuntimeTemplates/*/operations/*}", + }, { "method": "get", "uri": "/ui/{name=projects/*/locations/*/persistentResources/*/operations/*}", @@ -8598,6 +8778,18 @@ def __call__( "method": "get", "uri": "/v1beta1/{name=projects/*/locations/*/models/*/evaluations/*/operations/*}", }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/notebookExecutionJobs/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/notebookRuntimes/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/notebookRuntimeTemplates/*/operations/*}", + }, { "method": "get", "uri": "/v1beta1/{name=projects/*/locations/*/persistentResources/*/operations/*}", @@ -8861,6 +9053,18 @@ def __call__( "method": "get", "uri": "/ui/{name=projects/*/locations/*/models/*/evaluations/*}/operations", }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/notebookExecutionJobs/*}/operations", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/notebookRuntimes/*}/operations", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/notebookRuntimeTemplates/*}/operations", + }, { "method": "get", "uri": "/ui/{name=projects/*/locations/*/studies/*}/operations", @@ -9049,6 +9253,18 @@ def __call__( "method": "get", "uri": "/v1beta1/{name=projects/*/locations/*/models/*/evaluations/*}/operations", }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/notebookExecutionJobs/*}/operations", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/notebookRuntimes/*}/operations", + }, + { + "method": "get", + "uri": 
"/v1beta1/{name=projects/*/locations/*/notebookRuntimeTemplates/*}/operations", + }, { "method": "get", "uri": "/v1beta1/{name=projects/*/locations/*/persistentResources/*}/operations", @@ -9312,6 +9528,18 @@ def __call__( "method": "post", "uri": "/ui/{name=projects/*/locations/*/models/*/evaluations/*/operations/*}:wait", }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/notebookExecutionJobs/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/notebookRuntimes/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/notebookRuntimeTemplates/*/operations/*}:wait", + }, { "method": "post", "uri": "/ui/{name=projects/*/locations/*/studies/*/operations/*}:wait", @@ -9500,6 +9728,18 @@ def __call__( "method": "post", "uri": "/v1beta1/{name=projects/*/locations/*/models/*/evaluations/*/operations/*}:wait", }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/notebookExecutionJobs/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/notebookRuntimes/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/notebookRuntimeTemplates/*/operations/*}:wait", + }, { "method": "post", "uri": "/v1beta1/{name=projects/*/locations/*/persistentResources/*/operations/*}:wait", diff --git a/google/cloud/aiplatform_v1beta1/services/llm_utility_service/async_client.py b/google/cloud/aiplatform_v1beta1/services/llm_utility_service/async_client.py index 451e42770a..67f41c3171 100644 --- a/google/cloud/aiplatform_v1beta1/services/llm_utility_service/async_client.py +++ b/google/cloud/aiplatform_v1beta1/services/llm_utility_service/async_client.py @@ -38,6 +38,7 @@ from google.auth import credentials as ga_credentials # type: ignore from google.oauth2 import service_account # type: ignore + try: OptionalRetry = Union[retries.AsyncRetry, gapic_v1.method._MethodDefault, None] except AttributeError: # pragma: NO COVER diff --git a/google/cloud/aiplatform_v1beta1/services/llm_utility_service/transports/rest.py b/google/cloud/aiplatform_v1beta1/services/llm_utility_service/transports/rest.py index 27967ed52e..4ae07a330a 100644 --- a/google/cloud/aiplatform_v1beta1/services/llm_utility_service/transports/rest.py +++ b/google/cloud/aiplatform_v1beta1/services/llm_utility_service/transports/rest.py @@ -1227,6 +1227,18 @@ def __call__( "method": "post", "uri": "/ui/{name=projects/*/locations/*/models/*/evaluations/*/operations/*}:cancel", }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/notebookExecutionJobs/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/notebookRuntimes/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/notebookRuntimeTemplates/*/operations/*}:cancel", + }, { "method": "post", "uri": "/ui/{name=projects/*/locations/*/persistentResources/*/operations/*}:cancel", @@ -1395,6 +1407,18 @@ def __call__( "method": "post", "uri": "/v1beta1/{name=projects/*/locations/*/models/*/evaluations/*/operations/*}:cancel", }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/notebookExecutionJobs/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/notebookRuntimes/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/notebookRuntimeTemplates/*/operations/*}:cancel", + }, { 
"method": "post", "uri": "/v1beta1/{name=projects/*/locations/*/persistentResources/*/operations/*}:cancel", @@ -1630,6 +1654,18 @@ def __call__( "method": "delete", "uri": "/ui/{name=projects/*/locations/*/models/*/evaluations/*/operations/*}", }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/notebookExecutionJobs/*/operations/*}", + }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/notebookRuntimes/*/operations/*}", + }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/notebookRuntimeTemplates/*/operations/*}", + }, { "method": "delete", "uri": "/ui/{name=projects/*/locations/*/persistentResources/*/operations/*}", @@ -1818,6 +1854,18 @@ def __call__( "method": "delete", "uri": "/v1beta1/{name=projects/*/locations/*/models/*/evaluations/*/operations/*}", }, + { + "method": "delete", + "uri": "/v1beta1/{name=projects/*/locations/*/notebookExecutionJobs/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1beta1/{name=projects/*/locations/*/notebookRuntimes/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1beta1/{name=projects/*/locations/*/notebookRuntimeTemplates/*/operations/*}", + }, { "method": "delete", "uri": "/v1beta1/{name=projects/*/locations/*/persistentResources/*/operations/*}", @@ -2084,6 +2132,18 @@ def __call__( "method": "get", "uri": "/ui/{name=projects/*/locations/*/models/*/evaluations/*/operations/*}", }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/notebookExecutionJobs/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/notebookRuntimes/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/notebookRuntimeTemplates/*/operations/*}", + }, { "method": "get", "uri": "/ui/{name=projects/*/locations/*/persistentResources/*/operations/*}", @@ -2272,6 +2332,18 @@ def __call__( "method": "get", "uri": "/v1beta1/{name=projects/*/locations/*/models/*/evaluations/*/operations/*}", }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/notebookExecutionJobs/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/notebookRuntimes/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/notebookRuntimeTemplates/*/operations/*}", + }, { "method": "get", "uri": "/v1beta1/{name=projects/*/locations/*/persistentResources/*/operations/*}", @@ -2535,6 +2607,18 @@ def __call__( "method": "get", "uri": "/ui/{name=projects/*/locations/*/models/*/evaluations/*}/operations", }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/notebookExecutionJobs/*}/operations", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/notebookRuntimes/*}/operations", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/notebookRuntimeTemplates/*}/operations", + }, { "method": "get", "uri": "/ui/{name=projects/*/locations/*/studies/*}/operations", @@ -2723,6 +2807,18 @@ def __call__( "method": "get", "uri": "/v1beta1/{name=projects/*/locations/*/models/*/evaluations/*}/operations", }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/notebookExecutionJobs/*}/operations", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/notebookRuntimes/*}/operations", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/notebookRuntimeTemplates/*}/operations", + }, { "method": "get", "uri": 
"/v1beta1/{name=projects/*/locations/*/persistentResources/*}/operations", @@ -2986,6 +3082,18 @@ def __call__( "method": "post", "uri": "/ui/{name=projects/*/locations/*/models/*/evaluations/*/operations/*}:wait", }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/notebookExecutionJobs/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/notebookRuntimes/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/notebookRuntimeTemplates/*/operations/*}:wait", + }, { "method": "post", "uri": "/ui/{name=projects/*/locations/*/studies/*/operations/*}:wait", @@ -3174,6 +3282,18 @@ def __call__( "method": "post", "uri": "/v1beta1/{name=projects/*/locations/*/models/*/evaluations/*/operations/*}:wait", }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/notebookExecutionJobs/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/notebookRuntimes/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/notebookRuntimeTemplates/*/operations/*}:wait", + }, { "method": "post", "uri": "/v1beta1/{name=projects/*/locations/*/persistentResources/*/operations/*}:wait", diff --git a/google/cloud/aiplatform_v1beta1/services/match_service/async_client.py b/google/cloud/aiplatform_v1beta1/services/match_service/async_client.py index 26f7ea57c7..f1611c504a 100644 --- a/google/cloud/aiplatform_v1beta1/services/match_service/async_client.py +++ b/google/cloud/aiplatform_v1beta1/services/match_service/async_client.py @@ -38,6 +38,7 @@ from google.auth import credentials as ga_credentials # type: ignore from google.oauth2 import service_account # type: ignore + try: OptionalRetry = Union[retries.AsyncRetry, gapic_v1.method._MethodDefault, None] except AttributeError: # pragma: NO COVER diff --git a/google/cloud/aiplatform_v1beta1/services/match_service/transports/rest.py b/google/cloud/aiplatform_v1beta1/services/match_service/transports/rest.py index 36d8b09e88..b70be57035 100644 --- a/google/cloud/aiplatform_v1beta1/services/match_service/transports/rest.py +++ b/google/cloud/aiplatform_v1beta1/services/match_service/transports/rest.py @@ -1356,6 +1356,18 @@ def __call__( "method": "post", "uri": "/ui/{name=projects/*/locations/*/models/*/evaluations/*/operations/*}:cancel", }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/notebookExecutionJobs/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/notebookRuntimes/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/notebookRuntimeTemplates/*/operations/*}:cancel", + }, { "method": "post", "uri": "/ui/{name=projects/*/locations/*/persistentResources/*/operations/*}:cancel", @@ -1524,6 +1536,18 @@ def __call__( "method": "post", "uri": "/v1beta1/{name=projects/*/locations/*/models/*/evaluations/*/operations/*}:cancel", }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/notebookExecutionJobs/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/notebookRuntimes/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/notebookRuntimeTemplates/*/operations/*}:cancel", + }, { "method": "post", "uri": "/v1beta1/{name=projects/*/locations/*/persistentResources/*/operations/*}:cancel", @@ -1759,6 +1783,18 @@ def __call__( "method": "delete", 
"uri": "/ui/{name=projects/*/locations/*/models/*/evaluations/*/operations/*}", }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/notebookExecutionJobs/*/operations/*}", + }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/notebookRuntimes/*/operations/*}", + }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/notebookRuntimeTemplates/*/operations/*}", + }, { "method": "delete", "uri": "/ui/{name=projects/*/locations/*/persistentResources/*/operations/*}", @@ -1947,6 +1983,18 @@ def __call__( "method": "delete", "uri": "/v1beta1/{name=projects/*/locations/*/models/*/evaluations/*/operations/*}", }, + { + "method": "delete", + "uri": "/v1beta1/{name=projects/*/locations/*/notebookExecutionJobs/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1beta1/{name=projects/*/locations/*/notebookRuntimes/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1beta1/{name=projects/*/locations/*/notebookRuntimeTemplates/*/operations/*}", + }, { "method": "delete", "uri": "/v1beta1/{name=projects/*/locations/*/persistentResources/*/operations/*}", @@ -2213,6 +2261,18 @@ def __call__( "method": "get", "uri": "/ui/{name=projects/*/locations/*/models/*/evaluations/*/operations/*}", }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/notebookExecutionJobs/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/notebookRuntimes/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/notebookRuntimeTemplates/*/operations/*}", + }, { "method": "get", "uri": "/ui/{name=projects/*/locations/*/persistentResources/*/operations/*}", @@ -2401,6 +2461,18 @@ def __call__( "method": "get", "uri": "/v1beta1/{name=projects/*/locations/*/models/*/evaluations/*/operations/*}", }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/notebookExecutionJobs/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/notebookRuntimes/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/notebookRuntimeTemplates/*/operations/*}", + }, { "method": "get", "uri": "/v1beta1/{name=projects/*/locations/*/persistentResources/*/operations/*}", @@ -2664,6 +2736,18 @@ def __call__( "method": "get", "uri": "/ui/{name=projects/*/locations/*/models/*/evaluations/*}/operations", }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/notebookExecutionJobs/*}/operations", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/notebookRuntimes/*}/operations", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/notebookRuntimeTemplates/*}/operations", + }, { "method": "get", "uri": "/ui/{name=projects/*/locations/*/studies/*}/operations", @@ -2852,6 +2936,18 @@ def __call__( "method": "get", "uri": "/v1beta1/{name=projects/*/locations/*/models/*/evaluations/*}/operations", }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/notebookExecutionJobs/*}/operations", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/notebookRuntimes/*}/operations", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/notebookRuntimeTemplates/*}/operations", + }, { "method": "get", "uri": "/v1beta1/{name=projects/*/locations/*/persistentResources/*}/operations", @@ -3115,6 +3211,18 @@ def __call__( "method": "post", "uri": "/ui/{name=projects/*/locations/*/models/*/evaluations/*/operations/*}:wait", }, 
+ { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/notebookExecutionJobs/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/notebookRuntimes/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/notebookRuntimeTemplates/*/operations/*}:wait", + }, { "method": "post", "uri": "/ui/{name=projects/*/locations/*/studies/*/operations/*}:wait", @@ -3303,6 +3411,18 @@ def __call__( "method": "post", "uri": "/v1beta1/{name=projects/*/locations/*/models/*/evaluations/*/operations/*}:wait", }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/notebookExecutionJobs/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/notebookRuntimes/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/notebookRuntimeTemplates/*/operations/*}:wait", + }, { "method": "post", "uri": "/v1beta1/{name=projects/*/locations/*/persistentResources/*/operations/*}:wait", diff --git a/google/cloud/aiplatform_v1beta1/services/metadata_service/async_client.py b/google/cloud/aiplatform_v1beta1/services/metadata_service/async_client.py index 2e6355ca16..4cd66f1f6c 100644 --- a/google/cloud/aiplatform_v1beta1/services/metadata_service/async_client.py +++ b/google/cloud/aiplatform_v1beta1/services/metadata_service/async_client.py @@ -38,6 +38,7 @@ from google.auth import credentials as ga_credentials # type: ignore from google.oauth2 import service_account # type: ignore + try: OptionalRetry = Union[retries.AsyncRetry, gapic_v1.method._MethodDefault, None] except AttributeError: # pragma: NO COVER diff --git a/google/cloud/aiplatform_v1beta1/services/metadata_service/transports/rest.py b/google/cloud/aiplatform_v1beta1/services/metadata_service/transports/rest.py index ae468ad694..ef3690d91c 100644 --- a/google/cloud/aiplatform_v1beta1/services/metadata_service/transports/rest.py +++ b/google/cloud/aiplatform_v1beta1/services/metadata_service/transports/rest.py @@ -1546,6 +1546,18 @@ def operations_client(self) -> operations_v1.AbstractOperationsClient: "method": "post", "uri": "/ui/{name=projects/*/locations/*/models/*/evaluations/*/operations/*}:cancel", }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/notebookExecutionJobs/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/notebookRuntimes/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/notebookRuntimeTemplates/*/operations/*}:cancel", + }, { "method": "post", "uri": "/ui/{name=projects/*/locations/*/persistentResources/*/operations/*}:cancel", @@ -1714,6 +1726,18 @@ def operations_client(self) -> operations_v1.AbstractOperationsClient: "method": "post", "uri": "/v1beta1/{name=projects/*/locations/*/models/*/evaluations/*/operations/*}:cancel", }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/notebookExecutionJobs/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/notebookRuntimes/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/notebookRuntimeTemplates/*/operations/*}:cancel", + }, { "method": "post", "uri": "/v1beta1/{name=projects/*/locations/*/persistentResources/*/operations/*}:cancel", @@ -1892,6 +1916,18 @@ def operations_client(self) -> operations_v1.AbstractOperationsClient: "method": "delete", "uri": 
"/ui/{name=projects/*/locations/*/models/*/evaluations/*/operations/*}", }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/notebookExecutionJobs/*/operations/*}", + }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/notebookRuntimes/*/operations/*}", + }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/notebookRuntimeTemplates/*/operations/*}", + }, { "method": "delete", "uri": "/ui/{name=projects/*/locations/*/persistentResources/*/operations/*}", @@ -2080,6 +2116,18 @@ def operations_client(self) -> operations_v1.AbstractOperationsClient: "method": "delete", "uri": "/v1beta1/{name=projects/*/locations/*/models/*/evaluations/*/operations/*}", }, + { + "method": "delete", + "uri": "/v1beta1/{name=projects/*/locations/*/notebookExecutionJobs/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1beta1/{name=projects/*/locations/*/notebookRuntimes/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1beta1/{name=projects/*/locations/*/notebookRuntimeTemplates/*/operations/*}", + }, { "method": "delete", "uri": "/v1beta1/{name=projects/*/locations/*/persistentResources/*/operations/*}", @@ -2286,6 +2334,18 @@ def operations_client(self) -> operations_v1.AbstractOperationsClient: "method": "get", "uri": "/ui/{name=projects/*/locations/*/models/*/evaluations/*/operations/*}", }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/notebookExecutionJobs/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/notebookRuntimes/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/notebookRuntimeTemplates/*/operations/*}", + }, { "method": "get", "uri": "/ui/{name=projects/*/locations/*/persistentResources/*/operations/*}", @@ -2474,6 +2534,18 @@ def operations_client(self) -> operations_v1.AbstractOperationsClient: "method": "get", "uri": "/v1beta1/{name=projects/*/locations/*/models/*/evaluations/*/operations/*}", }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/notebookExecutionJobs/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/notebookRuntimes/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/notebookRuntimeTemplates/*/operations/*}", + }, { "method": "get", "uri": "/v1beta1/{name=projects/*/locations/*/persistentResources/*/operations/*}", @@ -2676,6 +2748,18 @@ def operations_client(self) -> operations_v1.AbstractOperationsClient: "method": "get", "uri": "/ui/{name=projects/*/locations/*/models/*/evaluations/*}/operations", }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/notebookExecutionJobs/*}/operations", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/notebookRuntimes/*}/operations", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/notebookRuntimeTemplates/*}/operations", + }, { "method": "get", "uri": "/ui/{name=projects/*/locations/*/studies/*}/operations", @@ -2864,6 +2948,18 @@ def operations_client(self) -> operations_v1.AbstractOperationsClient: "method": "get", "uri": "/v1beta1/{name=projects/*/locations/*/models/*/evaluations/*}/operations", }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/notebookExecutionJobs/*}/operations", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/notebookRuntimes/*}/operations", + }, + { + "method": "get", + "uri": 
"/v1beta1/{name=projects/*/locations/*/notebookRuntimeTemplates/*}/operations", + }, { "method": "get", "uri": "/v1beta1/{name=projects/*/locations/*/persistentResources/*}/operations", @@ -3066,6 +3162,18 @@ def operations_client(self) -> operations_v1.AbstractOperationsClient: "method": "post", "uri": "/ui/{name=projects/*/locations/*/models/*/evaluations/*/operations/*}:wait", }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/notebookExecutionJobs/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/notebookRuntimes/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/notebookRuntimeTemplates/*/operations/*}:wait", + }, { "method": "post", "uri": "/ui/{name=projects/*/locations/*/studies/*/operations/*}:wait", @@ -3254,6 +3362,18 @@ def operations_client(self) -> operations_v1.AbstractOperationsClient: "method": "post", "uri": "/v1beta1/{name=projects/*/locations/*/models/*/evaluations/*/operations/*}:wait", }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/notebookExecutionJobs/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/notebookRuntimes/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/notebookRuntimeTemplates/*/operations/*}:wait", + }, { "method": "post", "uri": "/v1beta1/{name=projects/*/locations/*/persistentResources/*/operations/*}:wait", @@ -7211,6 +7331,18 @@ def __call__( "method": "post", "uri": "/ui/{name=projects/*/locations/*/models/*/evaluations/*/operations/*}:cancel", }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/notebookExecutionJobs/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/notebookRuntimes/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/notebookRuntimeTemplates/*/operations/*}:cancel", + }, { "method": "post", "uri": "/ui/{name=projects/*/locations/*/persistentResources/*/operations/*}:cancel", @@ -7379,6 +7511,18 @@ def __call__( "method": "post", "uri": "/v1beta1/{name=projects/*/locations/*/models/*/evaluations/*/operations/*}:cancel", }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/notebookExecutionJobs/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/notebookRuntimes/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/notebookRuntimeTemplates/*/operations/*}:cancel", + }, { "method": "post", "uri": "/v1beta1/{name=projects/*/locations/*/persistentResources/*/operations/*}:cancel", @@ -7614,6 +7758,18 @@ def __call__( "method": "delete", "uri": "/ui/{name=projects/*/locations/*/models/*/evaluations/*/operations/*}", }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/notebookExecutionJobs/*/operations/*}", + }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/notebookRuntimes/*/operations/*}", + }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/notebookRuntimeTemplates/*/operations/*}", + }, { "method": "delete", "uri": "/ui/{name=projects/*/locations/*/persistentResources/*/operations/*}", @@ -7802,6 +7958,18 @@ def __call__( "method": "delete", "uri": "/v1beta1/{name=projects/*/locations/*/models/*/evaluations/*/operations/*}", }, + { + "method": "delete", + "uri": 
"/v1beta1/{name=projects/*/locations/*/notebookExecutionJobs/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1beta1/{name=projects/*/locations/*/notebookRuntimes/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1beta1/{name=projects/*/locations/*/notebookRuntimeTemplates/*/operations/*}", + }, { "method": "delete", "uri": "/v1beta1/{name=projects/*/locations/*/persistentResources/*/operations/*}", @@ -8068,6 +8236,18 @@ def __call__( "method": "get", "uri": "/ui/{name=projects/*/locations/*/models/*/evaluations/*/operations/*}", }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/notebookExecutionJobs/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/notebookRuntimes/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/notebookRuntimeTemplates/*/operations/*}", + }, { "method": "get", "uri": "/ui/{name=projects/*/locations/*/persistentResources/*/operations/*}", @@ -8256,6 +8436,18 @@ def __call__( "method": "get", "uri": "/v1beta1/{name=projects/*/locations/*/models/*/evaluations/*/operations/*}", }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/notebookExecutionJobs/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/notebookRuntimes/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/notebookRuntimeTemplates/*/operations/*}", + }, { "method": "get", "uri": "/v1beta1/{name=projects/*/locations/*/persistentResources/*/operations/*}", @@ -8519,6 +8711,18 @@ def __call__( "method": "get", "uri": "/ui/{name=projects/*/locations/*/models/*/evaluations/*}/operations", }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/notebookExecutionJobs/*}/operations", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/notebookRuntimes/*}/operations", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/notebookRuntimeTemplates/*}/operations", + }, { "method": "get", "uri": "/ui/{name=projects/*/locations/*/studies/*}/operations", @@ -8707,6 +8911,18 @@ def __call__( "method": "get", "uri": "/v1beta1/{name=projects/*/locations/*/models/*/evaluations/*}/operations", }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/notebookExecutionJobs/*}/operations", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/notebookRuntimes/*}/operations", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/notebookRuntimeTemplates/*}/operations", + }, { "method": "get", "uri": "/v1beta1/{name=projects/*/locations/*/persistentResources/*}/operations", @@ -8970,6 +9186,18 @@ def __call__( "method": "post", "uri": "/ui/{name=projects/*/locations/*/models/*/evaluations/*/operations/*}:wait", }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/notebookExecutionJobs/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/notebookRuntimes/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/notebookRuntimeTemplates/*/operations/*}:wait", + }, { "method": "post", "uri": "/ui/{name=projects/*/locations/*/studies/*/operations/*}:wait", @@ -9158,6 +9386,18 @@ def __call__( "method": "post", "uri": "/v1beta1/{name=projects/*/locations/*/models/*/evaluations/*/operations/*}:wait", }, + { + "method": "post", + "uri": 
"/v1beta1/{name=projects/*/locations/*/notebookExecutionJobs/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/notebookRuntimes/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/notebookRuntimeTemplates/*/operations/*}:wait", + }, { "method": "post", "uri": "/v1beta1/{name=projects/*/locations/*/persistentResources/*/operations/*}:wait", diff --git a/google/cloud/aiplatform_v1beta1/services/migration_service/async_client.py b/google/cloud/aiplatform_v1beta1/services/migration_service/async_client.py index 160c156a21..e7d98f8078 100644 --- a/google/cloud/aiplatform_v1beta1/services/migration_service/async_client.py +++ b/google/cloud/aiplatform_v1beta1/services/migration_service/async_client.py @@ -38,6 +38,7 @@ from google.auth import credentials as ga_credentials # type: ignore from google.oauth2 import service_account # type: ignore + try: OptionalRetry = Union[retries.AsyncRetry, gapic_v1.method._MethodDefault, None] except AttributeError: # pragma: NO COVER diff --git a/google/cloud/aiplatform_v1beta1/services/migration_service/client.py b/google/cloud/aiplatform_v1beta1/services/migration_service/client.py index b54b12bcba..9ca3a48da6 100644 --- a/google/cloud/aiplatform_v1beta1/services/migration_service/client.py +++ b/google/cloud/aiplatform_v1beta1/services/migration_service/client.py @@ -216,40 +216,40 @@ def parse_annotated_dataset_path(path: str) -> Dict[str, str]: @staticmethod def dataset_path( project: str, - location: str, dataset: str, ) -> str: """Returns a fully-qualified dataset string.""" - return "projects/{project}/locations/{location}/datasets/{dataset}".format( + return "projects/{project}/datasets/{dataset}".format( project=project, - location=location, dataset=dataset, ) @staticmethod def parse_dataset_path(path: str) -> Dict[str, str]: """Parses a dataset path into its component segments.""" - m = re.match( - r"^projects/(?P.+?)/locations/(?P.+?)/datasets/(?P.+?)$", - path, - ) + m = re.match(r"^projects/(?P.+?)/datasets/(?P.+?)$", path) return m.groupdict() if m else {} @staticmethod def dataset_path( project: str, + location: str, dataset: str, ) -> str: """Returns a fully-qualified dataset string.""" - return "projects/{project}/datasets/{dataset}".format( + return "projects/{project}/locations/{location}/datasets/{dataset}".format( project=project, + location=location, dataset=dataset, ) @staticmethod def parse_dataset_path(path: str) -> Dict[str, str]: """Parses a dataset path into its component segments.""" - m = re.match(r"^projects/(?P.+?)/datasets/(?P.+?)$", path) + m = re.match( + r"^projects/(?P.+?)/locations/(?P.+?)/datasets/(?P.+?)$", + path, + ) return m.groupdict() if m else {} @staticmethod diff --git a/google/cloud/aiplatform_v1beta1/services/migration_service/transports/rest.py b/google/cloud/aiplatform_v1beta1/services/migration_service/transports/rest.py index 6185b887d5..3a8b14120c 100644 --- a/google/cloud/aiplatform_v1beta1/services/migration_service/transports/rest.py +++ b/google/cloud/aiplatform_v1beta1/services/migration_service/transports/rest.py @@ -609,6 +609,18 @@ def operations_client(self) -> operations_v1.AbstractOperationsClient: "method": "post", "uri": "/ui/{name=projects/*/locations/*/models/*/evaluations/*/operations/*}:cancel", }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/notebookExecutionJobs/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": 
"/ui/{name=projects/*/locations/*/notebookRuntimes/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/notebookRuntimeTemplates/*/operations/*}:cancel", + }, { "method": "post", "uri": "/ui/{name=projects/*/locations/*/persistentResources/*/operations/*}:cancel", @@ -777,6 +789,18 @@ def operations_client(self) -> operations_v1.AbstractOperationsClient: "method": "post", "uri": "/v1beta1/{name=projects/*/locations/*/models/*/evaluations/*/operations/*}:cancel", }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/notebookExecutionJobs/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/notebookRuntimes/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/notebookRuntimeTemplates/*/operations/*}:cancel", + }, { "method": "post", "uri": "/v1beta1/{name=projects/*/locations/*/persistentResources/*/operations/*}:cancel", @@ -955,6 +979,18 @@ def operations_client(self) -> operations_v1.AbstractOperationsClient: "method": "delete", "uri": "/ui/{name=projects/*/locations/*/models/*/evaluations/*/operations/*}", }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/notebookExecutionJobs/*/operations/*}", + }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/notebookRuntimes/*/operations/*}", + }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/notebookRuntimeTemplates/*/operations/*}", + }, { "method": "delete", "uri": "/ui/{name=projects/*/locations/*/persistentResources/*/operations/*}", @@ -1143,6 +1179,18 @@ def operations_client(self) -> operations_v1.AbstractOperationsClient: "method": "delete", "uri": "/v1beta1/{name=projects/*/locations/*/models/*/evaluations/*/operations/*}", }, + { + "method": "delete", + "uri": "/v1beta1/{name=projects/*/locations/*/notebookExecutionJobs/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1beta1/{name=projects/*/locations/*/notebookRuntimes/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1beta1/{name=projects/*/locations/*/notebookRuntimeTemplates/*/operations/*}", + }, { "method": "delete", "uri": "/v1beta1/{name=projects/*/locations/*/persistentResources/*/operations/*}", @@ -1349,6 +1397,18 @@ def operations_client(self) -> operations_v1.AbstractOperationsClient: "method": "get", "uri": "/ui/{name=projects/*/locations/*/models/*/evaluations/*/operations/*}", }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/notebookExecutionJobs/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/notebookRuntimes/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/notebookRuntimeTemplates/*/operations/*}", + }, { "method": "get", "uri": "/ui/{name=projects/*/locations/*/persistentResources/*/operations/*}", @@ -1537,6 +1597,18 @@ def operations_client(self) -> operations_v1.AbstractOperationsClient: "method": "get", "uri": "/v1beta1/{name=projects/*/locations/*/models/*/evaluations/*/operations/*}", }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/notebookExecutionJobs/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/notebookRuntimes/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/notebookRuntimeTemplates/*/operations/*}", + }, { "method": "get", "uri": 
"/v1beta1/{name=projects/*/locations/*/persistentResources/*/operations/*}", @@ -1739,6 +1811,18 @@ def operations_client(self) -> operations_v1.AbstractOperationsClient: "method": "get", "uri": "/ui/{name=projects/*/locations/*/models/*/evaluations/*}/operations", }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/notebookExecutionJobs/*}/operations", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/notebookRuntimes/*}/operations", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/notebookRuntimeTemplates/*}/operations", + }, { "method": "get", "uri": "/ui/{name=projects/*/locations/*/studies/*}/operations", @@ -1927,6 +2011,18 @@ def operations_client(self) -> operations_v1.AbstractOperationsClient: "method": "get", "uri": "/v1beta1/{name=projects/*/locations/*/models/*/evaluations/*}/operations", }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/notebookExecutionJobs/*}/operations", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/notebookRuntimes/*}/operations", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/notebookRuntimeTemplates/*}/operations", + }, { "method": "get", "uri": "/v1beta1/{name=projects/*/locations/*/persistentResources/*}/operations", @@ -2129,6 +2225,18 @@ def operations_client(self) -> operations_v1.AbstractOperationsClient: "method": "post", "uri": "/ui/{name=projects/*/locations/*/models/*/evaluations/*/operations/*}:wait", }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/notebookExecutionJobs/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/notebookRuntimes/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/notebookRuntimeTemplates/*/operations/*}:wait", + }, { "method": "post", "uri": "/ui/{name=projects/*/locations/*/studies/*/operations/*}:wait", @@ -2317,6 +2425,18 @@ def operations_client(self) -> operations_v1.AbstractOperationsClient: "method": "post", "uri": "/v1beta1/{name=projects/*/locations/*/models/*/evaluations/*/operations/*}:wait", }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/notebookExecutionJobs/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/notebookRuntimes/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/notebookRuntimeTemplates/*/operations/*}:wait", + }, { "method": "post", "uri": "/v1beta1/{name=projects/*/locations/*/persistentResources/*/operations/*}:wait", @@ -3301,6 +3421,18 @@ def __call__( "method": "post", "uri": "/ui/{name=projects/*/locations/*/models/*/evaluations/*/operations/*}:cancel", }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/notebookExecutionJobs/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/notebookRuntimes/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/notebookRuntimeTemplates/*/operations/*}:cancel", + }, { "method": "post", "uri": "/ui/{name=projects/*/locations/*/persistentResources/*/operations/*}:cancel", @@ -3469,6 +3601,18 @@ def __call__( "method": "post", "uri": "/v1beta1/{name=projects/*/locations/*/models/*/evaluations/*/operations/*}:cancel", }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/notebookExecutionJobs/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": 
"/v1beta1/{name=projects/*/locations/*/notebookRuntimes/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/notebookRuntimeTemplates/*/operations/*}:cancel", + }, { "method": "post", "uri": "/v1beta1/{name=projects/*/locations/*/persistentResources/*/operations/*}:cancel", @@ -3704,6 +3848,18 @@ def __call__( "method": "delete", "uri": "/ui/{name=projects/*/locations/*/models/*/evaluations/*/operations/*}", }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/notebookExecutionJobs/*/operations/*}", + }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/notebookRuntimes/*/operations/*}", + }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/notebookRuntimeTemplates/*/operations/*}", + }, { "method": "delete", "uri": "/ui/{name=projects/*/locations/*/persistentResources/*/operations/*}", @@ -3892,6 +4048,18 @@ def __call__( "method": "delete", "uri": "/v1beta1/{name=projects/*/locations/*/models/*/evaluations/*/operations/*}", }, + { + "method": "delete", + "uri": "/v1beta1/{name=projects/*/locations/*/notebookExecutionJobs/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1beta1/{name=projects/*/locations/*/notebookRuntimes/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1beta1/{name=projects/*/locations/*/notebookRuntimeTemplates/*/operations/*}", + }, { "method": "delete", "uri": "/v1beta1/{name=projects/*/locations/*/persistentResources/*/operations/*}", @@ -4158,6 +4326,18 @@ def __call__( "method": "get", "uri": "/ui/{name=projects/*/locations/*/models/*/evaluations/*/operations/*}", }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/notebookExecutionJobs/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/notebookRuntimes/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/notebookRuntimeTemplates/*/operations/*}", + }, { "method": "get", "uri": "/ui/{name=projects/*/locations/*/persistentResources/*/operations/*}", @@ -4346,6 +4526,18 @@ def __call__( "method": "get", "uri": "/v1beta1/{name=projects/*/locations/*/models/*/evaluations/*/operations/*}", }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/notebookExecutionJobs/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/notebookRuntimes/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/notebookRuntimeTemplates/*/operations/*}", + }, { "method": "get", "uri": "/v1beta1/{name=projects/*/locations/*/persistentResources/*/operations/*}", @@ -4609,6 +4801,18 @@ def __call__( "method": "get", "uri": "/ui/{name=projects/*/locations/*/models/*/evaluations/*}/operations", }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/notebookExecutionJobs/*}/operations", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/notebookRuntimes/*}/operations", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/notebookRuntimeTemplates/*}/operations", + }, { "method": "get", "uri": "/ui/{name=projects/*/locations/*/studies/*}/operations", @@ -4797,6 +5001,18 @@ def __call__( "method": "get", "uri": "/v1beta1/{name=projects/*/locations/*/models/*/evaluations/*}/operations", }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/notebookExecutionJobs/*}/operations", + }, + { + "method": "get", + "uri": 
"/v1beta1/{name=projects/*/locations/*/notebookRuntimes/*}/operations", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/notebookRuntimeTemplates/*}/operations", + }, { "method": "get", "uri": "/v1beta1/{name=projects/*/locations/*/persistentResources/*}/operations", @@ -5060,6 +5276,18 @@ def __call__( "method": "post", "uri": "/ui/{name=projects/*/locations/*/models/*/evaluations/*/operations/*}:wait", }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/notebookExecutionJobs/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/notebookRuntimes/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/notebookRuntimeTemplates/*/operations/*}:wait", + }, { "method": "post", "uri": "/ui/{name=projects/*/locations/*/studies/*/operations/*}:wait", @@ -5248,6 +5476,18 @@ def __call__( "method": "post", "uri": "/v1beta1/{name=projects/*/locations/*/models/*/evaluations/*/operations/*}:wait", }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/notebookExecutionJobs/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/notebookRuntimes/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/notebookRuntimeTemplates/*/operations/*}:wait", + }, { "method": "post", "uri": "/v1beta1/{name=projects/*/locations/*/persistentResources/*/operations/*}:wait", diff --git a/google/cloud/aiplatform_v1beta1/services/model_garden_service/async_client.py b/google/cloud/aiplatform_v1beta1/services/model_garden_service/async_client.py index f31fea297f..e3bd80325d 100644 --- a/google/cloud/aiplatform_v1beta1/services/model_garden_service/async_client.py +++ b/google/cloud/aiplatform_v1beta1/services/model_garden_service/async_client.py @@ -38,6 +38,7 @@ from google.auth import credentials as ga_credentials # type: ignore from google.oauth2 import service_account # type: ignore + try: OptionalRetry = Union[retries.AsyncRetry, gapic_v1.method._MethodDefault, None] except AttributeError: # pragma: NO COVER diff --git a/google/cloud/aiplatform_v1beta1/services/model_garden_service/transports/rest.py b/google/cloud/aiplatform_v1beta1/services/model_garden_service/transports/rest.py index 51be57b745..e3eda9c3cc 100644 --- a/google/cloud/aiplatform_v1beta1/services/model_garden_service/transports/rest.py +++ b/google/cloud/aiplatform_v1beta1/services/model_garden_service/transports/rest.py @@ -1349,6 +1349,18 @@ def __call__( "method": "post", "uri": "/ui/{name=projects/*/locations/*/models/*/evaluations/*/operations/*}:cancel", }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/notebookExecutionJobs/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/notebookRuntimes/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/notebookRuntimeTemplates/*/operations/*}:cancel", + }, { "method": "post", "uri": "/ui/{name=projects/*/locations/*/persistentResources/*/operations/*}:cancel", @@ -1517,6 +1529,18 @@ def __call__( "method": "post", "uri": "/v1beta1/{name=projects/*/locations/*/models/*/evaluations/*/operations/*}:cancel", }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/notebookExecutionJobs/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/notebookRuntimes/*/operations/*}:cancel", + }, + { + "method": 
"post", + "uri": "/v1beta1/{name=projects/*/locations/*/notebookRuntimeTemplates/*/operations/*}:cancel", + }, { "method": "post", "uri": "/v1beta1/{name=projects/*/locations/*/persistentResources/*/operations/*}:cancel", @@ -1752,6 +1776,18 @@ def __call__( "method": "delete", "uri": "/ui/{name=projects/*/locations/*/models/*/evaluations/*/operations/*}", }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/notebookExecutionJobs/*/operations/*}", + }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/notebookRuntimes/*/operations/*}", + }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/notebookRuntimeTemplates/*/operations/*}", + }, { "method": "delete", "uri": "/ui/{name=projects/*/locations/*/persistentResources/*/operations/*}", @@ -1940,6 +1976,18 @@ def __call__( "method": "delete", "uri": "/v1beta1/{name=projects/*/locations/*/models/*/evaluations/*/operations/*}", }, + { + "method": "delete", + "uri": "/v1beta1/{name=projects/*/locations/*/notebookExecutionJobs/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1beta1/{name=projects/*/locations/*/notebookRuntimes/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1beta1/{name=projects/*/locations/*/notebookRuntimeTemplates/*/operations/*}", + }, { "method": "delete", "uri": "/v1beta1/{name=projects/*/locations/*/persistentResources/*/operations/*}", @@ -2206,6 +2254,18 @@ def __call__( "method": "get", "uri": "/ui/{name=projects/*/locations/*/models/*/evaluations/*/operations/*}", }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/notebookExecutionJobs/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/notebookRuntimes/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/notebookRuntimeTemplates/*/operations/*}", + }, { "method": "get", "uri": "/ui/{name=projects/*/locations/*/persistentResources/*/operations/*}", @@ -2394,6 +2454,18 @@ def __call__( "method": "get", "uri": "/v1beta1/{name=projects/*/locations/*/models/*/evaluations/*/operations/*}", }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/notebookExecutionJobs/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/notebookRuntimes/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/notebookRuntimeTemplates/*/operations/*}", + }, { "method": "get", "uri": "/v1beta1/{name=projects/*/locations/*/persistentResources/*/operations/*}", @@ -2657,6 +2729,18 @@ def __call__( "method": "get", "uri": "/ui/{name=projects/*/locations/*/models/*/evaluations/*}/operations", }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/notebookExecutionJobs/*}/operations", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/notebookRuntimes/*}/operations", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/notebookRuntimeTemplates/*}/operations", + }, { "method": "get", "uri": "/ui/{name=projects/*/locations/*/studies/*}/operations", @@ -2845,6 +2929,18 @@ def __call__( "method": "get", "uri": "/v1beta1/{name=projects/*/locations/*/models/*/evaluations/*}/operations", }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/notebookExecutionJobs/*}/operations", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/notebookRuntimes/*}/operations", + }, + { + "method": "get", + "uri": 
"/v1beta1/{name=projects/*/locations/*/notebookRuntimeTemplates/*}/operations", + }, { "method": "get", "uri": "/v1beta1/{name=projects/*/locations/*/persistentResources/*}/operations", @@ -3108,6 +3204,18 @@ def __call__( "method": "post", "uri": "/ui/{name=projects/*/locations/*/models/*/evaluations/*/operations/*}:wait", }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/notebookExecutionJobs/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/notebookRuntimes/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/notebookRuntimeTemplates/*/operations/*}:wait", + }, { "method": "post", "uri": "/ui/{name=projects/*/locations/*/studies/*/operations/*}:wait", @@ -3296,6 +3404,18 @@ def __call__( "method": "post", "uri": "/v1beta1/{name=projects/*/locations/*/models/*/evaluations/*/operations/*}:wait", }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/notebookExecutionJobs/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/notebookRuntimes/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/notebookRuntimeTemplates/*/operations/*}:wait", + }, { "method": "post", "uri": "/v1beta1/{name=projects/*/locations/*/persistentResources/*/operations/*}:wait", diff --git a/google/cloud/aiplatform_v1beta1/services/model_monitoring_service/async_client.py b/google/cloud/aiplatform_v1beta1/services/model_monitoring_service/async_client.py index dbbdfd96b6..b2a87345a6 100644 --- a/google/cloud/aiplatform_v1beta1/services/model_monitoring_service/async_client.py +++ b/google/cloud/aiplatform_v1beta1/services/model_monitoring_service/async_client.py @@ -38,6 +38,7 @@ from google.auth import credentials as ga_credentials # type: ignore from google.oauth2 import service_account # type: ignore + try: OptionalRetry = Union[retries.AsyncRetry, gapic_v1.method._MethodDefault, None] except AttributeError: # pragma: NO COVER diff --git a/google/cloud/aiplatform_v1beta1/services/model_monitoring_service/transports/rest.py b/google/cloud/aiplatform_v1beta1/services/model_monitoring_service/transports/rest.py index ab37480203..58941067e3 100644 --- a/google/cloud/aiplatform_v1beta1/services/model_monitoring_service/transports/rest.py +++ b/google/cloud/aiplatform_v1beta1/services/model_monitoring_service/transports/rest.py @@ -917,6 +917,18 @@ def operations_client(self) -> operations_v1.AbstractOperationsClient: "method": "post", "uri": "/ui/{name=projects/*/locations/*/models/*/evaluations/*/operations/*}:cancel", }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/notebookExecutionJobs/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/notebookRuntimes/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/notebookRuntimeTemplates/*/operations/*}:cancel", + }, { "method": "post", "uri": "/ui/{name=projects/*/locations/*/persistentResources/*/operations/*}:cancel", @@ -1085,6 +1097,18 @@ def operations_client(self) -> operations_v1.AbstractOperationsClient: "method": "post", "uri": "/v1beta1/{name=projects/*/locations/*/models/*/evaluations/*/operations/*}:cancel", }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/notebookExecutionJobs/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": 
"/v1beta1/{name=projects/*/locations/*/notebookRuntimes/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/notebookRuntimeTemplates/*/operations/*}:cancel", + }, { "method": "post", "uri": "/v1beta1/{name=projects/*/locations/*/persistentResources/*/operations/*}:cancel", @@ -1263,6 +1287,18 @@ def operations_client(self) -> operations_v1.AbstractOperationsClient: "method": "delete", "uri": "/ui/{name=projects/*/locations/*/models/*/evaluations/*/operations/*}", }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/notebookExecutionJobs/*/operations/*}", + }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/notebookRuntimes/*/operations/*}", + }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/notebookRuntimeTemplates/*/operations/*}", + }, { "method": "delete", "uri": "/ui/{name=projects/*/locations/*/persistentResources/*/operations/*}", @@ -1451,6 +1487,18 @@ def operations_client(self) -> operations_v1.AbstractOperationsClient: "method": "delete", "uri": "/v1beta1/{name=projects/*/locations/*/models/*/evaluations/*/operations/*}", }, + { + "method": "delete", + "uri": "/v1beta1/{name=projects/*/locations/*/notebookExecutionJobs/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1beta1/{name=projects/*/locations/*/notebookRuntimes/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1beta1/{name=projects/*/locations/*/notebookRuntimeTemplates/*/operations/*}", + }, { "method": "delete", "uri": "/v1beta1/{name=projects/*/locations/*/persistentResources/*/operations/*}", @@ -1657,6 +1705,18 @@ def operations_client(self) -> operations_v1.AbstractOperationsClient: "method": "get", "uri": "/ui/{name=projects/*/locations/*/models/*/evaluations/*/operations/*}", }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/notebookExecutionJobs/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/notebookRuntimes/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/notebookRuntimeTemplates/*/operations/*}", + }, { "method": "get", "uri": "/ui/{name=projects/*/locations/*/persistentResources/*/operations/*}", @@ -1845,6 +1905,18 @@ def operations_client(self) -> operations_v1.AbstractOperationsClient: "method": "get", "uri": "/v1beta1/{name=projects/*/locations/*/models/*/evaluations/*/operations/*}", }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/notebookExecutionJobs/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/notebookRuntimes/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/notebookRuntimeTemplates/*/operations/*}", + }, { "method": "get", "uri": "/v1beta1/{name=projects/*/locations/*/persistentResources/*/operations/*}", @@ -2047,6 +2119,18 @@ def operations_client(self) -> operations_v1.AbstractOperationsClient: "method": "get", "uri": "/ui/{name=projects/*/locations/*/models/*/evaluations/*}/operations", }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/notebookExecutionJobs/*}/operations", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/notebookRuntimes/*}/operations", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/notebookRuntimeTemplates/*}/operations", + }, { "method": "get", "uri": "/ui/{name=projects/*/locations/*/studies/*}/operations", @@ -2235,6 +2319,18 @@ def operations_client(self) -> 
operations_v1.AbstractOperationsClient: "method": "get", "uri": "/v1beta1/{name=projects/*/locations/*/models/*/evaluations/*}/operations", }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/notebookExecutionJobs/*}/operations", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/notebookRuntimes/*}/operations", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/notebookRuntimeTemplates/*}/operations", + }, { "method": "get", "uri": "/v1beta1/{name=projects/*/locations/*/persistentResources/*}/operations", @@ -2437,6 +2533,18 @@ def operations_client(self) -> operations_v1.AbstractOperationsClient: "method": "post", "uri": "/ui/{name=projects/*/locations/*/models/*/evaluations/*/operations/*}:wait", }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/notebookExecutionJobs/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/notebookRuntimes/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/notebookRuntimeTemplates/*/operations/*}:wait", + }, { "method": "post", "uri": "/ui/{name=projects/*/locations/*/studies/*/operations/*}:wait", @@ -2625,6 +2733,18 @@ def operations_client(self) -> operations_v1.AbstractOperationsClient: "method": "post", "uri": "/v1beta1/{name=projects/*/locations/*/models/*/evaluations/*/operations/*}:wait", }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/notebookExecutionJobs/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/notebookRuntimes/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/notebookRuntimeTemplates/*/operations/*}:wait", + }, { "method": "post", "uri": "/v1beta1/{name=projects/*/locations/*/persistentResources/*/operations/*}:wait", @@ -4544,6 +4664,18 @@ def __call__( "method": "post", "uri": "/ui/{name=projects/*/locations/*/models/*/evaluations/*/operations/*}:cancel", }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/notebookExecutionJobs/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/notebookRuntimes/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/notebookRuntimeTemplates/*/operations/*}:cancel", + }, { "method": "post", "uri": "/ui/{name=projects/*/locations/*/persistentResources/*/operations/*}:cancel", @@ -4712,6 +4844,18 @@ def __call__( "method": "post", "uri": "/v1beta1/{name=projects/*/locations/*/models/*/evaluations/*/operations/*}:cancel", }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/notebookExecutionJobs/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/notebookRuntimes/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/notebookRuntimeTemplates/*/operations/*}:cancel", + }, { "method": "post", "uri": "/v1beta1/{name=projects/*/locations/*/persistentResources/*/operations/*}:cancel", @@ -4947,6 +5091,18 @@ def __call__( "method": "delete", "uri": "/ui/{name=projects/*/locations/*/models/*/evaluations/*/operations/*}", }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/notebookExecutionJobs/*/operations/*}", + }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/notebookRuntimes/*/operations/*}", + }, + { + "method": "delete", + "uri": 
"/ui/{name=projects/*/locations/*/notebookRuntimeTemplates/*/operations/*}", + }, { "method": "delete", "uri": "/ui/{name=projects/*/locations/*/persistentResources/*/operations/*}", @@ -5135,6 +5291,18 @@ def __call__( "method": "delete", "uri": "/v1beta1/{name=projects/*/locations/*/models/*/evaluations/*/operations/*}", }, + { + "method": "delete", + "uri": "/v1beta1/{name=projects/*/locations/*/notebookExecutionJobs/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1beta1/{name=projects/*/locations/*/notebookRuntimes/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1beta1/{name=projects/*/locations/*/notebookRuntimeTemplates/*/operations/*}", + }, { "method": "delete", "uri": "/v1beta1/{name=projects/*/locations/*/persistentResources/*/operations/*}", @@ -5401,6 +5569,18 @@ def __call__( "method": "get", "uri": "/ui/{name=projects/*/locations/*/models/*/evaluations/*/operations/*}", }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/notebookExecutionJobs/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/notebookRuntimes/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/notebookRuntimeTemplates/*/operations/*}", + }, { "method": "get", "uri": "/ui/{name=projects/*/locations/*/persistentResources/*/operations/*}", @@ -5589,6 +5769,18 @@ def __call__( "method": "get", "uri": "/v1beta1/{name=projects/*/locations/*/models/*/evaluations/*/operations/*}", }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/notebookExecutionJobs/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/notebookRuntimes/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/notebookRuntimeTemplates/*/operations/*}", + }, { "method": "get", "uri": "/v1beta1/{name=projects/*/locations/*/persistentResources/*/operations/*}", @@ -5852,6 +6044,18 @@ def __call__( "method": "get", "uri": "/ui/{name=projects/*/locations/*/models/*/evaluations/*}/operations", }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/notebookExecutionJobs/*}/operations", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/notebookRuntimes/*}/operations", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/notebookRuntimeTemplates/*}/operations", + }, { "method": "get", "uri": "/ui/{name=projects/*/locations/*/studies/*}/operations", @@ -6040,6 +6244,18 @@ def __call__( "method": "get", "uri": "/v1beta1/{name=projects/*/locations/*/models/*/evaluations/*}/operations", }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/notebookExecutionJobs/*}/operations", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/notebookRuntimes/*}/operations", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/notebookRuntimeTemplates/*}/operations", + }, { "method": "get", "uri": "/v1beta1/{name=projects/*/locations/*/persistentResources/*}/operations", @@ -6303,6 +6519,18 @@ def __call__( "method": "post", "uri": "/ui/{name=projects/*/locations/*/models/*/evaluations/*/operations/*}:wait", }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/notebookExecutionJobs/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/notebookRuntimes/*/operations/*}:wait", + }, + { + "method": "post", + "uri": 
"/ui/{name=projects/*/locations/*/notebookRuntimeTemplates/*/operations/*}:wait", + }, { "method": "post", "uri": "/ui/{name=projects/*/locations/*/studies/*/operations/*}:wait", @@ -6491,6 +6719,18 @@ def __call__( "method": "post", "uri": "/v1beta1/{name=projects/*/locations/*/models/*/evaluations/*/operations/*}:wait", }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/notebookExecutionJobs/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/notebookRuntimes/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/notebookRuntimeTemplates/*/operations/*}:wait", + }, { "method": "post", "uri": "/v1beta1/{name=projects/*/locations/*/persistentResources/*/operations/*}:wait", diff --git a/google/cloud/aiplatform_v1beta1/services/model_service/async_client.py b/google/cloud/aiplatform_v1beta1/services/model_service/async_client.py index 3818ddb515..c65ce3aa59 100644 --- a/google/cloud/aiplatform_v1beta1/services/model_service/async_client.py +++ b/google/cloud/aiplatform_v1beta1/services/model_service/async_client.py @@ -38,6 +38,7 @@ from google.auth import credentials as ga_credentials # type: ignore from google.oauth2 import service_account # type: ignore + try: OptionalRetry = Union[retries.AsyncRetry, gapic_v1.method._MethodDefault, None] except AttributeError: # pragma: NO COVER diff --git a/google/cloud/aiplatform_v1beta1/services/model_service/transports/rest.py b/google/cloud/aiplatform_v1beta1/services/model_service/transports/rest.py index 690e83e2cd..2f95412d07 100644 --- a/google/cloud/aiplatform_v1beta1/services/model_service/transports/rest.py +++ b/google/cloud/aiplatform_v1beta1/services/model_service/transports/rest.py @@ -1106,6 +1106,18 @@ def operations_client(self) -> operations_v1.AbstractOperationsClient: "method": "post", "uri": "/ui/{name=projects/*/locations/*/models/*/evaluations/*/operations/*}:cancel", }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/notebookExecutionJobs/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/notebookRuntimes/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/notebookRuntimeTemplates/*/operations/*}:cancel", + }, { "method": "post", "uri": "/ui/{name=projects/*/locations/*/persistentResources/*/operations/*}:cancel", @@ -1274,6 +1286,18 @@ def operations_client(self) -> operations_v1.AbstractOperationsClient: "method": "post", "uri": "/v1beta1/{name=projects/*/locations/*/models/*/evaluations/*/operations/*}:cancel", }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/notebookExecutionJobs/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/notebookRuntimes/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/notebookRuntimeTemplates/*/operations/*}:cancel", + }, { "method": "post", "uri": "/v1beta1/{name=projects/*/locations/*/persistentResources/*/operations/*}:cancel", @@ -1452,6 +1476,18 @@ def operations_client(self) -> operations_v1.AbstractOperationsClient: "method": "delete", "uri": "/ui/{name=projects/*/locations/*/models/*/evaluations/*/operations/*}", }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/notebookExecutionJobs/*/operations/*}", + }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/notebookRuntimes/*/operations/*}", + }, + { 
+ "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/notebookRuntimeTemplates/*/operations/*}", + }, { "method": "delete", "uri": "/ui/{name=projects/*/locations/*/persistentResources/*/operations/*}", @@ -1640,6 +1676,18 @@ def operations_client(self) -> operations_v1.AbstractOperationsClient: "method": "delete", "uri": "/v1beta1/{name=projects/*/locations/*/models/*/evaluations/*/operations/*}", }, + { + "method": "delete", + "uri": "/v1beta1/{name=projects/*/locations/*/notebookExecutionJobs/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1beta1/{name=projects/*/locations/*/notebookRuntimes/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1beta1/{name=projects/*/locations/*/notebookRuntimeTemplates/*/operations/*}", + }, { "method": "delete", "uri": "/v1beta1/{name=projects/*/locations/*/persistentResources/*/operations/*}", @@ -1846,6 +1894,18 @@ def operations_client(self) -> operations_v1.AbstractOperationsClient: "method": "get", "uri": "/ui/{name=projects/*/locations/*/models/*/evaluations/*/operations/*}", }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/notebookExecutionJobs/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/notebookRuntimes/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/notebookRuntimeTemplates/*/operations/*}", + }, { "method": "get", "uri": "/ui/{name=projects/*/locations/*/persistentResources/*/operations/*}", @@ -2034,6 +2094,18 @@ def operations_client(self) -> operations_v1.AbstractOperationsClient: "method": "get", "uri": "/v1beta1/{name=projects/*/locations/*/models/*/evaluations/*/operations/*}", }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/notebookExecutionJobs/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/notebookRuntimes/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/notebookRuntimeTemplates/*/operations/*}", + }, { "method": "get", "uri": "/v1beta1/{name=projects/*/locations/*/persistentResources/*/operations/*}", @@ -2236,6 +2308,18 @@ def operations_client(self) -> operations_v1.AbstractOperationsClient: "method": "get", "uri": "/ui/{name=projects/*/locations/*/models/*/evaluations/*}/operations", }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/notebookExecutionJobs/*}/operations", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/notebookRuntimes/*}/operations", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/notebookRuntimeTemplates/*}/operations", + }, { "method": "get", "uri": "/ui/{name=projects/*/locations/*/studies/*}/operations", @@ -2424,6 +2508,18 @@ def operations_client(self) -> operations_v1.AbstractOperationsClient: "method": "get", "uri": "/v1beta1/{name=projects/*/locations/*/models/*/evaluations/*}/operations", }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/notebookExecutionJobs/*}/operations", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/notebookRuntimes/*}/operations", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/notebookRuntimeTemplates/*}/operations", + }, { "method": "get", "uri": "/v1beta1/{name=projects/*/locations/*/persistentResources/*}/operations", @@ -2626,6 +2722,18 @@ def operations_client(self) -> operations_v1.AbstractOperationsClient: "method": "post", "uri": 
"/ui/{name=projects/*/locations/*/models/*/evaluations/*/operations/*}:wait", }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/notebookExecutionJobs/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/notebookRuntimes/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/notebookRuntimeTemplates/*/operations/*}:wait", + }, { "method": "post", "uri": "/ui/{name=projects/*/locations/*/studies/*/operations/*}:wait", @@ -2814,6 +2922,18 @@ def operations_client(self) -> operations_v1.AbstractOperationsClient: "method": "post", "uri": "/v1beta1/{name=projects/*/locations/*/models/*/evaluations/*/operations/*}:wait", }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/notebookExecutionJobs/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/notebookRuntimes/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/notebookRuntimeTemplates/*/operations/*}:wait", + }, { "method": "post", "uri": "/v1beta1/{name=projects/*/locations/*/persistentResources/*/operations/*}:wait", @@ -5398,6 +5518,18 @@ def __call__( "method": "post", "uri": "/ui/{name=projects/*/locations/*/models/*/evaluations/*/operations/*}:cancel", }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/notebookExecutionJobs/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/notebookRuntimes/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/notebookRuntimeTemplates/*/operations/*}:cancel", + }, { "method": "post", "uri": "/ui/{name=projects/*/locations/*/persistentResources/*/operations/*}:cancel", @@ -5566,6 +5698,18 @@ def __call__( "method": "post", "uri": "/v1beta1/{name=projects/*/locations/*/models/*/evaluations/*/operations/*}:cancel", }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/notebookExecutionJobs/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/notebookRuntimes/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/notebookRuntimeTemplates/*/operations/*}:cancel", + }, { "method": "post", "uri": "/v1beta1/{name=projects/*/locations/*/persistentResources/*/operations/*}:cancel", @@ -5801,6 +5945,18 @@ def __call__( "method": "delete", "uri": "/ui/{name=projects/*/locations/*/models/*/evaluations/*/operations/*}", }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/notebookExecutionJobs/*/operations/*}", + }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/notebookRuntimes/*/operations/*}", + }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/notebookRuntimeTemplates/*/operations/*}", + }, { "method": "delete", "uri": "/ui/{name=projects/*/locations/*/persistentResources/*/operations/*}", @@ -5989,6 +6145,18 @@ def __call__( "method": "delete", "uri": "/v1beta1/{name=projects/*/locations/*/models/*/evaluations/*/operations/*}", }, + { + "method": "delete", + "uri": "/v1beta1/{name=projects/*/locations/*/notebookExecutionJobs/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1beta1/{name=projects/*/locations/*/notebookRuntimes/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1beta1/{name=projects/*/locations/*/notebookRuntimeTemplates/*/operations/*}", + }, { "method": "delete", "uri": 
"/v1beta1/{name=projects/*/locations/*/persistentResources/*/operations/*}", @@ -6255,6 +6423,18 @@ def __call__( "method": "get", "uri": "/ui/{name=projects/*/locations/*/models/*/evaluations/*/operations/*}", }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/notebookExecutionJobs/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/notebookRuntimes/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/notebookRuntimeTemplates/*/operations/*}", + }, { "method": "get", "uri": "/ui/{name=projects/*/locations/*/persistentResources/*/operations/*}", @@ -6443,6 +6623,18 @@ def __call__( "method": "get", "uri": "/v1beta1/{name=projects/*/locations/*/models/*/evaluations/*/operations/*}", }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/notebookExecutionJobs/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/notebookRuntimes/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/notebookRuntimeTemplates/*/operations/*}", + }, { "method": "get", "uri": "/v1beta1/{name=projects/*/locations/*/persistentResources/*/operations/*}", @@ -6706,6 +6898,18 @@ def __call__( "method": "get", "uri": "/ui/{name=projects/*/locations/*/models/*/evaluations/*}/operations", }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/notebookExecutionJobs/*}/operations", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/notebookRuntimes/*}/operations", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/notebookRuntimeTemplates/*}/operations", + }, { "method": "get", "uri": "/ui/{name=projects/*/locations/*/studies/*}/operations", @@ -6894,6 +7098,18 @@ def __call__( "method": "get", "uri": "/v1beta1/{name=projects/*/locations/*/models/*/evaluations/*}/operations", }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/notebookExecutionJobs/*}/operations", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/notebookRuntimes/*}/operations", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/notebookRuntimeTemplates/*}/operations", + }, { "method": "get", "uri": "/v1beta1/{name=projects/*/locations/*/persistentResources/*}/operations", @@ -7157,6 +7373,18 @@ def __call__( "method": "post", "uri": "/ui/{name=projects/*/locations/*/models/*/evaluations/*/operations/*}:wait", }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/notebookExecutionJobs/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/notebookRuntimes/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/notebookRuntimeTemplates/*/operations/*}:wait", + }, { "method": "post", "uri": "/ui/{name=projects/*/locations/*/studies/*/operations/*}:wait", @@ -7345,6 +7573,18 @@ def __call__( "method": "post", "uri": "/v1beta1/{name=projects/*/locations/*/models/*/evaluations/*/operations/*}:wait", }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/notebookExecutionJobs/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/notebookRuntimes/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/notebookRuntimeTemplates/*/operations/*}:wait", + }, { "method": "post", "uri": 
"/v1beta1/{name=projects/*/locations/*/persistentResources/*/operations/*}:wait", diff --git a/google/cloud/aiplatform_v1beta1/services/notebook_service/async_client.py b/google/cloud/aiplatform_v1beta1/services/notebook_service/async_client.py index 9076862b96..b0cd94ed52 100644 --- a/google/cloud/aiplatform_v1beta1/services/notebook_service/async_client.py +++ b/google/cloud/aiplatform_v1beta1/services/notebook_service/async_client.py @@ -38,6 +38,7 @@ from google.auth import credentials as ga_credentials # type: ignore from google.oauth2 import service_account # type: ignore + try: OptionalRetry = Union[retries.AsyncRetry, gapic_v1.method._MethodDefault, None] except AttributeError: # pragma: NO COVER diff --git a/google/cloud/aiplatform_v1beta1/services/notebook_service/transports/rest.py b/google/cloud/aiplatform_v1beta1/services/notebook_service/transports/rest.py index e64b0668e7..d000d33772 100644 --- a/google/cloud/aiplatform_v1beta1/services/notebook_service/transports/rest.py +++ b/google/cloud/aiplatform_v1beta1/services/notebook_service/transports/rest.py @@ -1034,6 +1034,18 @@ def operations_client(self) -> operations_v1.AbstractOperationsClient: "method": "post", "uri": "/ui/{name=projects/*/locations/*/models/*/evaluations/*/operations/*}:cancel", }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/notebookExecutionJobs/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/notebookRuntimes/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/notebookRuntimeTemplates/*/operations/*}:cancel", + }, { "method": "post", "uri": "/ui/{name=projects/*/locations/*/persistentResources/*/operations/*}:cancel", @@ -1202,6 +1214,18 @@ def operations_client(self) -> operations_v1.AbstractOperationsClient: "method": "post", "uri": "/v1beta1/{name=projects/*/locations/*/models/*/evaluations/*/operations/*}:cancel", }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/notebookExecutionJobs/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/notebookRuntimes/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/notebookRuntimeTemplates/*/operations/*}:cancel", + }, { "method": "post", "uri": "/v1beta1/{name=projects/*/locations/*/persistentResources/*/operations/*}:cancel", @@ -1380,6 +1404,18 @@ def operations_client(self) -> operations_v1.AbstractOperationsClient: "method": "delete", "uri": "/ui/{name=projects/*/locations/*/models/*/evaluations/*/operations/*}", }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/notebookExecutionJobs/*/operations/*}", + }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/notebookRuntimes/*/operations/*}", + }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/notebookRuntimeTemplates/*/operations/*}", + }, { "method": "delete", "uri": "/ui/{name=projects/*/locations/*/persistentResources/*/operations/*}", @@ -1568,6 +1604,18 @@ def operations_client(self) -> operations_v1.AbstractOperationsClient: "method": "delete", "uri": "/v1beta1/{name=projects/*/locations/*/models/*/evaluations/*/operations/*}", }, + { + "method": "delete", + "uri": "/v1beta1/{name=projects/*/locations/*/notebookExecutionJobs/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1beta1/{name=projects/*/locations/*/notebookRuntimes/*/operations/*}", + }, + { + "method": "delete", + "uri": 
"/v1beta1/{name=projects/*/locations/*/notebookRuntimeTemplates/*/operations/*}", + }, { "method": "delete", "uri": "/v1beta1/{name=projects/*/locations/*/persistentResources/*/operations/*}", @@ -1774,6 +1822,18 @@ def operations_client(self) -> operations_v1.AbstractOperationsClient: "method": "get", "uri": "/ui/{name=projects/*/locations/*/models/*/evaluations/*/operations/*}", }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/notebookExecutionJobs/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/notebookRuntimes/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/notebookRuntimeTemplates/*/operations/*}", + }, { "method": "get", "uri": "/ui/{name=projects/*/locations/*/persistentResources/*/operations/*}", @@ -1962,6 +2022,18 @@ def operations_client(self) -> operations_v1.AbstractOperationsClient: "method": "get", "uri": "/v1beta1/{name=projects/*/locations/*/models/*/evaluations/*/operations/*}", }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/notebookExecutionJobs/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/notebookRuntimes/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/notebookRuntimeTemplates/*/operations/*}", + }, { "method": "get", "uri": "/v1beta1/{name=projects/*/locations/*/persistentResources/*/operations/*}", @@ -2164,6 +2236,18 @@ def operations_client(self) -> operations_v1.AbstractOperationsClient: "method": "get", "uri": "/ui/{name=projects/*/locations/*/models/*/evaluations/*}/operations", }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/notebookExecutionJobs/*}/operations", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/notebookRuntimes/*}/operations", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/notebookRuntimeTemplates/*}/operations", + }, { "method": "get", "uri": "/ui/{name=projects/*/locations/*/studies/*}/operations", @@ -2352,6 +2436,18 @@ def operations_client(self) -> operations_v1.AbstractOperationsClient: "method": "get", "uri": "/v1beta1/{name=projects/*/locations/*/models/*/evaluations/*}/operations", }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/notebookExecutionJobs/*}/operations", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/notebookRuntimes/*}/operations", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/notebookRuntimeTemplates/*}/operations", + }, { "method": "get", "uri": "/v1beta1/{name=projects/*/locations/*/persistentResources/*}/operations", @@ -2554,6 +2650,18 @@ def operations_client(self) -> operations_v1.AbstractOperationsClient: "method": "post", "uri": "/ui/{name=projects/*/locations/*/models/*/evaluations/*/operations/*}:wait", }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/notebookExecutionJobs/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/notebookRuntimes/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/notebookRuntimeTemplates/*/operations/*}:wait", + }, { "method": "post", "uri": "/ui/{name=projects/*/locations/*/studies/*/operations/*}:wait", @@ -2742,6 +2850,18 @@ def operations_client(self) -> operations_v1.AbstractOperationsClient: "method": "post", "uri": "/v1beta1/{name=projects/*/locations/*/models/*/evaluations/*/operations/*}:wait", }, + { + 
"method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/notebookExecutionJobs/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/notebookRuntimes/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/notebookRuntimeTemplates/*/operations/*}:wait", + }, { "method": "post", "uri": "/v1beta1/{name=projects/*/locations/*/persistentResources/*/operations/*}:wait", @@ -5054,6 +5174,18 @@ def __call__( "method": "post", "uri": "/ui/{name=projects/*/locations/*/models/*/evaluations/*/operations/*}:cancel", }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/notebookExecutionJobs/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/notebookRuntimes/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/notebookRuntimeTemplates/*/operations/*}:cancel", + }, { "method": "post", "uri": "/ui/{name=projects/*/locations/*/persistentResources/*/operations/*}:cancel", @@ -5222,6 +5354,18 @@ def __call__( "method": "post", "uri": "/v1beta1/{name=projects/*/locations/*/models/*/evaluations/*/operations/*}:cancel", }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/notebookExecutionJobs/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/notebookRuntimes/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/notebookRuntimeTemplates/*/operations/*}:cancel", + }, { "method": "post", "uri": "/v1beta1/{name=projects/*/locations/*/persistentResources/*/operations/*}:cancel", @@ -5457,6 +5601,18 @@ def __call__( "method": "delete", "uri": "/ui/{name=projects/*/locations/*/models/*/evaluations/*/operations/*}", }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/notebookExecutionJobs/*/operations/*}", + }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/notebookRuntimes/*/operations/*}", + }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/notebookRuntimeTemplates/*/operations/*}", + }, { "method": "delete", "uri": "/ui/{name=projects/*/locations/*/persistentResources/*/operations/*}", @@ -5645,6 +5801,18 @@ def __call__( "method": "delete", "uri": "/v1beta1/{name=projects/*/locations/*/models/*/evaluations/*/operations/*}", }, + { + "method": "delete", + "uri": "/v1beta1/{name=projects/*/locations/*/notebookExecutionJobs/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1beta1/{name=projects/*/locations/*/notebookRuntimes/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1beta1/{name=projects/*/locations/*/notebookRuntimeTemplates/*/operations/*}", + }, { "method": "delete", "uri": "/v1beta1/{name=projects/*/locations/*/persistentResources/*/operations/*}", @@ -5911,6 +6079,18 @@ def __call__( "method": "get", "uri": "/ui/{name=projects/*/locations/*/models/*/evaluations/*/operations/*}", }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/notebookExecutionJobs/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/notebookRuntimes/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/notebookRuntimeTemplates/*/operations/*}", + }, { "method": "get", "uri": "/ui/{name=projects/*/locations/*/persistentResources/*/operations/*}", @@ -6099,6 +6279,18 @@ def __call__( "method": "get", "uri": 
"/v1beta1/{name=projects/*/locations/*/models/*/evaluations/*/operations/*}", }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/notebookExecutionJobs/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/notebookRuntimes/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/notebookRuntimeTemplates/*/operations/*}", + }, { "method": "get", "uri": "/v1beta1/{name=projects/*/locations/*/persistentResources/*/operations/*}", @@ -6362,6 +6554,18 @@ def __call__( "method": "get", "uri": "/ui/{name=projects/*/locations/*/models/*/evaluations/*}/operations", }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/notebookExecutionJobs/*}/operations", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/notebookRuntimes/*}/operations", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/notebookRuntimeTemplates/*}/operations", + }, { "method": "get", "uri": "/ui/{name=projects/*/locations/*/studies/*}/operations", @@ -6550,6 +6754,18 @@ def __call__( "method": "get", "uri": "/v1beta1/{name=projects/*/locations/*/models/*/evaluations/*}/operations", }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/notebookExecutionJobs/*}/operations", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/notebookRuntimes/*}/operations", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/notebookRuntimeTemplates/*}/operations", + }, { "method": "get", "uri": "/v1beta1/{name=projects/*/locations/*/persistentResources/*}/operations", @@ -6813,6 +7029,18 @@ def __call__( "method": "post", "uri": "/ui/{name=projects/*/locations/*/models/*/evaluations/*/operations/*}:wait", }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/notebookExecutionJobs/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/notebookRuntimes/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/notebookRuntimeTemplates/*/operations/*}:wait", + }, { "method": "post", "uri": "/ui/{name=projects/*/locations/*/studies/*/operations/*}:wait", @@ -7001,6 +7229,18 @@ def __call__( "method": "post", "uri": "/v1beta1/{name=projects/*/locations/*/models/*/evaluations/*/operations/*}:wait", }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/notebookExecutionJobs/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/notebookRuntimes/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/notebookRuntimeTemplates/*/operations/*}:wait", + }, { "method": "post", "uri": "/v1beta1/{name=projects/*/locations/*/persistentResources/*/operations/*}:wait", diff --git a/google/cloud/aiplatform_v1beta1/services/persistent_resource_service/async_client.py b/google/cloud/aiplatform_v1beta1/services/persistent_resource_service/async_client.py index 2203cc9bc8..0001ec8de5 100644 --- a/google/cloud/aiplatform_v1beta1/services/persistent_resource_service/async_client.py +++ b/google/cloud/aiplatform_v1beta1/services/persistent_resource_service/async_client.py @@ -38,6 +38,7 @@ from google.auth import credentials as ga_credentials # type: ignore from google.oauth2 import service_account # type: ignore + try: OptionalRetry = Union[retries.AsyncRetry, gapic_v1.method._MethodDefault, None] except AttributeError: # pragma: NO COVER diff --git 
a/google/cloud/aiplatform_v1beta1/services/persistent_resource_service/transports/rest.py b/google/cloud/aiplatform_v1beta1/services/persistent_resource_service/transports/rest.py index afb8d81d23..233d6fb00d 100644 --- a/google/cloud/aiplatform_v1beta1/services/persistent_resource_service/transports/rest.py +++ b/google/cloud/aiplatform_v1beta1/services/persistent_resource_service/transports/rest.py @@ -748,6 +748,18 @@ def operations_client(self) -> operations_v1.AbstractOperationsClient: "method": "post", "uri": "/ui/{name=projects/*/locations/*/models/*/evaluations/*/operations/*}:cancel", }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/notebookExecutionJobs/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/notebookRuntimes/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/notebookRuntimeTemplates/*/operations/*}:cancel", + }, { "method": "post", "uri": "/ui/{name=projects/*/locations/*/persistentResources/*/operations/*}:cancel", @@ -916,6 +928,18 @@ def operations_client(self) -> operations_v1.AbstractOperationsClient: "method": "post", "uri": "/v1beta1/{name=projects/*/locations/*/models/*/evaluations/*/operations/*}:cancel", }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/notebookExecutionJobs/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/notebookRuntimes/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/notebookRuntimeTemplates/*/operations/*}:cancel", + }, { "method": "post", "uri": "/v1beta1/{name=projects/*/locations/*/persistentResources/*/operations/*}:cancel", @@ -1094,6 +1118,18 @@ def operations_client(self) -> operations_v1.AbstractOperationsClient: "method": "delete", "uri": "/ui/{name=projects/*/locations/*/models/*/evaluations/*/operations/*}", }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/notebookExecutionJobs/*/operations/*}", + }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/notebookRuntimes/*/operations/*}", + }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/notebookRuntimeTemplates/*/operations/*}", + }, { "method": "delete", "uri": "/ui/{name=projects/*/locations/*/persistentResources/*/operations/*}", @@ -1282,6 +1318,18 @@ def operations_client(self) -> operations_v1.AbstractOperationsClient: "method": "delete", "uri": "/v1beta1/{name=projects/*/locations/*/models/*/evaluations/*/operations/*}", }, + { + "method": "delete", + "uri": "/v1beta1/{name=projects/*/locations/*/notebookExecutionJobs/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1beta1/{name=projects/*/locations/*/notebookRuntimes/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1beta1/{name=projects/*/locations/*/notebookRuntimeTemplates/*/operations/*}", + }, { "method": "delete", "uri": "/v1beta1/{name=projects/*/locations/*/persistentResources/*/operations/*}", @@ -1488,6 +1536,18 @@ def operations_client(self) -> operations_v1.AbstractOperationsClient: "method": "get", "uri": "/ui/{name=projects/*/locations/*/models/*/evaluations/*/operations/*}", }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/notebookExecutionJobs/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/notebookRuntimes/*/operations/*}", + }, + { + "method": "get", + "uri": 
"/ui/{name=projects/*/locations/*/notebookRuntimeTemplates/*/operations/*}", + }, { "method": "get", "uri": "/ui/{name=projects/*/locations/*/persistentResources/*/operations/*}", @@ -1676,6 +1736,18 @@ def operations_client(self) -> operations_v1.AbstractOperationsClient: "method": "get", "uri": "/v1beta1/{name=projects/*/locations/*/models/*/evaluations/*/operations/*}", }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/notebookExecutionJobs/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/notebookRuntimes/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/notebookRuntimeTemplates/*/operations/*}", + }, { "method": "get", "uri": "/v1beta1/{name=projects/*/locations/*/persistentResources/*/operations/*}", @@ -1878,6 +1950,18 @@ def operations_client(self) -> operations_v1.AbstractOperationsClient: "method": "get", "uri": "/ui/{name=projects/*/locations/*/models/*/evaluations/*}/operations", }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/notebookExecutionJobs/*}/operations", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/notebookRuntimes/*}/operations", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/notebookRuntimeTemplates/*}/operations", + }, { "method": "get", "uri": "/ui/{name=projects/*/locations/*/studies/*}/operations", @@ -2066,6 +2150,18 @@ def operations_client(self) -> operations_v1.AbstractOperationsClient: "method": "get", "uri": "/v1beta1/{name=projects/*/locations/*/models/*/evaluations/*}/operations", }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/notebookExecutionJobs/*}/operations", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/notebookRuntimes/*}/operations", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/notebookRuntimeTemplates/*}/operations", + }, { "method": "get", "uri": "/v1beta1/{name=projects/*/locations/*/persistentResources/*}/operations", @@ -2268,6 +2364,18 @@ def operations_client(self) -> operations_v1.AbstractOperationsClient: "method": "post", "uri": "/ui/{name=projects/*/locations/*/models/*/evaluations/*/operations/*}:wait", }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/notebookExecutionJobs/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/notebookRuntimes/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/notebookRuntimeTemplates/*/operations/*}:wait", + }, { "method": "post", "uri": "/ui/{name=projects/*/locations/*/studies/*/operations/*}:wait", @@ -2456,6 +2564,18 @@ def operations_client(self) -> operations_v1.AbstractOperationsClient: "method": "post", "uri": "/v1beta1/{name=projects/*/locations/*/models/*/evaluations/*/operations/*}:wait", }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/notebookExecutionJobs/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/notebookRuntimes/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/notebookRuntimeTemplates/*/operations/*}:wait", + }, { "method": "post", "uri": "/v1beta1/{name=projects/*/locations/*/persistentResources/*/operations/*}:wait", @@ -3865,6 +3985,18 @@ def __call__( "method": "post", "uri": "/ui/{name=projects/*/locations/*/models/*/evaluations/*/operations/*}:cancel", }, + { + "method": "post", 
+ "uri": "/ui/{name=projects/*/locations/*/notebookExecutionJobs/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/notebookRuntimes/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/notebookRuntimeTemplates/*/operations/*}:cancel", + }, { "method": "post", "uri": "/ui/{name=projects/*/locations/*/persistentResources/*/operations/*}:cancel", @@ -4033,6 +4165,18 @@ def __call__( "method": "post", "uri": "/v1beta1/{name=projects/*/locations/*/models/*/evaluations/*/operations/*}:cancel", }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/notebookExecutionJobs/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/notebookRuntimes/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/notebookRuntimeTemplates/*/operations/*}:cancel", + }, { "method": "post", "uri": "/v1beta1/{name=projects/*/locations/*/persistentResources/*/operations/*}:cancel", @@ -4268,6 +4412,18 @@ def __call__( "method": "delete", "uri": "/ui/{name=projects/*/locations/*/models/*/evaluations/*/operations/*}", }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/notebookExecutionJobs/*/operations/*}", + }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/notebookRuntimes/*/operations/*}", + }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/notebookRuntimeTemplates/*/operations/*}", + }, { "method": "delete", "uri": "/ui/{name=projects/*/locations/*/persistentResources/*/operations/*}", @@ -4456,6 +4612,18 @@ def __call__( "method": "delete", "uri": "/v1beta1/{name=projects/*/locations/*/models/*/evaluations/*/operations/*}", }, + { + "method": "delete", + "uri": "/v1beta1/{name=projects/*/locations/*/notebookExecutionJobs/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1beta1/{name=projects/*/locations/*/notebookRuntimes/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1beta1/{name=projects/*/locations/*/notebookRuntimeTemplates/*/operations/*}", + }, { "method": "delete", "uri": "/v1beta1/{name=projects/*/locations/*/persistentResources/*/operations/*}", @@ -4722,6 +4890,18 @@ def __call__( "method": "get", "uri": "/ui/{name=projects/*/locations/*/models/*/evaluations/*/operations/*}", }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/notebookExecutionJobs/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/notebookRuntimes/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/notebookRuntimeTemplates/*/operations/*}", + }, { "method": "get", "uri": "/ui/{name=projects/*/locations/*/persistentResources/*/operations/*}", @@ -4910,6 +5090,18 @@ def __call__( "method": "get", "uri": "/v1beta1/{name=projects/*/locations/*/models/*/evaluations/*/operations/*}", }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/notebookExecutionJobs/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/notebookRuntimes/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/notebookRuntimeTemplates/*/operations/*}", + }, { "method": "get", "uri": "/v1beta1/{name=projects/*/locations/*/persistentResources/*/operations/*}", @@ -5173,6 +5365,18 @@ def __call__( "method": "get", "uri": "/ui/{name=projects/*/locations/*/models/*/evaluations/*}/operations", }, + { + 
"method": "get", + "uri": "/ui/{name=projects/*/locations/*/notebookExecutionJobs/*}/operations", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/notebookRuntimes/*}/operations", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/notebookRuntimeTemplates/*}/operations", + }, { "method": "get", "uri": "/ui/{name=projects/*/locations/*/studies/*}/operations", @@ -5361,6 +5565,18 @@ def __call__( "method": "get", "uri": "/v1beta1/{name=projects/*/locations/*/models/*/evaluations/*}/operations", }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/notebookExecutionJobs/*}/operations", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/notebookRuntimes/*}/operations", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/notebookRuntimeTemplates/*}/operations", + }, { "method": "get", "uri": "/v1beta1/{name=projects/*/locations/*/persistentResources/*}/operations", @@ -5624,6 +5840,18 @@ def __call__( "method": "post", "uri": "/ui/{name=projects/*/locations/*/models/*/evaluations/*/operations/*}:wait", }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/notebookExecutionJobs/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/notebookRuntimes/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/notebookRuntimeTemplates/*/operations/*}:wait", + }, { "method": "post", "uri": "/ui/{name=projects/*/locations/*/studies/*/operations/*}:wait", @@ -5812,6 +6040,18 @@ def __call__( "method": "post", "uri": "/v1beta1/{name=projects/*/locations/*/models/*/evaluations/*/operations/*}:wait", }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/notebookExecutionJobs/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/notebookRuntimes/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/notebookRuntimeTemplates/*/operations/*}:wait", + }, { "method": "post", "uri": "/v1beta1/{name=projects/*/locations/*/persistentResources/*/operations/*}:wait", diff --git a/google/cloud/aiplatform_v1beta1/services/pipeline_service/async_client.py b/google/cloud/aiplatform_v1beta1/services/pipeline_service/async_client.py index 4925666eec..c42543dc55 100644 --- a/google/cloud/aiplatform_v1beta1/services/pipeline_service/async_client.py +++ b/google/cloud/aiplatform_v1beta1/services/pipeline_service/async_client.py @@ -38,6 +38,7 @@ from google.auth import credentials as ga_credentials # type: ignore from google.oauth2 import service_account # type: ignore + try: OptionalRetry = Union[retries.AsyncRetry, gapic_v1.method._MethodDefault, None] except AttributeError: # pragma: NO COVER diff --git a/google/cloud/aiplatform_v1beta1/services/pipeline_service/transports/rest.py b/google/cloud/aiplatform_v1beta1/services/pipeline_service/transports/rest.py index f7ad4e0f9c..aad6b80c66 100644 --- a/google/cloud/aiplatform_v1beta1/services/pipeline_service/transports/rest.py +++ b/google/cloud/aiplatform_v1beta1/services/pipeline_service/transports/rest.py @@ -906,6 +906,18 @@ def operations_client(self) -> operations_v1.AbstractOperationsClient: "method": "post", "uri": "/ui/{name=projects/*/locations/*/models/*/evaluations/*/operations/*}:cancel", }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/notebookExecutionJobs/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": 
"/ui/{name=projects/*/locations/*/notebookRuntimes/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/notebookRuntimeTemplates/*/operations/*}:cancel", + }, { "method": "post", "uri": "/ui/{name=projects/*/locations/*/persistentResources/*/operations/*}:cancel", @@ -1074,6 +1086,18 @@ def operations_client(self) -> operations_v1.AbstractOperationsClient: "method": "post", "uri": "/v1beta1/{name=projects/*/locations/*/models/*/evaluations/*/operations/*}:cancel", }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/notebookExecutionJobs/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/notebookRuntimes/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/notebookRuntimeTemplates/*/operations/*}:cancel", + }, { "method": "post", "uri": "/v1beta1/{name=projects/*/locations/*/persistentResources/*/operations/*}:cancel", @@ -1252,6 +1276,18 @@ def operations_client(self) -> operations_v1.AbstractOperationsClient: "method": "delete", "uri": "/ui/{name=projects/*/locations/*/models/*/evaluations/*/operations/*}", }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/notebookExecutionJobs/*/operations/*}", + }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/notebookRuntimes/*/operations/*}", + }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/notebookRuntimeTemplates/*/operations/*}", + }, { "method": "delete", "uri": "/ui/{name=projects/*/locations/*/persistentResources/*/operations/*}", @@ -1440,6 +1476,18 @@ def operations_client(self) -> operations_v1.AbstractOperationsClient: "method": "delete", "uri": "/v1beta1/{name=projects/*/locations/*/models/*/evaluations/*/operations/*}", }, + { + "method": "delete", + "uri": "/v1beta1/{name=projects/*/locations/*/notebookExecutionJobs/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1beta1/{name=projects/*/locations/*/notebookRuntimes/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1beta1/{name=projects/*/locations/*/notebookRuntimeTemplates/*/operations/*}", + }, { "method": "delete", "uri": "/v1beta1/{name=projects/*/locations/*/persistentResources/*/operations/*}", @@ -1646,6 +1694,18 @@ def operations_client(self) -> operations_v1.AbstractOperationsClient: "method": "get", "uri": "/ui/{name=projects/*/locations/*/models/*/evaluations/*/operations/*}", }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/notebookExecutionJobs/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/notebookRuntimes/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/notebookRuntimeTemplates/*/operations/*}", + }, { "method": "get", "uri": "/ui/{name=projects/*/locations/*/persistentResources/*/operations/*}", @@ -1834,6 +1894,18 @@ def operations_client(self) -> operations_v1.AbstractOperationsClient: "method": "get", "uri": "/v1beta1/{name=projects/*/locations/*/models/*/evaluations/*/operations/*}", }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/notebookExecutionJobs/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/notebookRuntimes/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/notebookRuntimeTemplates/*/operations/*}", + }, { "method": "get", "uri": 
"/v1beta1/{name=projects/*/locations/*/persistentResources/*/operations/*}", @@ -2036,6 +2108,18 @@ def operations_client(self) -> operations_v1.AbstractOperationsClient: "method": "get", "uri": "/ui/{name=projects/*/locations/*/models/*/evaluations/*}/operations", }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/notebookExecutionJobs/*}/operations", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/notebookRuntimes/*}/operations", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/notebookRuntimeTemplates/*}/operations", + }, { "method": "get", "uri": "/ui/{name=projects/*/locations/*/studies/*}/operations", @@ -2224,6 +2308,18 @@ def operations_client(self) -> operations_v1.AbstractOperationsClient: "method": "get", "uri": "/v1beta1/{name=projects/*/locations/*/models/*/evaluations/*}/operations", }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/notebookExecutionJobs/*}/operations", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/notebookRuntimes/*}/operations", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/notebookRuntimeTemplates/*}/operations", + }, { "method": "get", "uri": "/v1beta1/{name=projects/*/locations/*/persistentResources/*}/operations", @@ -2426,6 +2522,18 @@ def operations_client(self) -> operations_v1.AbstractOperationsClient: "method": "post", "uri": "/ui/{name=projects/*/locations/*/models/*/evaluations/*/operations/*}:wait", }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/notebookExecutionJobs/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/notebookRuntimes/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/notebookRuntimeTemplates/*/operations/*}:wait", + }, { "method": "post", "uri": "/ui/{name=projects/*/locations/*/studies/*/operations/*}:wait", @@ -2614,6 +2722,18 @@ def operations_client(self) -> operations_v1.AbstractOperationsClient: "method": "post", "uri": "/v1beta1/{name=projects/*/locations/*/models/*/evaluations/*/operations/*}:wait", }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/notebookExecutionJobs/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/notebookRuntimes/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/notebookRuntimeTemplates/*/operations/*}:wait", + }, { "method": "post", "uri": "/v1beta1/{name=projects/*/locations/*/persistentResources/*/operations/*}:wait", @@ -4581,6 +4701,18 @@ def __call__( "method": "post", "uri": "/ui/{name=projects/*/locations/*/models/*/evaluations/*/operations/*}:cancel", }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/notebookExecutionJobs/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/notebookRuntimes/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/notebookRuntimeTemplates/*/operations/*}:cancel", + }, { "method": "post", "uri": "/ui/{name=projects/*/locations/*/persistentResources/*/operations/*}:cancel", @@ -4749,6 +4881,18 @@ def __call__( "method": "post", "uri": "/v1beta1/{name=projects/*/locations/*/models/*/evaluations/*/operations/*}:cancel", }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/notebookExecutionJobs/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": 
"/v1beta1/{name=projects/*/locations/*/notebookRuntimes/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/notebookRuntimeTemplates/*/operations/*}:cancel", + }, { "method": "post", "uri": "/v1beta1/{name=projects/*/locations/*/persistentResources/*/operations/*}:cancel", @@ -4984,6 +5128,18 @@ def __call__( "method": "delete", "uri": "/ui/{name=projects/*/locations/*/models/*/evaluations/*/operations/*}", }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/notebookExecutionJobs/*/operations/*}", + }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/notebookRuntimes/*/operations/*}", + }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/notebookRuntimeTemplates/*/operations/*}", + }, { "method": "delete", "uri": "/ui/{name=projects/*/locations/*/persistentResources/*/operations/*}", @@ -5172,6 +5328,18 @@ def __call__( "method": "delete", "uri": "/v1beta1/{name=projects/*/locations/*/models/*/evaluations/*/operations/*}", }, + { + "method": "delete", + "uri": "/v1beta1/{name=projects/*/locations/*/notebookExecutionJobs/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1beta1/{name=projects/*/locations/*/notebookRuntimes/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1beta1/{name=projects/*/locations/*/notebookRuntimeTemplates/*/operations/*}", + }, { "method": "delete", "uri": "/v1beta1/{name=projects/*/locations/*/persistentResources/*/operations/*}", @@ -5438,6 +5606,18 @@ def __call__( "method": "get", "uri": "/ui/{name=projects/*/locations/*/models/*/evaluations/*/operations/*}", }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/notebookExecutionJobs/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/notebookRuntimes/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/notebookRuntimeTemplates/*/operations/*}", + }, { "method": "get", "uri": "/ui/{name=projects/*/locations/*/persistentResources/*/operations/*}", @@ -5626,6 +5806,18 @@ def __call__( "method": "get", "uri": "/v1beta1/{name=projects/*/locations/*/models/*/evaluations/*/operations/*}", }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/notebookExecutionJobs/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/notebookRuntimes/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/notebookRuntimeTemplates/*/operations/*}", + }, { "method": "get", "uri": "/v1beta1/{name=projects/*/locations/*/persistentResources/*/operations/*}", @@ -5889,6 +6081,18 @@ def __call__( "method": "get", "uri": "/ui/{name=projects/*/locations/*/models/*/evaluations/*}/operations", }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/notebookExecutionJobs/*}/operations", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/notebookRuntimes/*}/operations", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/notebookRuntimeTemplates/*}/operations", + }, { "method": "get", "uri": "/ui/{name=projects/*/locations/*/studies/*}/operations", @@ -6077,6 +6281,18 @@ def __call__( "method": "get", "uri": "/v1beta1/{name=projects/*/locations/*/models/*/evaluations/*}/operations", }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/notebookExecutionJobs/*}/operations", + }, + { + "method": "get", + "uri": 
"/v1beta1/{name=projects/*/locations/*/notebookRuntimes/*}/operations", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/notebookRuntimeTemplates/*}/operations", + }, { "method": "get", "uri": "/v1beta1/{name=projects/*/locations/*/persistentResources/*}/operations", @@ -6340,6 +6556,18 @@ def __call__( "method": "post", "uri": "/ui/{name=projects/*/locations/*/models/*/evaluations/*/operations/*}:wait", }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/notebookExecutionJobs/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/notebookRuntimes/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/notebookRuntimeTemplates/*/operations/*}:wait", + }, { "method": "post", "uri": "/ui/{name=projects/*/locations/*/studies/*/operations/*}:wait", @@ -6528,6 +6756,18 @@ def __call__( "method": "post", "uri": "/v1beta1/{name=projects/*/locations/*/models/*/evaluations/*/operations/*}:wait", }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/notebookExecutionJobs/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/notebookRuntimes/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/notebookRuntimeTemplates/*/operations/*}:wait", + }, { "method": "post", "uri": "/v1beta1/{name=projects/*/locations/*/persistentResources/*/operations/*}:wait", diff --git a/google/cloud/aiplatform_v1beta1/services/prediction_service/async_client.py b/google/cloud/aiplatform_v1beta1/services/prediction_service/async_client.py index 6c39e38a70..3164e665ea 100644 --- a/google/cloud/aiplatform_v1beta1/services/prediction_service/async_client.py +++ b/google/cloud/aiplatform_v1beta1/services/prediction_service/async_client.py @@ -41,6 +41,7 @@ from google.auth import credentials as ga_credentials # type: ignore from google.oauth2 import service_account # type: ignore + try: OptionalRetry = Union[retries.AsyncRetry, gapic_v1.method._MethodDefault, None] except AttributeError: # pragma: NO COVER diff --git a/google/cloud/aiplatform_v1beta1/services/prediction_service/transports/rest.py b/google/cloud/aiplatform_v1beta1/services/prediction_service/transports/rest.py index cd2d6c2a80..efcec360ed 100644 --- a/google/cloud/aiplatform_v1beta1/services/prediction_service/transports/rest.py +++ b/google/cloud/aiplatform_v1beta1/services/prediction_service/transports/rest.py @@ -2668,6 +2668,18 @@ def __call__( "method": "post", "uri": "/ui/{name=projects/*/locations/*/models/*/evaluations/*/operations/*}:cancel", }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/notebookExecutionJobs/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/notebookRuntimes/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/notebookRuntimeTemplates/*/operations/*}:cancel", + }, { "method": "post", "uri": "/ui/{name=projects/*/locations/*/persistentResources/*/operations/*}:cancel", @@ -2836,6 +2848,18 @@ def __call__( "method": "post", "uri": "/v1beta1/{name=projects/*/locations/*/models/*/evaluations/*/operations/*}:cancel", }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/notebookExecutionJobs/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/notebookRuntimes/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": 
"/v1beta1/{name=projects/*/locations/*/notebookRuntimeTemplates/*/operations/*}:cancel", + }, { "method": "post", "uri": "/v1beta1/{name=projects/*/locations/*/persistentResources/*/operations/*}:cancel", @@ -3071,6 +3095,18 @@ def __call__( "method": "delete", "uri": "/ui/{name=projects/*/locations/*/models/*/evaluations/*/operations/*}", }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/notebookExecutionJobs/*/operations/*}", + }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/notebookRuntimes/*/operations/*}", + }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/notebookRuntimeTemplates/*/operations/*}", + }, { "method": "delete", "uri": "/ui/{name=projects/*/locations/*/persistentResources/*/operations/*}", @@ -3259,6 +3295,18 @@ def __call__( "method": "delete", "uri": "/v1beta1/{name=projects/*/locations/*/models/*/evaluations/*/operations/*}", }, + { + "method": "delete", + "uri": "/v1beta1/{name=projects/*/locations/*/notebookExecutionJobs/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1beta1/{name=projects/*/locations/*/notebookRuntimes/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1beta1/{name=projects/*/locations/*/notebookRuntimeTemplates/*/operations/*}", + }, { "method": "delete", "uri": "/v1beta1/{name=projects/*/locations/*/persistentResources/*/operations/*}", @@ -3525,6 +3573,18 @@ def __call__( "method": "get", "uri": "/ui/{name=projects/*/locations/*/models/*/evaluations/*/operations/*}", }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/notebookExecutionJobs/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/notebookRuntimes/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/notebookRuntimeTemplates/*/operations/*}", + }, { "method": "get", "uri": "/ui/{name=projects/*/locations/*/persistentResources/*/operations/*}", @@ -3713,6 +3773,18 @@ def __call__( "method": "get", "uri": "/v1beta1/{name=projects/*/locations/*/models/*/evaluations/*/operations/*}", }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/notebookExecutionJobs/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/notebookRuntimes/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/notebookRuntimeTemplates/*/operations/*}", + }, { "method": "get", "uri": "/v1beta1/{name=projects/*/locations/*/persistentResources/*/operations/*}", @@ -3976,6 +4048,18 @@ def __call__( "method": "get", "uri": "/ui/{name=projects/*/locations/*/models/*/evaluations/*}/operations", }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/notebookExecutionJobs/*}/operations", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/notebookRuntimes/*}/operations", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/notebookRuntimeTemplates/*}/operations", + }, { "method": "get", "uri": "/ui/{name=projects/*/locations/*/studies/*}/operations", @@ -4164,6 +4248,18 @@ def __call__( "method": "get", "uri": "/v1beta1/{name=projects/*/locations/*/models/*/evaluations/*}/operations", }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/notebookExecutionJobs/*}/operations", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/notebookRuntimes/*}/operations", + }, + { + "method": "get", + "uri": 
"/v1beta1/{name=projects/*/locations/*/notebookRuntimeTemplates/*}/operations", + }, { "method": "get", "uri": "/v1beta1/{name=projects/*/locations/*/persistentResources/*}/operations", @@ -4427,6 +4523,18 @@ def __call__( "method": "post", "uri": "/ui/{name=projects/*/locations/*/models/*/evaluations/*/operations/*}:wait", }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/notebookExecutionJobs/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/notebookRuntimes/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/notebookRuntimeTemplates/*/operations/*}:wait", + }, { "method": "post", "uri": "/ui/{name=projects/*/locations/*/studies/*/operations/*}:wait", @@ -4615,6 +4723,18 @@ def __call__( "method": "post", "uri": "/v1beta1/{name=projects/*/locations/*/models/*/evaluations/*/operations/*}:wait", }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/notebookExecutionJobs/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/notebookRuntimes/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/notebookRuntimeTemplates/*/operations/*}:wait", + }, { "method": "post", "uri": "/v1beta1/{name=projects/*/locations/*/persistentResources/*/operations/*}:wait", diff --git a/google/cloud/aiplatform_v1beta1/services/reasoning_engine_execution_service/async_client.py b/google/cloud/aiplatform_v1beta1/services/reasoning_engine_execution_service/async_client.py index 573bdee799..d46dc9d9a6 100644 --- a/google/cloud/aiplatform_v1beta1/services/reasoning_engine_execution_service/async_client.py +++ b/google/cloud/aiplatform_v1beta1/services/reasoning_engine_execution_service/async_client.py @@ -38,6 +38,7 @@ from google.auth import credentials as ga_credentials # type: ignore from google.oauth2 import service_account # type: ignore + try: OptionalRetry = Union[retries.AsyncRetry, gapic_v1.method._MethodDefault, None] except AttributeError: # pragma: NO COVER diff --git a/google/cloud/aiplatform_v1beta1/services/reasoning_engine_execution_service/transports/rest.py b/google/cloud/aiplatform_v1beta1/services/reasoning_engine_execution_service/transports/rest.py index ae8ec433af..98a07b2e9b 100644 --- a/google/cloud/aiplatform_v1beta1/services/reasoning_engine_execution_service/transports/rest.py +++ b/google/cloud/aiplatform_v1beta1/services/reasoning_engine_execution_service/transports/rest.py @@ -1237,6 +1237,18 @@ def __call__( "method": "post", "uri": "/ui/{name=projects/*/locations/*/models/*/evaluations/*/operations/*}:cancel", }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/notebookExecutionJobs/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/notebookRuntimes/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/notebookRuntimeTemplates/*/operations/*}:cancel", + }, { "method": "post", "uri": "/ui/{name=projects/*/locations/*/persistentResources/*/operations/*}:cancel", @@ -1405,6 +1417,18 @@ def __call__( "method": "post", "uri": "/v1beta1/{name=projects/*/locations/*/models/*/evaluations/*/operations/*}:cancel", }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/notebookExecutionJobs/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/notebookRuntimes/*/operations/*}:cancel", + }, + { + "method": 
"post", + "uri": "/v1beta1/{name=projects/*/locations/*/notebookRuntimeTemplates/*/operations/*}:cancel", + }, { "method": "post", "uri": "/v1beta1/{name=projects/*/locations/*/persistentResources/*/operations/*}:cancel", @@ -1640,6 +1664,18 @@ def __call__( "method": "delete", "uri": "/ui/{name=projects/*/locations/*/models/*/evaluations/*/operations/*}", }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/notebookExecutionJobs/*/operations/*}", + }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/notebookRuntimes/*/operations/*}", + }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/notebookRuntimeTemplates/*/operations/*}", + }, { "method": "delete", "uri": "/ui/{name=projects/*/locations/*/persistentResources/*/operations/*}", @@ -1828,6 +1864,18 @@ def __call__( "method": "delete", "uri": "/v1beta1/{name=projects/*/locations/*/models/*/evaluations/*/operations/*}", }, + { + "method": "delete", + "uri": "/v1beta1/{name=projects/*/locations/*/notebookExecutionJobs/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1beta1/{name=projects/*/locations/*/notebookRuntimes/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1beta1/{name=projects/*/locations/*/notebookRuntimeTemplates/*/operations/*}", + }, { "method": "delete", "uri": "/v1beta1/{name=projects/*/locations/*/persistentResources/*/operations/*}", @@ -2094,6 +2142,18 @@ def __call__( "method": "get", "uri": "/ui/{name=projects/*/locations/*/models/*/evaluations/*/operations/*}", }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/notebookExecutionJobs/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/notebookRuntimes/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/notebookRuntimeTemplates/*/operations/*}", + }, { "method": "get", "uri": "/ui/{name=projects/*/locations/*/persistentResources/*/operations/*}", @@ -2282,6 +2342,18 @@ def __call__( "method": "get", "uri": "/v1beta1/{name=projects/*/locations/*/models/*/evaluations/*/operations/*}", }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/notebookExecutionJobs/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/notebookRuntimes/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/notebookRuntimeTemplates/*/operations/*}", + }, { "method": "get", "uri": "/v1beta1/{name=projects/*/locations/*/persistentResources/*/operations/*}", @@ -2545,6 +2617,18 @@ def __call__( "method": "get", "uri": "/ui/{name=projects/*/locations/*/models/*/evaluations/*}/operations", }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/notebookExecutionJobs/*}/operations", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/notebookRuntimes/*}/operations", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/notebookRuntimeTemplates/*}/operations", + }, { "method": "get", "uri": "/ui/{name=projects/*/locations/*/studies/*}/operations", @@ -2733,6 +2817,18 @@ def __call__( "method": "get", "uri": "/v1beta1/{name=projects/*/locations/*/models/*/evaluations/*}/operations", }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/notebookExecutionJobs/*}/operations", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/notebookRuntimes/*}/operations", + }, + { + "method": "get", + "uri": 
"/v1beta1/{name=projects/*/locations/*/notebookRuntimeTemplates/*}/operations", + }, { "method": "get", "uri": "/v1beta1/{name=projects/*/locations/*/persistentResources/*}/operations", @@ -2996,6 +3092,18 @@ def __call__( "method": "post", "uri": "/ui/{name=projects/*/locations/*/models/*/evaluations/*/operations/*}:wait", }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/notebookExecutionJobs/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/notebookRuntimes/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/notebookRuntimeTemplates/*/operations/*}:wait", + }, { "method": "post", "uri": "/ui/{name=projects/*/locations/*/studies/*/operations/*}:wait", @@ -3184,6 +3292,18 @@ def __call__( "method": "post", "uri": "/v1beta1/{name=projects/*/locations/*/models/*/evaluations/*/operations/*}:wait", }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/notebookExecutionJobs/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/notebookRuntimes/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/notebookRuntimeTemplates/*/operations/*}:wait", + }, { "method": "post", "uri": "/v1beta1/{name=projects/*/locations/*/persistentResources/*/operations/*}:wait", diff --git a/google/cloud/aiplatform_v1beta1/services/reasoning_engine_service/async_client.py b/google/cloud/aiplatform_v1beta1/services/reasoning_engine_service/async_client.py index c719b61ca4..eea1a30ea4 100644 --- a/google/cloud/aiplatform_v1beta1/services/reasoning_engine_service/async_client.py +++ b/google/cloud/aiplatform_v1beta1/services/reasoning_engine_service/async_client.py @@ -38,6 +38,7 @@ from google.auth import credentials as ga_credentials # type: ignore from google.oauth2 import service_account # type: ignore + try: OptionalRetry = Union[retries.AsyncRetry, gapic_v1.method._MethodDefault, None] except AttributeError: # pragma: NO COVER diff --git a/google/cloud/aiplatform_v1beta1/services/reasoning_engine_service/transports/rest.py b/google/cloud/aiplatform_v1beta1/services/reasoning_engine_service/transports/rest.py index 63a7e14db3..8906d50c32 100644 --- a/google/cloud/aiplatform_v1beta1/services/reasoning_engine_service/transports/rest.py +++ b/google/cloud/aiplatform_v1beta1/services/reasoning_engine_service/transports/rest.py @@ -708,6 +708,18 @@ def operations_client(self) -> operations_v1.AbstractOperationsClient: "method": "post", "uri": "/ui/{name=projects/*/locations/*/models/*/evaluations/*/operations/*}:cancel", }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/notebookExecutionJobs/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/notebookRuntimes/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/notebookRuntimeTemplates/*/operations/*}:cancel", + }, { "method": "post", "uri": "/ui/{name=projects/*/locations/*/persistentResources/*/operations/*}:cancel", @@ -876,6 +888,18 @@ def operations_client(self) -> operations_v1.AbstractOperationsClient: "method": "post", "uri": "/v1beta1/{name=projects/*/locations/*/models/*/evaluations/*/operations/*}:cancel", }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/notebookExecutionJobs/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": 
"/v1beta1/{name=projects/*/locations/*/notebookRuntimes/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/notebookRuntimeTemplates/*/operations/*}:cancel", + }, { "method": "post", "uri": "/v1beta1/{name=projects/*/locations/*/persistentResources/*/operations/*}:cancel", @@ -1054,6 +1078,18 @@ def operations_client(self) -> operations_v1.AbstractOperationsClient: "method": "delete", "uri": "/ui/{name=projects/*/locations/*/models/*/evaluations/*/operations/*}", }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/notebookExecutionJobs/*/operations/*}", + }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/notebookRuntimes/*/operations/*}", + }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/notebookRuntimeTemplates/*/operations/*}", + }, { "method": "delete", "uri": "/ui/{name=projects/*/locations/*/persistentResources/*/operations/*}", @@ -1242,6 +1278,18 @@ def operations_client(self) -> operations_v1.AbstractOperationsClient: "method": "delete", "uri": "/v1beta1/{name=projects/*/locations/*/models/*/evaluations/*/operations/*}", }, + { + "method": "delete", + "uri": "/v1beta1/{name=projects/*/locations/*/notebookExecutionJobs/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1beta1/{name=projects/*/locations/*/notebookRuntimes/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1beta1/{name=projects/*/locations/*/notebookRuntimeTemplates/*/operations/*}", + }, { "method": "delete", "uri": "/v1beta1/{name=projects/*/locations/*/persistentResources/*/operations/*}", @@ -1448,6 +1496,18 @@ def operations_client(self) -> operations_v1.AbstractOperationsClient: "method": "get", "uri": "/ui/{name=projects/*/locations/*/models/*/evaluations/*/operations/*}", }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/notebookExecutionJobs/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/notebookRuntimes/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/notebookRuntimeTemplates/*/operations/*}", + }, { "method": "get", "uri": "/ui/{name=projects/*/locations/*/persistentResources/*/operations/*}", @@ -1636,6 +1696,18 @@ def operations_client(self) -> operations_v1.AbstractOperationsClient: "method": "get", "uri": "/v1beta1/{name=projects/*/locations/*/models/*/evaluations/*/operations/*}", }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/notebookExecutionJobs/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/notebookRuntimes/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/notebookRuntimeTemplates/*/operations/*}", + }, { "method": "get", "uri": "/v1beta1/{name=projects/*/locations/*/persistentResources/*/operations/*}", @@ -1838,6 +1910,18 @@ def operations_client(self) -> operations_v1.AbstractOperationsClient: "method": "get", "uri": "/ui/{name=projects/*/locations/*/models/*/evaluations/*}/operations", }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/notebookExecutionJobs/*}/operations", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/notebookRuntimes/*}/operations", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/notebookRuntimeTemplates/*}/operations", + }, { "method": "get", "uri": "/ui/{name=projects/*/locations/*/studies/*}/operations", @@ -2026,6 +2110,18 @@ def operations_client(self) -> 
operations_v1.AbstractOperationsClient: "method": "get", "uri": "/v1beta1/{name=projects/*/locations/*/models/*/evaluations/*}/operations", }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/notebookExecutionJobs/*}/operations", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/notebookRuntimes/*}/operations", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/notebookRuntimeTemplates/*}/operations", + }, { "method": "get", "uri": "/v1beta1/{name=projects/*/locations/*/persistentResources/*}/operations", @@ -2228,6 +2324,18 @@ def operations_client(self) -> operations_v1.AbstractOperationsClient: "method": "post", "uri": "/ui/{name=projects/*/locations/*/models/*/evaluations/*/operations/*}:wait", }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/notebookExecutionJobs/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/notebookRuntimes/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/notebookRuntimeTemplates/*/operations/*}:wait", + }, { "method": "post", "uri": "/ui/{name=projects/*/locations/*/studies/*/operations/*}:wait", @@ -2416,6 +2524,18 @@ def operations_client(self) -> operations_v1.AbstractOperationsClient: "method": "post", "uri": "/v1beta1/{name=projects/*/locations/*/models/*/evaluations/*/operations/*}:wait", }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/notebookExecutionJobs/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/notebookRuntimes/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/notebookRuntimeTemplates/*/operations/*}:wait", + }, { "method": "post", "uri": "/v1beta1/{name=projects/*/locations/*/persistentResources/*/operations/*}:wait", @@ -3707,6 +3827,18 @@ def __call__( "method": "post", "uri": "/ui/{name=projects/*/locations/*/models/*/evaluations/*/operations/*}:cancel", }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/notebookExecutionJobs/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/notebookRuntimes/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/notebookRuntimeTemplates/*/operations/*}:cancel", + }, { "method": "post", "uri": "/ui/{name=projects/*/locations/*/persistentResources/*/operations/*}:cancel", @@ -3875,6 +4007,18 @@ def __call__( "method": "post", "uri": "/v1beta1/{name=projects/*/locations/*/models/*/evaluations/*/operations/*}:cancel", }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/notebookExecutionJobs/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/notebookRuntimes/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/notebookRuntimeTemplates/*/operations/*}:cancel", + }, { "method": "post", "uri": "/v1beta1/{name=projects/*/locations/*/persistentResources/*/operations/*}:cancel", @@ -4110,6 +4254,18 @@ def __call__( "method": "delete", "uri": "/ui/{name=projects/*/locations/*/models/*/evaluations/*/operations/*}", }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/notebookExecutionJobs/*/operations/*}", + }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/notebookRuntimes/*/operations/*}", + }, + { + "method": "delete", + "uri": 
"/ui/{name=projects/*/locations/*/notebookRuntimeTemplates/*/operations/*}", + }, { "method": "delete", "uri": "/ui/{name=projects/*/locations/*/persistentResources/*/operations/*}", @@ -4298,6 +4454,18 @@ def __call__( "method": "delete", "uri": "/v1beta1/{name=projects/*/locations/*/models/*/evaluations/*/operations/*}", }, + { + "method": "delete", + "uri": "/v1beta1/{name=projects/*/locations/*/notebookExecutionJobs/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1beta1/{name=projects/*/locations/*/notebookRuntimes/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1beta1/{name=projects/*/locations/*/notebookRuntimeTemplates/*/operations/*}", + }, { "method": "delete", "uri": "/v1beta1/{name=projects/*/locations/*/persistentResources/*/operations/*}", @@ -4564,6 +4732,18 @@ def __call__( "method": "get", "uri": "/ui/{name=projects/*/locations/*/models/*/evaluations/*/operations/*}", }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/notebookExecutionJobs/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/notebookRuntimes/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/notebookRuntimeTemplates/*/operations/*}", + }, { "method": "get", "uri": "/ui/{name=projects/*/locations/*/persistentResources/*/operations/*}", @@ -4752,6 +4932,18 @@ def __call__( "method": "get", "uri": "/v1beta1/{name=projects/*/locations/*/models/*/evaluations/*/operations/*}", }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/notebookExecutionJobs/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/notebookRuntimes/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/notebookRuntimeTemplates/*/operations/*}", + }, { "method": "get", "uri": "/v1beta1/{name=projects/*/locations/*/persistentResources/*/operations/*}", @@ -5015,6 +5207,18 @@ def __call__( "method": "get", "uri": "/ui/{name=projects/*/locations/*/models/*/evaluations/*}/operations", }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/notebookExecutionJobs/*}/operations", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/notebookRuntimes/*}/operations", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/notebookRuntimeTemplates/*}/operations", + }, { "method": "get", "uri": "/ui/{name=projects/*/locations/*/studies/*}/operations", @@ -5203,6 +5407,18 @@ def __call__( "method": "get", "uri": "/v1beta1/{name=projects/*/locations/*/models/*/evaluations/*}/operations", }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/notebookExecutionJobs/*}/operations", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/notebookRuntimes/*}/operations", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/notebookRuntimeTemplates/*}/operations", + }, { "method": "get", "uri": "/v1beta1/{name=projects/*/locations/*/persistentResources/*}/operations", @@ -5466,6 +5682,18 @@ def __call__( "method": "post", "uri": "/ui/{name=projects/*/locations/*/models/*/evaluations/*/operations/*}:wait", }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/notebookExecutionJobs/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/notebookRuntimes/*/operations/*}:wait", + }, + { + "method": "post", + "uri": 
"/ui/{name=projects/*/locations/*/notebookRuntimeTemplates/*/operations/*}:wait", + }, { "method": "post", "uri": "/ui/{name=projects/*/locations/*/studies/*/operations/*}:wait", @@ -5654,6 +5882,18 @@ def __call__( "method": "post", "uri": "/v1beta1/{name=projects/*/locations/*/models/*/evaluations/*/operations/*}:wait", }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/notebookExecutionJobs/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/notebookRuntimes/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/notebookRuntimeTemplates/*/operations/*}:wait", + }, { "method": "post", "uri": "/v1beta1/{name=projects/*/locations/*/persistentResources/*/operations/*}:wait", diff --git a/google/cloud/aiplatform_v1beta1/services/schedule_service/async_client.py b/google/cloud/aiplatform_v1beta1/services/schedule_service/async_client.py index 204a041dd1..a1b06fea58 100644 --- a/google/cloud/aiplatform_v1beta1/services/schedule_service/async_client.py +++ b/google/cloud/aiplatform_v1beta1/services/schedule_service/async_client.py @@ -38,6 +38,7 @@ from google.auth import credentials as ga_credentials # type: ignore from google.oauth2 import service_account # type: ignore + try: OptionalRetry = Union[retries.AsyncRetry, gapic_v1.method._MethodDefault, None] except AttributeError: # pragma: NO COVER diff --git a/google/cloud/aiplatform_v1beta1/services/schedule_service/transports/rest.py b/google/cloud/aiplatform_v1beta1/services/schedule_service/transports/rest.py index 0e473620da..dbef129414 100644 --- a/google/cloud/aiplatform_v1beta1/services/schedule_service/transports/rest.py +++ b/google/cloud/aiplatform_v1beta1/services/schedule_service/transports/rest.py @@ -732,6 +732,18 @@ def operations_client(self) -> operations_v1.AbstractOperationsClient: "method": "post", "uri": "/ui/{name=projects/*/locations/*/models/*/evaluations/*/operations/*}:cancel", }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/notebookExecutionJobs/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/notebookRuntimes/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/notebookRuntimeTemplates/*/operations/*}:cancel", + }, { "method": "post", "uri": "/ui/{name=projects/*/locations/*/persistentResources/*/operations/*}:cancel", @@ -900,6 +912,18 @@ def operations_client(self) -> operations_v1.AbstractOperationsClient: "method": "post", "uri": "/v1beta1/{name=projects/*/locations/*/models/*/evaluations/*/operations/*}:cancel", }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/notebookExecutionJobs/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/notebookRuntimes/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/notebookRuntimeTemplates/*/operations/*}:cancel", + }, { "method": "post", "uri": "/v1beta1/{name=projects/*/locations/*/persistentResources/*/operations/*}:cancel", @@ -1078,6 +1102,18 @@ def operations_client(self) -> operations_v1.AbstractOperationsClient: "method": "delete", "uri": "/ui/{name=projects/*/locations/*/models/*/evaluations/*/operations/*}", }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/notebookExecutionJobs/*/operations/*}", + }, + { + "method": "delete", + "uri": 
"/ui/{name=projects/*/locations/*/notebookRuntimes/*/operations/*}", + }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/notebookRuntimeTemplates/*/operations/*}", + }, { "method": "delete", "uri": "/ui/{name=projects/*/locations/*/persistentResources/*/operations/*}", @@ -1266,6 +1302,18 @@ def operations_client(self) -> operations_v1.AbstractOperationsClient: "method": "delete", "uri": "/v1beta1/{name=projects/*/locations/*/models/*/evaluations/*/operations/*}", }, + { + "method": "delete", + "uri": "/v1beta1/{name=projects/*/locations/*/notebookExecutionJobs/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1beta1/{name=projects/*/locations/*/notebookRuntimes/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1beta1/{name=projects/*/locations/*/notebookRuntimeTemplates/*/operations/*}", + }, { "method": "delete", "uri": "/v1beta1/{name=projects/*/locations/*/persistentResources/*/operations/*}", @@ -1472,6 +1520,18 @@ def operations_client(self) -> operations_v1.AbstractOperationsClient: "method": "get", "uri": "/ui/{name=projects/*/locations/*/models/*/evaluations/*/operations/*}", }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/notebookExecutionJobs/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/notebookRuntimes/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/notebookRuntimeTemplates/*/operations/*}", + }, { "method": "get", "uri": "/ui/{name=projects/*/locations/*/persistentResources/*/operations/*}", @@ -1660,6 +1720,18 @@ def operations_client(self) -> operations_v1.AbstractOperationsClient: "method": "get", "uri": "/v1beta1/{name=projects/*/locations/*/models/*/evaluations/*/operations/*}", }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/notebookExecutionJobs/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/notebookRuntimes/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/notebookRuntimeTemplates/*/operations/*}", + }, { "method": "get", "uri": "/v1beta1/{name=projects/*/locations/*/persistentResources/*/operations/*}", @@ -1862,6 +1934,18 @@ def operations_client(self) -> operations_v1.AbstractOperationsClient: "method": "get", "uri": "/ui/{name=projects/*/locations/*/models/*/evaluations/*}/operations", }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/notebookExecutionJobs/*}/operations", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/notebookRuntimes/*}/operations", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/notebookRuntimeTemplates/*}/operations", + }, { "method": "get", "uri": "/ui/{name=projects/*/locations/*/studies/*}/operations", @@ -2050,6 +2134,18 @@ def operations_client(self) -> operations_v1.AbstractOperationsClient: "method": "get", "uri": "/v1beta1/{name=projects/*/locations/*/models/*/evaluations/*}/operations", }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/notebookExecutionJobs/*}/operations", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/notebookRuntimes/*}/operations", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/notebookRuntimeTemplates/*}/operations", + }, { "method": "get", "uri": "/v1beta1/{name=projects/*/locations/*/persistentResources/*}/operations", @@ -2252,6 +2348,18 @@ def operations_client(self) -> 
operations_v1.AbstractOperationsClient: "method": "post", "uri": "/ui/{name=projects/*/locations/*/models/*/evaluations/*/operations/*}:wait", }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/notebookExecutionJobs/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/notebookRuntimes/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/notebookRuntimeTemplates/*/operations/*}:wait", + }, { "method": "post", "uri": "/ui/{name=projects/*/locations/*/studies/*/operations/*}:wait", @@ -2440,6 +2548,18 @@ def operations_client(self) -> operations_v1.AbstractOperationsClient: "method": "post", "uri": "/v1beta1/{name=projects/*/locations/*/models/*/evaluations/*/operations/*}:wait", }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/notebookExecutionJobs/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/notebookRuntimes/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/notebookRuntimeTemplates/*/operations/*}:wait", + }, { "method": "post", "uri": "/v1beta1/{name=projects/*/locations/*/persistentResources/*/operations/*}:wait", @@ -3880,6 +4000,18 @@ def __call__( "method": "post", "uri": "/ui/{name=projects/*/locations/*/models/*/evaluations/*/operations/*}:cancel", }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/notebookExecutionJobs/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/notebookRuntimes/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/notebookRuntimeTemplates/*/operations/*}:cancel", + }, { "method": "post", "uri": "/ui/{name=projects/*/locations/*/persistentResources/*/operations/*}:cancel", @@ -4048,6 +4180,18 @@ def __call__( "method": "post", "uri": "/v1beta1/{name=projects/*/locations/*/models/*/evaluations/*/operations/*}:cancel", }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/notebookExecutionJobs/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/notebookRuntimes/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/notebookRuntimeTemplates/*/operations/*}:cancel", + }, { "method": "post", "uri": "/v1beta1/{name=projects/*/locations/*/persistentResources/*/operations/*}:cancel", @@ -4283,6 +4427,18 @@ def __call__( "method": "delete", "uri": "/ui/{name=projects/*/locations/*/models/*/evaluations/*/operations/*}", }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/notebookExecutionJobs/*/operations/*}", + }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/notebookRuntimes/*/operations/*}", + }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/notebookRuntimeTemplates/*/operations/*}", + }, { "method": "delete", "uri": "/ui/{name=projects/*/locations/*/persistentResources/*/operations/*}", @@ -4471,6 +4627,18 @@ def __call__( "method": "delete", "uri": "/v1beta1/{name=projects/*/locations/*/models/*/evaluations/*/operations/*}", }, + { + "method": "delete", + "uri": "/v1beta1/{name=projects/*/locations/*/notebookExecutionJobs/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1beta1/{name=projects/*/locations/*/notebookRuntimes/*/operations/*}", + }, + { + "method": "delete", + "uri": 
"/v1beta1/{name=projects/*/locations/*/notebookRuntimeTemplates/*/operations/*}", + }, { "method": "delete", "uri": "/v1beta1/{name=projects/*/locations/*/persistentResources/*/operations/*}", @@ -4737,6 +4905,18 @@ def __call__( "method": "get", "uri": "/ui/{name=projects/*/locations/*/models/*/evaluations/*/operations/*}", }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/notebookExecutionJobs/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/notebookRuntimes/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/notebookRuntimeTemplates/*/operations/*}", + }, { "method": "get", "uri": "/ui/{name=projects/*/locations/*/persistentResources/*/operations/*}", @@ -4925,6 +5105,18 @@ def __call__( "method": "get", "uri": "/v1beta1/{name=projects/*/locations/*/models/*/evaluations/*/operations/*}", }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/notebookExecutionJobs/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/notebookRuntimes/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/notebookRuntimeTemplates/*/operations/*}", + }, { "method": "get", "uri": "/v1beta1/{name=projects/*/locations/*/persistentResources/*/operations/*}", @@ -5188,6 +5380,18 @@ def __call__( "method": "get", "uri": "/ui/{name=projects/*/locations/*/models/*/evaluations/*}/operations", }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/notebookExecutionJobs/*}/operations", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/notebookRuntimes/*}/operations", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/notebookRuntimeTemplates/*}/operations", + }, { "method": "get", "uri": "/ui/{name=projects/*/locations/*/studies/*}/operations", @@ -5376,6 +5580,18 @@ def __call__( "method": "get", "uri": "/v1beta1/{name=projects/*/locations/*/models/*/evaluations/*}/operations", }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/notebookExecutionJobs/*}/operations", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/notebookRuntimes/*}/operations", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/notebookRuntimeTemplates/*}/operations", + }, { "method": "get", "uri": "/v1beta1/{name=projects/*/locations/*/persistentResources/*}/operations", @@ -5639,6 +5855,18 @@ def __call__( "method": "post", "uri": "/ui/{name=projects/*/locations/*/models/*/evaluations/*/operations/*}:wait", }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/notebookExecutionJobs/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/notebookRuntimes/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/notebookRuntimeTemplates/*/operations/*}:wait", + }, { "method": "post", "uri": "/ui/{name=projects/*/locations/*/studies/*/operations/*}:wait", @@ -5827,6 +6055,18 @@ def __call__( "method": "post", "uri": "/v1beta1/{name=projects/*/locations/*/models/*/evaluations/*/operations/*}:wait", }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/notebookExecutionJobs/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/notebookRuntimes/*/operations/*}:wait", + }, + { + "method": "post", + "uri": 
"/v1beta1/{name=projects/*/locations/*/notebookRuntimeTemplates/*/operations/*}:wait", + }, { "method": "post", "uri": "/v1beta1/{name=projects/*/locations/*/persistentResources/*/operations/*}:wait", diff --git a/google/cloud/aiplatform_v1beta1/services/specialist_pool_service/async_client.py b/google/cloud/aiplatform_v1beta1/services/specialist_pool_service/async_client.py index cf45a9c5a1..8c84c00523 100644 --- a/google/cloud/aiplatform_v1beta1/services/specialist_pool_service/async_client.py +++ b/google/cloud/aiplatform_v1beta1/services/specialist_pool_service/async_client.py @@ -38,6 +38,7 @@ from google.auth import credentials as ga_credentials # type: ignore from google.oauth2 import service_account # type: ignore + try: OptionalRetry = Union[retries.AsyncRetry, gapic_v1.method._MethodDefault, None] except AttributeError: # pragma: NO COVER diff --git a/google/cloud/aiplatform_v1beta1/services/specialist_pool_service/transports/rest.py b/google/cloud/aiplatform_v1beta1/services/specialist_pool_service/transports/rest.py index 2f957c1483..992d135512 100644 --- a/google/cloud/aiplatform_v1beta1/services/specialist_pool_service/transports/rest.py +++ b/google/cloud/aiplatform_v1beta1/services/specialist_pool_service/transports/rest.py @@ -713,6 +713,18 @@ def operations_client(self) -> operations_v1.AbstractOperationsClient: "method": "post", "uri": "/ui/{name=projects/*/locations/*/models/*/evaluations/*/operations/*}:cancel", }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/notebookExecutionJobs/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/notebookRuntimes/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/notebookRuntimeTemplates/*/operations/*}:cancel", + }, { "method": "post", "uri": "/ui/{name=projects/*/locations/*/persistentResources/*/operations/*}:cancel", @@ -881,6 +893,18 @@ def operations_client(self) -> operations_v1.AbstractOperationsClient: "method": "post", "uri": "/v1beta1/{name=projects/*/locations/*/models/*/evaluations/*/operations/*}:cancel", }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/notebookExecutionJobs/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/notebookRuntimes/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/notebookRuntimeTemplates/*/operations/*}:cancel", + }, { "method": "post", "uri": "/v1beta1/{name=projects/*/locations/*/persistentResources/*/operations/*}:cancel", @@ -1059,6 +1083,18 @@ def operations_client(self) -> operations_v1.AbstractOperationsClient: "method": "delete", "uri": "/ui/{name=projects/*/locations/*/models/*/evaluations/*/operations/*}", }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/notebookExecutionJobs/*/operations/*}", + }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/notebookRuntimes/*/operations/*}", + }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/notebookRuntimeTemplates/*/operations/*}", + }, { "method": "delete", "uri": "/ui/{name=projects/*/locations/*/persistentResources/*/operations/*}", @@ -1247,6 +1283,18 @@ def operations_client(self) -> operations_v1.AbstractOperationsClient: "method": "delete", "uri": "/v1beta1/{name=projects/*/locations/*/models/*/evaluations/*/operations/*}", }, + { + "method": "delete", + "uri": 
"/v1beta1/{name=projects/*/locations/*/notebookExecutionJobs/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1beta1/{name=projects/*/locations/*/notebookRuntimes/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1beta1/{name=projects/*/locations/*/notebookRuntimeTemplates/*/operations/*}", + }, { "method": "delete", "uri": "/v1beta1/{name=projects/*/locations/*/persistentResources/*/operations/*}", @@ -1453,6 +1501,18 @@ def operations_client(self) -> operations_v1.AbstractOperationsClient: "method": "get", "uri": "/ui/{name=projects/*/locations/*/models/*/evaluations/*/operations/*}", }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/notebookExecutionJobs/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/notebookRuntimes/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/notebookRuntimeTemplates/*/operations/*}", + }, { "method": "get", "uri": "/ui/{name=projects/*/locations/*/persistentResources/*/operations/*}", @@ -1641,6 +1701,18 @@ def operations_client(self) -> operations_v1.AbstractOperationsClient: "method": "get", "uri": "/v1beta1/{name=projects/*/locations/*/models/*/evaluations/*/operations/*}", }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/notebookExecutionJobs/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/notebookRuntimes/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/notebookRuntimeTemplates/*/operations/*}", + }, { "method": "get", "uri": "/v1beta1/{name=projects/*/locations/*/persistentResources/*/operations/*}", @@ -1843,6 +1915,18 @@ def operations_client(self) -> operations_v1.AbstractOperationsClient: "method": "get", "uri": "/ui/{name=projects/*/locations/*/models/*/evaluations/*}/operations", }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/notebookExecutionJobs/*}/operations", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/notebookRuntimes/*}/operations", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/notebookRuntimeTemplates/*}/operations", + }, { "method": "get", "uri": "/ui/{name=projects/*/locations/*/studies/*}/operations", @@ -2031,6 +2115,18 @@ def operations_client(self) -> operations_v1.AbstractOperationsClient: "method": "get", "uri": "/v1beta1/{name=projects/*/locations/*/models/*/evaluations/*}/operations", }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/notebookExecutionJobs/*}/operations", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/notebookRuntimes/*}/operations", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/notebookRuntimeTemplates/*}/operations", + }, { "method": "get", "uri": "/v1beta1/{name=projects/*/locations/*/persistentResources/*}/operations", @@ -2233,6 +2329,18 @@ def operations_client(self) -> operations_v1.AbstractOperationsClient: "method": "post", "uri": "/ui/{name=projects/*/locations/*/models/*/evaluations/*/operations/*}:wait", }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/notebookExecutionJobs/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/notebookRuntimes/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/notebookRuntimeTemplates/*/operations/*}:wait", + }, { "method": "post", "uri": 
"/ui/{name=projects/*/locations/*/studies/*/operations/*}:wait", @@ -2421,6 +2529,18 @@ def operations_client(self) -> operations_v1.AbstractOperationsClient: "method": "post", "uri": "/v1beta1/{name=projects/*/locations/*/models/*/evaluations/*/operations/*}:wait", }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/notebookExecutionJobs/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/notebookRuntimes/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/notebookRuntimeTemplates/*/operations/*}:wait", + }, { "method": "post", "uri": "/v1beta1/{name=projects/*/locations/*/persistentResources/*/operations/*}:wait", @@ -3709,6 +3829,18 @@ def __call__( "method": "post", "uri": "/ui/{name=projects/*/locations/*/models/*/evaluations/*/operations/*}:cancel", }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/notebookExecutionJobs/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/notebookRuntimes/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/notebookRuntimeTemplates/*/operations/*}:cancel", + }, { "method": "post", "uri": "/ui/{name=projects/*/locations/*/persistentResources/*/operations/*}:cancel", @@ -3877,6 +4009,18 @@ def __call__( "method": "post", "uri": "/v1beta1/{name=projects/*/locations/*/models/*/evaluations/*/operations/*}:cancel", }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/notebookExecutionJobs/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/notebookRuntimes/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/notebookRuntimeTemplates/*/operations/*}:cancel", + }, { "method": "post", "uri": "/v1beta1/{name=projects/*/locations/*/persistentResources/*/operations/*}:cancel", @@ -4112,6 +4256,18 @@ def __call__( "method": "delete", "uri": "/ui/{name=projects/*/locations/*/models/*/evaluations/*/operations/*}", }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/notebookExecutionJobs/*/operations/*}", + }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/notebookRuntimes/*/operations/*}", + }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/notebookRuntimeTemplates/*/operations/*}", + }, { "method": "delete", "uri": "/ui/{name=projects/*/locations/*/persistentResources/*/operations/*}", @@ -4300,6 +4456,18 @@ def __call__( "method": "delete", "uri": "/v1beta1/{name=projects/*/locations/*/models/*/evaluations/*/operations/*}", }, + { + "method": "delete", + "uri": "/v1beta1/{name=projects/*/locations/*/notebookExecutionJobs/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1beta1/{name=projects/*/locations/*/notebookRuntimes/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1beta1/{name=projects/*/locations/*/notebookRuntimeTemplates/*/operations/*}", + }, { "method": "delete", "uri": "/v1beta1/{name=projects/*/locations/*/persistentResources/*/operations/*}", @@ -4566,6 +4734,18 @@ def __call__( "method": "get", "uri": "/ui/{name=projects/*/locations/*/models/*/evaluations/*/operations/*}", }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/notebookExecutionJobs/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/notebookRuntimes/*/operations/*}", + }, + { + "method": "get", + "uri": 
"/ui/{name=projects/*/locations/*/notebookRuntimeTemplates/*/operations/*}", + }, { "method": "get", "uri": "/ui/{name=projects/*/locations/*/persistentResources/*/operations/*}", @@ -4754,6 +4934,18 @@ def __call__( "method": "get", "uri": "/v1beta1/{name=projects/*/locations/*/models/*/evaluations/*/operations/*}", }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/notebookExecutionJobs/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/notebookRuntimes/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/notebookRuntimeTemplates/*/operations/*}", + }, { "method": "get", "uri": "/v1beta1/{name=projects/*/locations/*/persistentResources/*/operations/*}", @@ -5017,6 +5209,18 @@ def __call__( "method": "get", "uri": "/ui/{name=projects/*/locations/*/models/*/evaluations/*}/operations", }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/notebookExecutionJobs/*}/operations", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/notebookRuntimes/*}/operations", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/notebookRuntimeTemplates/*}/operations", + }, { "method": "get", "uri": "/ui/{name=projects/*/locations/*/studies/*}/operations", @@ -5205,6 +5409,18 @@ def __call__( "method": "get", "uri": "/v1beta1/{name=projects/*/locations/*/models/*/evaluations/*}/operations", }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/notebookExecutionJobs/*}/operations", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/notebookRuntimes/*}/operations", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/notebookRuntimeTemplates/*}/operations", + }, { "method": "get", "uri": "/v1beta1/{name=projects/*/locations/*/persistentResources/*}/operations", @@ -5468,6 +5684,18 @@ def __call__( "method": "post", "uri": "/ui/{name=projects/*/locations/*/models/*/evaluations/*/operations/*}:wait", }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/notebookExecutionJobs/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/notebookRuntimes/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/notebookRuntimeTemplates/*/operations/*}:wait", + }, { "method": "post", "uri": "/ui/{name=projects/*/locations/*/studies/*/operations/*}:wait", @@ -5656,6 +5884,18 @@ def __call__( "method": "post", "uri": "/v1beta1/{name=projects/*/locations/*/models/*/evaluations/*/operations/*}:wait", }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/notebookExecutionJobs/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/notebookRuntimes/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/notebookRuntimeTemplates/*/operations/*}:wait", + }, { "method": "post", "uri": "/v1beta1/{name=projects/*/locations/*/persistentResources/*/operations/*}:wait", diff --git a/google/cloud/aiplatform_v1beta1/services/tensorboard_service/async_client.py b/google/cloud/aiplatform_v1beta1/services/tensorboard_service/async_client.py index 9ccc6c0309..9f506e1e87 100644 --- a/google/cloud/aiplatform_v1beta1/services/tensorboard_service/async_client.py +++ b/google/cloud/aiplatform_v1beta1/services/tensorboard_service/async_client.py @@ -40,6 +40,7 @@ from google.auth import credentials as ga_credentials # type: 
ignore from google.oauth2 import service_account # type: ignore + try: OptionalRetry = Union[retries.AsyncRetry, gapic_v1.method._MethodDefault, None] except AttributeError: # pragma: NO COVER diff --git a/google/cloud/aiplatform_v1beta1/services/tensorboard_service/transports/rest.py b/google/cloud/aiplatform_v1beta1/services/tensorboard_service/transports/rest.py index 657f1ebc42..8b8d1417e3 100644 --- a/google/cloud/aiplatform_v1beta1/services/tensorboard_service/transports/rest.py +++ b/google/cloud/aiplatform_v1beta1/services/tensorboard_service/transports/rest.py @@ -1542,6 +1542,18 @@ def operations_client(self) -> operations_v1.AbstractOperationsClient: "method": "post", "uri": "/ui/{name=projects/*/locations/*/models/*/evaluations/*/operations/*}:cancel", }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/notebookExecutionJobs/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/notebookRuntimes/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/notebookRuntimeTemplates/*/operations/*}:cancel", + }, { "method": "post", "uri": "/ui/{name=projects/*/locations/*/persistentResources/*/operations/*}:cancel", @@ -1710,6 +1722,18 @@ def operations_client(self) -> operations_v1.AbstractOperationsClient: "method": "post", "uri": "/v1beta1/{name=projects/*/locations/*/models/*/evaluations/*/operations/*}:cancel", }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/notebookExecutionJobs/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/notebookRuntimes/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/notebookRuntimeTemplates/*/operations/*}:cancel", + }, { "method": "post", "uri": "/v1beta1/{name=projects/*/locations/*/persistentResources/*/operations/*}:cancel", @@ -1888,6 +1912,18 @@ def operations_client(self) -> operations_v1.AbstractOperationsClient: "method": "delete", "uri": "/ui/{name=projects/*/locations/*/models/*/evaluations/*/operations/*}", }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/notebookExecutionJobs/*/operations/*}", + }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/notebookRuntimes/*/operations/*}", + }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/notebookRuntimeTemplates/*/operations/*}", + }, { "method": "delete", "uri": "/ui/{name=projects/*/locations/*/persistentResources/*/operations/*}", @@ -2076,6 +2112,18 @@ def operations_client(self) -> operations_v1.AbstractOperationsClient: "method": "delete", "uri": "/v1beta1/{name=projects/*/locations/*/models/*/evaluations/*/operations/*}", }, + { + "method": "delete", + "uri": "/v1beta1/{name=projects/*/locations/*/notebookExecutionJobs/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1beta1/{name=projects/*/locations/*/notebookRuntimes/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1beta1/{name=projects/*/locations/*/notebookRuntimeTemplates/*/operations/*}", + }, { "method": "delete", "uri": "/v1beta1/{name=projects/*/locations/*/persistentResources/*/operations/*}", @@ -2282,6 +2330,18 @@ def operations_client(self) -> operations_v1.AbstractOperationsClient: "method": "get", "uri": "/ui/{name=projects/*/locations/*/models/*/evaluations/*/operations/*}", }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/notebookExecutionJobs/*/operations/*}", + }, + { + "method": 
"get", + "uri": "/ui/{name=projects/*/locations/*/notebookRuntimes/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/notebookRuntimeTemplates/*/operations/*}", + }, { "method": "get", "uri": "/ui/{name=projects/*/locations/*/persistentResources/*/operations/*}", @@ -2470,6 +2530,18 @@ def operations_client(self) -> operations_v1.AbstractOperationsClient: "method": "get", "uri": "/v1beta1/{name=projects/*/locations/*/models/*/evaluations/*/operations/*}", }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/notebookExecutionJobs/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/notebookRuntimes/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/notebookRuntimeTemplates/*/operations/*}", + }, { "method": "get", "uri": "/v1beta1/{name=projects/*/locations/*/persistentResources/*/operations/*}", @@ -2672,6 +2744,18 @@ def operations_client(self) -> operations_v1.AbstractOperationsClient: "method": "get", "uri": "/ui/{name=projects/*/locations/*/models/*/evaluations/*}/operations", }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/notebookExecutionJobs/*}/operations", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/notebookRuntimes/*}/operations", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/notebookRuntimeTemplates/*}/operations", + }, { "method": "get", "uri": "/ui/{name=projects/*/locations/*/studies/*}/operations", @@ -2860,6 +2944,18 @@ def operations_client(self) -> operations_v1.AbstractOperationsClient: "method": "get", "uri": "/v1beta1/{name=projects/*/locations/*/models/*/evaluations/*}/operations", }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/notebookExecutionJobs/*}/operations", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/notebookRuntimes/*}/operations", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/notebookRuntimeTemplates/*}/operations", + }, { "method": "get", "uri": "/v1beta1/{name=projects/*/locations/*/persistentResources/*}/operations", @@ -3062,6 +3158,18 @@ def operations_client(self) -> operations_v1.AbstractOperationsClient: "method": "post", "uri": "/ui/{name=projects/*/locations/*/models/*/evaluations/*/operations/*}:wait", }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/notebookExecutionJobs/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/notebookRuntimes/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/notebookRuntimeTemplates/*/operations/*}:wait", + }, { "method": "post", "uri": "/ui/{name=projects/*/locations/*/studies/*/operations/*}:wait", @@ -3250,6 +3358,18 @@ def operations_client(self) -> operations_v1.AbstractOperationsClient: "method": "post", "uri": "/v1beta1/{name=projects/*/locations/*/models/*/evaluations/*/operations/*}:wait", }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/notebookExecutionJobs/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/notebookRuntimes/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/notebookRuntimeTemplates/*/operations/*}:wait", + }, { "method": "post", "uri": "/v1beta1/{name=projects/*/locations/*/persistentResources/*/operations/*}:wait", @@ -7162,6 +7282,18 @@ def __call__( "method": 
"post", "uri": "/ui/{name=projects/*/locations/*/models/*/evaluations/*/operations/*}:cancel", }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/notebookExecutionJobs/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/notebookRuntimes/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/notebookRuntimeTemplates/*/operations/*}:cancel", + }, { "method": "post", "uri": "/ui/{name=projects/*/locations/*/persistentResources/*/operations/*}:cancel", @@ -7330,6 +7462,18 @@ def __call__( "method": "post", "uri": "/v1beta1/{name=projects/*/locations/*/models/*/evaluations/*/operations/*}:cancel", }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/notebookExecutionJobs/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/notebookRuntimes/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/notebookRuntimeTemplates/*/operations/*}:cancel", + }, { "method": "post", "uri": "/v1beta1/{name=projects/*/locations/*/persistentResources/*/operations/*}:cancel", @@ -7565,6 +7709,18 @@ def __call__( "method": "delete", "uri": "/ui/{name=projects/*/locations/*/models/*/evaluations/*/operations/*}", }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/notebookExecutionJobs/*/operations/*}", + }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/notebookRuntimes/*/operations/*}", + }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/notebookRuntimeTemplates/*/operations/*}", + }, { "method": "delete", "uri": "/ui/{name=projects/*/locations/*/persistentResources/*/operations/*}", @@ -7753,6 +7909,18 @@ def __call__( "method": "delete", "uri": "/v1beta1/{name=projects/*/locations/*/models/*/evaluations/*/operations/*}", }, + { + "method": "delete", + "uri": "/v1beta1/{name=projects/*/locations/*/notebookExecutionJobs/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1beta1/{name=projects/*/locations/*/notebookRuntimes/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1beta1/{name=projects/*/locations/*/notebookRuntimeTemplates/*/operations/*}", + }, { "method": "delete", "uri": "/v1beta1/{name=projects/*/locations/*/persistentResources/*/operations/*}", @@ -8019,6 +8187,18 @@ def __call__( "method": "get", "uri": "/ui/{name=projects/*/locations/*/models/*/evaluations/*/operations/*}", }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/notebookExecutionJobs/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/notebookRuntimes/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/notebookRuntimeTemplates/*/operations/*}", + }, { "method": "get", "uri": "/ui/{name=projects/*/locations/*/persistentResources/*/operations/*}", @@ -8207,6 +8387,18 @@ def __call__( "method": "get", "uri": "/v1beta1/{name=projects/*/locations/*/models/*/evaluations/*/operations/*}", }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/notebookExecutionJobs/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/notebookRuntimes/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/notebookRuntimeTemplates/*/operations/*}", + }, { "method": "get", "uri": "/v1beta1/{name=projects/*/locations/*/persistentResources/*/operations/*}", @@ -8470,6 +8662,18 
@@ def __call__( "method": "get", "uri": "/ui/{name=projects/*/locations/*/models/*/evaluations/*}/operations", }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/notebookExecutionJobs/*}/operations", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/notebookRuntimes/*}/operations", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/notebookRuntimeTemplates/*}/operations", + }, { "method": "get", "uri": "/ui/{name=projects/*/locations/*/studies/*}/operations", @@ -8658,6 +8862,18 @@ def __call__( "method": "get", "uri": "/v1beta1/{name=projects/*/locations/*/models/*/evaluations/*}/operations", }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/notebookExecutionJobs/*}/operations", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/notebookRuntimes/*}/operations", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/notebookRuntimeTemplates/*}/operations", + }, { "method": "get", "uri": "/v1beta1/{name=projects/*/locations/*/persistentResources/*}/operations", @@ -8921,6 +9137,18 @@ def __call__( "method": "post", "uri": "/ui/{name=projects/*/locations/*/models/*/evaluations/*/operations/*}:wait", }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/notebookExecutionJobs/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/notebookRuntimes/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/notebookRuntimeTemplates/*/operations/*}:wait", + }, { "method": "post", "uri": "/ui/{name=projects/*/locations/*/studies/*/operations/*}:wait", @@ -9109,6 +9337,18 @@ def __call__( "method": "post", "uri": "/v1beta1/{name=projects/*/locations/*/models/*/evaluations/*/operations/*}:wait", }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/notebookExecutionJobs/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/notebookRuntimes/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/notebookRuntimeTemplates/*/operations/*}:wait", + }, { "method": "post", "uri": "/v1beta1/{name=projects/*/locations/*/persistentResources/*/operations/*}:wait", diff --git a/google/cloud/aiplatform_v1beta1/services/vertex_rag_data_service/async_client.py b/google/cloud/aiplatform_v1beta1/services/vertex_rag_data_service/async_client.py index 064c8e08dd..fda970b069 100644 --- a/google/cloud/aiplatform_v1beta1/services/vertex_rag_data_service/async_client.py +++ b/google/cloud/aiplatform_v1beta1/services/vertex_rag_data_service/async_client.py @@ -38,6 +38,7 @@ from google.auth import credentials as ga_credentials # type: ignore from google.oauth2 import service_account # type: ignore + try: OptionalRetry = Union[retries.AsyncRetry, gapic_v1.method._MethodDefault, None] except AttributeError: # pragma: NO COVER @@ -74,6 +75,10 @@ class VertexRagDataServiceAsyncClient: _DEFAULT_ENDPOINT_TEMPLATE = VertexRagDataServiceClient._DEFAULT_ENDPOINT_TEMPLATE _DEFAULT_UNIVERSE = VertexRagDataServiceClient._DEFAULT_UNIVERSE + endpoint_path = staticmethod(VertexRagDataServiceClient.endpoint_path) + parse_endpoint_path = staticmethod(VertexRagDataServiceClient.parse_endpoint_path) + model_path = staticmethod(VertexRagDataServiceClient.model_path) + parse_model_path = staticmethod(VertexRagDataServiceClient.parse_model_path) rag_corpus_path = staticmethod(VertexRagDataServiceClient.rag_corpus_path) 
parse_rag_corpus_path = staticmethod( VertexRagDataServiceClient.parse_rag_corpus_path diff --git a/google/cloud/aiplatform_v1beta1/services/vertex_rag_data_service/client.py b/google/cloud/aiplatform_v1beta1/services/vertex_rag_data_service/client.py index e118448ba2..9a44d8dfd8 100644 --- a/google/cloud/aiplatform_v1beta1/services/vertex_rag_data_service/client.py +++ b/google/cloud/aiplatform_v1beta1/services/vertex_rag_data_service/client.py @@ -194,6 +194,50 @@ def transport(self) -> VertexRagDataServiceTransport: """ return self._transport + @staticmethod + def endpoint_path( + project: str, + location: str, + endpoint: str, + ) -> str: + """Returns a fully-qualified endpoint string.""" + return "projects/{project}/locations/{location}/endpoints/{endpoint}".format( + project=project, + location=location, + endpoint=endpoint, + ) + + @staticmethod + def parse_endpoint_path(path: str) -> Dict[str, str]: + """Parses a endpoint path into its component segments.""" + m = re.match( + r"^projects/(?P<project>.+?)/locations/(?P<location>.+?)/endpoints/(?P<endpoint>.+?)$", + path, + ) + return m.groupdict() if m else {} + + @staticmethod + def model_path( + project: str, + location: str, + model: str, + ) -> str: + """Returns a fully-qualified model string.""" + return "projects/{project}/locations/{location}/models/{model}".format( + project=project, + location=location, + model=model, + ) + + @staticmethod + def parse_model_path(path: str) -> Dict[str, str]: + """Parses a model path into its component segments.""" + m = re.match( + r"^projects/(?P<project>.+?)/locations/(?P<location>.+?)/models/(?P<model>.+?)$", + path, + ) + return m.groupdict() if m else {} + @staticmethod + def rag_corpus_path( + project: str, diff --git a/google/cloud/aiplatform_v1beta1/services/vertex_rag_data_service/transports/rest.py b/google/cloud/aiplatform_v1beta1/services/vertex_rag_data_service/transports/rest.py index 5495fd28ce..f1485a7361 100644 --- a/google/cloud/aiplatform_v1beta1/services/vertex_rag_data_service/transports/rest.py +++ b/google/cloud/aiplatform_v1beta1/services/vertex_rag_data_service/transports/rest.py @@ -830,6 +830,18 @@ def operations_client(self) -> operations_v1.AbstractOperationsClient: "method": "post", "uri": "/ui/{name=projects/*/locations/*/models/*/evaluations/*/operations/*}:cancel", }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/notebookExecutionJobs/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/notebookRuntimes/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/notebookRuntimeTemplates/*/operations/*}:cancel", + }, { "method": "post", "uri": "/ui/{name=projects/*/locations/*/persistentResources/*/operations/*}:cancel", @@ -998,6 +1010,18 @@ def operations_client(self) -> operations_v1.AbstractOperationsClient: "method": "post", "uri": "/v1beta1/{name=projects/*/locations/*/models/*/evaluations/*/operations/*}:cancel", }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/notebookExecutionJobs/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/notebookRuntimes/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/notebookRuntimeTemplates/*/operations/*}:cancel", + }, { "method": "post", "uri": "/v1beta1/{name=projects/*/locations/*/persistentResources/*/operations/*}:cancel", @@ -1176,6 +1200,18 @@ def operations_client(self) -> operations_v1.AbstractOperationsClient: "method": "delete", "uri":
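For reference, a minimal usage sketch of the resource-path helpers added to vertex_rag_data_service/client.py above. It assumes only the per-service import path implied by the diff headers; the project, location, and model values are placeholders, not anything defined by this patch.

    from google.cloud.aiplatform_v1beta1.services.vertex_rag_data_service import (
        VertexRagDataServiceClient,
    )

    # Build a fully-qualified model resource name from its components.
    name = VertexRagDataServiceClient.model_path(
        project="example-project",   # placeholder project ID
        location="us-central1",      # placeholder region
        model="example-model",       # placeholder model ID
    )
    # name == "projects/example-project/locations/us-central1/models/example-model"

    # Parse it back into segments; an empty dict is returned when the path does not match.
    segments = VertexRagDataServiceClient.parse_model_path(name)
    assert segments == {
        "project": "example-project",
        "location": "us-central1",
        "model": "example-model",
    }

These are static methods, so no credentials or API calls are involved; endpoint_path and parse_endpoint_path behave the same way for endpoint resource names.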
"/ui/{name=projects/*/locations/*/models/*/evaluations/*/operations/*}", }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/notebookExecutionJobs/*/operations/*}", + }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/notebookRuntimes/*/operations/*}", + }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/notebookRuntimeTemplates/*/operations/*}", + }, { "method": "delete", "uri": "/ui/{name=projects/*/locations/*/persistentResources/*/operations/*}", @@ -1364,6 +1400,18 @@ def operations_client(self) -> operations_v1.AbstractOperationsClient: "method": "delete", "uri": "/v1beta1/{name=projects/*/locations/*/models/*/evaluations/*/operations/*}", }, + { + "method": "delete", + "uri": "/v1beta1/{name=projects/*/locations/*/notebookExecutionJobs/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1beta1/{name=projects/*/locations/*/notebookRuntimes/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1beta1/{name=projects/*/locations/*/notebookRuntimeTemplates/*/operations/*}", + }, { "method": "delete", "uri": "/v1beta1/{name=projects/*/locations/*/persistentResources/*/operations/*}", @@ -1570,6 +1618,18 @@ def operations_client(self) -> operations_v1.AbstractOperationsClient: "method": "get", "uri": "/ui/{name=projects/*/locations/*/models/*/evaluations/*/operations/*}", }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/notebookExecutionJobs/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/notebookRuntimes/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/notebookRuntimeTemplates/*/operations/*}", + }, { "method": "get", "uri": "/ui/{name=projects/*/locations/*/persistentResources/*/operations/*}", @@ -1758,6 +1818,18 @@ def operations_client(self) -> operations_v1.AbstractOperationsClient: "method": "get", "uri": "/v1beta1/{name=projects/*/locations/*/models/*/evaluations/*/operations/*}", }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/notebookExecutionJobs/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/notebookRuntimes/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/notebookRuntimeTemplates/*/operations/*}", + }, { "method": "get", "uri": "/v1beta1/{name=projects/*/locations/*/persistentResources/*/operations/*}", @@ -1960,6 +2032,18 @@ def operations_client(self) -> operations_v1.AbstractOperationsClient: "method": "get", "uri": "/ui/{name=projects/*/locations/*/models/*/evaluations/*}/operations", }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/notebookExecutionJobs/*}/operations", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/notebookRuntimes/*}/operations", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/notebookRuntimeTemplates/*}/operations", + }, { "method": "get", "uri": "/ui/{name=projects/*/locations/*/studies/*}/operations", @@ -2148,6 +2232,18 @@ def operations_client(self) -> operations_v1.AbstractOperationsClient: "method": "get", "uri": "/v1beta1/{name=projects/*/locations/*/models/*/evaluations/*}/operations", }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/notebookExecutionJobs/*}/operations", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/notebookRuntimes/*}/operations", + }, + { + "method": "get", + "uri": 
"/v1beta1/{name=projects/*/locations/*/notebookRuntimeTemplates/*}/operations", + }, { "method": "get", "uri": "/v1beta1/{name=projects/*/locations/*/persistentResources/*}/operations", @@ -2350,6 +2446,18 @@ def operations_client(self) -> operations_v1.AbstractOperationsClient: "method": "post", "uri": "/ui/{name=projects/*/locations/*/models/*/evaluations/*/operations/*}:wait", }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/notebookExecutionJobs/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/notebookRuntimes/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/notebookRuntimeTemplates/*/operations/*}:wait", + }, { "method": "post", "uri": "/ui/{name=projects/*/locations/*/studies/*/operations/*}:wait", @@ -2538,6 +2646,18 @@ def operations_client(self) -> operations_v1.AbstractOperationsClient: "method": "post", "uri": "/v1beta1/{name=projects/*/locations/*/models/*/evaluations/*/operations/*}:wait", }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/notebookExecutionJobs/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/notebookRuntimes/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/notebookRuntimeTemplates/*/operations/*}:wait", + }, { "method": "post", "uri": "/v1beta1/{name=projects/*/locations/*/persistentResources/*/operations/*}:wait", @@ -4202,6 +4322,18 @@ def __call__( "method": "post", "uri": "/ui/{name=projects/*/locations/*/models/*/evaluations/*/operations/*}:cancel", }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/notebookExecutionJobs/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/notebookRuntimes/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/notebookRuntimeTemplates/*/operations/*}:cancel", + }, { "method": "post", "uri": "/ui/{name=projects/*/locations/*/persistentResources/*/operations/*}:cancel", @@ -4370,6 +4502,18 @@ def __call__( "method": "post", "uri": "/v1beta1/{name=projects/*/locations/*/models/*/evaluations/*/operations/*}:cancel", }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/notebookExecutionJobs/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/notebookRuntimes/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/notebookRuntimeTemplates/*/operations/*}:cancel", + }, { "method": "post", "uri": "/v1beta1/{name=projects/*/locations/*/persistentResources/*/operations/*}:cancel", @@ -4605,6 +4749,18 @@ def __call__( "method": "delete", "uri": "/ui/{name=projects/*/locations/*/models/*/evaluations/*/operations/*}", }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/notebookExecutionJobs/*/operations/*}", + }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/notebookRuntimes/*/operations/*}", + }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/notebookRuntimeTemplates/*/operations/*}", + }, { "method": "delete", "uri": "/ui/{name=projects/*/locations/*/persistentResources/*/operations/*}", @@ -4793,6 +4949,18 @@ def __call__( "method": "delete", "uri": "/v1beta1/{name=projects/*/locations/*/models/*/evaluations/*/operations/*}", }, + { + "method": "delete", + "uri": 
"/v1beta1/{name=projects/*/locations/*/notebookExecutionJobs/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1beta1/{name=projects/*/locations/*/notebookRuntimes/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1beta1/{name=projects/*/locations/*/notebookRuntimeTemplates/*/operations/*}", + }, { "method": "delete", "uri": "/v1beta1/{name=projects/*/locations/*/persistentResources/*/operations/*}", @@ -5059,6 +5227,18 @@ def __call__( "method": "get", "uri": "/ui/{name=projects/*/locations/*/models/*/evaluations/*/operations/*}", }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/notebookExecutionJobs/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/notebookRuntimes/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/notebookRuntimeTemplates/*/operations/*}", + }, { "method": "get", "uri": "/ui/{name=projects/*/locations/*/persistentResources/*/operations/*}", @@ -5247,6 +5427,18 @@ def __call__( "method": "get", "uri": "/v1beta1/{name=projects/*/locations/*/models/*/evaluations/*/operations/*}", }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/notebookExecutionJobs/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/notebookRuntimes/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/notebookRuntimeTemplates/*/operations/*}", + }, { "method": "get", "uri": "/v1beta1/{name=projects/*/locations/*/persistentResources/*/operations/*}", @@ -5510,6 +5702,18 @@ def __call__( "method": "get", "uri": "/ui/{name=projects/*/locations/*/models/*/evaluations/*}/operations", }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/notebookExecutionJobs/*}/operations", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/notebookRuntimes/*}/operations", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/notebookRuntimeTemplates/*}/operations", + }, { "method": "get", "uri": "/ui/{name=projects/*/locations/*/studies/*}/operations", @@ -5698,6 +5902,18 @@ def __call__( "method": "get", "uri": "/v1beta1/{name=projects/*/locations/*/models/*/evaluations/*}/operations", }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/notebookExecutionJobs/*}/operations", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/notebookRuntimes/*}/operations", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/notebookRuntimeTemplates/*}/operations", + }, { "method": "get", "uri": "/v1beta1/{name=projects/*/locations/*/persistentResources/*}/operations", @@ -5961,6 +6177,18 @@ def __call__( "method": "post", "uri": "/ui/{name=projects/*/locations/*/models/*/evaluations/*/operations/*}:wait", }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/notebookExecutionJobs/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/notebookRuntimes/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/notebookRuntimeTemplates/*/operations/*}:wait", + }, { "method": "post", "uri": "/ui/{name=projects/*/locations/*/studies/*/operations/*}:wait", @@ -6149,6 +6377,18 @@ def __call__( "method": "post", "uri": "/v1beta1/{name=projects/*/locations/*/models/*/evaluations/*/operations/*}:wait", }, + { + "method": "post", + "uri": 
"/v1beta1/{name=projects/*/locations/*/notebookExecutionJobs/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/notebookRuntimes/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/notebookRuntimeTemplates/*/operations/*}:wait", + }, { "method": "post", "uri": "/v1beta1/{name=projects/*/locations/*/persistentResources/*/operations/*}:wait", diff --git a/google/cloud/aiplatform_v1beta1/services/vertex_rag_service/async_client.py b/google/cloud/aiplatform_v1beta1/services/vertex_rag_service/async_client.py index c8e1d49a86..bc7fe08de0 100644 --- a/google/cloud/aiplatform_v1beta1/services/vertex_rag_service/async_client.py +++ b/google/cloud/aiplatform_v1beta1/services/vertex_rag_service/async_client.py @@ -38,6 +38,7 @@ from google.auth import credentials as ga_credentials # type: ignore from google.oauth2 import service_account # type: ignore + try: OptionalRetry = Union[retries.AsyncRetry, gapic_v1.method._MethodDefault, None] except AttributeError: # pragma: NO COVER diff --git a/google/cloud/aiplatform_v1beta1/services/vertex_rag_service/transports/rest.py b/google/cloud/aiplatform_v1beta1/services/vertex_rag_service/transports/rest.py index 84fb269272..63a96211b0 100644 --- a/google/cloud/aiplatform_v1beta1/services/vertex_rag_service/transports/rest.py +++ b/google/cloud/aiplatform_v1beta1/services/vertex_rag_service/transports/rest.py @@ -1224,6 +1224,18 @@ def __call__( "method": "post", "uri": "/ui/{name=projects/*/locations/*/models/*/evaluations/*/operations/*}:cancel", }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/notebookExecutionJobs/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/notebookRuntimes/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/notebookRuntimeTemplates/*/operations/*}:cancel", + }, { "method": "post", "uri": "/ui/{name=projects/*/locations/*/persistentResources/*/operations/*}:cancel", @@ -1392,6 +1404,18 @@ def __call__( "method": "post", "uri": "/v1beta1/{name=projects/*/locations/*/models/*/evaluations/*/operations/*}:cancel", }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/notebookExecutionJobs/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/notebookRuntimes/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/notebookRuntimeTemplates/*/operations/*}:cancel", + }, { "method": "post", "uri": "/v1beta1/{name=projects/*/locations/*/persistentResources/*/operations/*}:cancel", @@ -1627,6 +1651,18 @@ def __call__( "method": "delete", "uri": "/ui/{name=projects/*/locations/*/models/*/evaluations/*/operations/*}", }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/notebookExecutionJobs/*/operations/*}", + }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/notebookRuntimes/*/operations/*}", + }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/notebookRuntimeTemplates/*/operations/*}", + }, { "method": "delete", "uri": "/ui/{name=projects/*/locations/*/persistentResources/*/operations/*}", @@ -1815,6 +1851,18 @@ def __call__( "method": "delete", "uri": "/v1beta1/{name=projects/*/locations/*/models/*/evaluations/*/operations/*}", }, + { + "method": "delete", + "uri": "/v1beta1/{name=projects/*/locations/*/notebookExecutionJobs/*/operations/*}", + }, + { + 
"method": "delete", + "uri": "/v1beta1/{name=projects/*/locations/*/notebookRuntimes/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1beta1/{name=projects/*/locations/*/notebookRuntimeTemplates/*/operations/*}", + }, { "method": "delete", "uri": "/v1beta1/{name=projects/*/locations/*/persistentResources/*/operations/*}", @@ -2081,6 +2129,18 @@ def __call__( "method": "get", "uri": "/ui/{name=projects/*/locations/*/models/*/evaluations/*/operations/*}", }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/notebookExecutionJobs/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/notebookRuntimes/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/notebookRuntimeTemplates/*/operations/*}", + }, { "method": "get", "uri": "/ui/{name=projects/*/locations/*/persistentResources/*/operations/*}", @@ -2269,6 +2329,18 @@ def __call__( "method": "get", "uri": "/v1beta1/{name=projects/*/locations/*/models/*/evaluations/*/operations/*}", }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/notebookExecutionJobs/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/notebookRuntimes/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/notebookRuntimeTemplates/*/operations/*}", + }, { "method": "get", "uri": "/v1beta1/{name=projects/*/locations/*/persistentResources/*/operations/*}", @@ -2532,6 +2604,18 @@ def __call__( "method": "get", "uri": "/ui/{name=projects/*/locations/*/models/*/evaluations/*}/operations", }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/notebookExecutionJobs/*}/operations", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/notebookRuntimes/*}/operations", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/notebookRuntimeTemplates/*}/operations", + }, { "method": "get", "uri": "/ui/{name=projects/*/locations/*/studies/*}/operations", @@ -2720,6 +2804,18 @@ def __call__( "method": "get", "uri": "/v1beta1/{name=projects/*/locations/*/models/*/evaluations/*}/operations", }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/notebookExecutionJobs/*}/operations", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/notebookRuntimes/*}/operations", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/notebookRuntimeTemplates/*}/operations", + }, { "method": "get", "uri": "/v1beta1/{name=projects/*/locations/*/persistentResources/*}/operations", @@ -2983,6 +3079,18 @@ def __call__( "method": "post", "uri": "/ui/{name=projects/*/locations/*/models/*/evaluations/*/operations/*}:wait", }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/notebookExecutionJobs/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/notebookRuntimes/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/notebookRuntimeTemplates/*/operations/*}:wait", + }, { "method": "post", "uri": "/ui/{name=projects/*/locations/*/studies/*/operations/*}:wait", @@ -3171,6 +3279,18 @@ def __call__( "method": "post", "uri": "/v1beta1/{name=projects/*/locations/*/models/*/evaluations/*/operations/*}:wait", }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/notebookExecutionJobs/*/operations/*}:wait", + }, + { + "method": "post", + "uri": 
"/v1beta1/{name=projects/*/locations/*/notebookRuntimes/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/notebookRuntimeTemplates/*/operations/*}:wait", + }, { "method": "post", "uri": "/v1beta1/{name=projects/*/locations/*/persistentResources/*/operations/*}:wait", diff --git a/google/cloud/aiplatform_v1beta1/services/vizier_service/async_client.py b/google/cloud/aiplatform_v1beta1/services/vizier_service/async_client.py index 6ab69e261c..1c0f39a8ab 100644 --- a/google/cloud/aiplatform_v1beta1/services/vizier_service/async_client.py +++ b/google/cloud/aiplatform_v1beta1/services/vizier_service/async_client.py @@ -38,6 +38,7 @@ from google.auth import credentials as ga_credentials # type: ignore from google.oauth2 import service_account # type: ignore + try: OptionalRetry = Union[retries.AsyncRetry, gapic_v1.method._MethodDefault, None] except AttributeError: # pragma: NO COVER diff --git a/google/cloud/aiplatform_v1beta1/services/vizier_service/transports/rest.py b/google/cloud/aiplatform_v1beta1/services/vizier_service/transports/rest.py index efb3847af8..4bdca85e92 100644 --- a/google/cloud/aiplatform_v1beta1/services/vizier_service/transports/rest.py +++ b/google/cloud/aiplatform_v1beta1/services/vizier_service/transports/rest.py @@ -970,6 +970,18 @@ def operations_client(self) -> operations_v1.AbstractOperationsClient: "method": "post", "uri": "/ui/{name=projects/*/locations/*/models/*/evaluations/*/operations/*}:cancel", }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/notebookExecutionJobs/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/notebookRuntimes/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/notebookRuntimeTemplates/*/operations/*}:cancel", + }, { "method": "post", "uri": "/ui/{name=projects/*/locations/*/persistentResources/*/operations/*}:cancel", @@ -1138,6 +1150,18 @@ def operations_client(self) -> operations_v1.AbstractOperationsClient: "method": "post", "uri": "/v1beta1/{name=projects/*/locations/*/models/*/evaluations/*/operations/*}:cancel", }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/notebookExecutionJobs/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/notebookRuntimes/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/notebookRuntimeTemplates/*/operations/*}:cancel", + }, { "method": "post", "uri": "/v1beta1/{name=projects/*/locations/*/persistentResources/*/operations/*}:cancel", @@ -1316,6 +1340,18 @@ def operations_client(self) -> operations_v1.AbstractOperationsClient: "method": "delete", "uri": "/ui/{name=projects/*/locations/*/models/*/evaluations/*/operations/*}", }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/notebookExecutionJobs/*/operations/*}", + }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/notebookRuntimes/*/operations/*}", + }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/notebookRuntimeTemplates/*/operations/*}", + }, { "method": "delete", "uri": "/ui/{name=projects/*/locations/*/persistentResources/*/operations/*}", @@ -1504,6 +1540,18 @@ def operations_client(self) -> operations_v1.AbstractOperationsClient: "method": "delete", "uri": "/v1beta1/{name=projects/*/locations/*/models/*/evaluations/*/operations/*}", }, + { + "method": "delete", + "uri": 
"/v1beta1/{name=projects/*/locations/*/notebookExecutionJobs/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1beta1/{name=projects/*/locations/*/notebookRuntimes/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1beta1/{name=projects/*/locations/*/notebookRuntimeTemplates/*/operations/*}", + }, { "method": "delete", "uri": "/v1beta1/{name=projects/*/locations/*/persistentResources/*/operations/*}", @@ -1710,6 +1758,18 @@ def operations_client(self) -> operations_v1.AbstractOperationsClient: "method": "get", "uri": "/ui/{name=projects/*/locations/*/models/*/evaluations/*/operations/*}", }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/notebookExecutionJobs/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/notebookRuntimes/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/notebookRuntimeTemplates/*/operations/*}", + }, { "method": "get", "uri": "/ui/{name=projects/*/locations/*/persistentResources/*/operations/*}", @@ -1898,6 +1958,18 @@ def operations_client(self) -> operations_v1.AbstractOperationsClient: "method": "get", "uri": "/v1beta1/{name=projects/*/locations/*/models/*/evaluations/*/operations/*}", }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/notebookExecutionJobs/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/notebookRuntimes/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/notebookRuntimeTemplates/*/operations/*}", + }, { "method": "get", "uri": "/v1beta1/{name=projects/*/locations/*/persistentResources/*/operations/*}", @@ -2100,6 +2172,18 @@ def operations_client(self) -> operations_v1.AbstractOperationsClient: "method": "get", "uri": "/ui/{name=projects/*/locations/*/models/*/evaluations/*}/operations", }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/notebookExecutionJobs/*}/operations", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/notebookRuntimes/*}/operations", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/notebookRuntimeTemplates/*}/operations", + }, { "method": "get", "uri": "/ui/{name=projects/*/locations/*/studies/*}/operations", @@ -2288,6 +2372,18 @@ def operations_client(self) -> operations_v1.AbstractOperationsClient: "method": "get", "uri": "/v1beta1/{name=projects/*/locations/*/models/*/evaluations/*}/operations", }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/notebookExecutionJobs/*}/operations", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/notebookRuntimes/*}/operations", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/notebookRuntimeTemplates/*}/operations", + }, { "method": "get", "uri": "/v1beta1/{name=projects/*/locations/*/persistentResources/*}/operations", @@ -2490,6 +2586,18 @@ def operations_client(self) -> operations_v1.AbstractOperationsClient: "method": "post", "uri": "/ui/{name=projects/*/locations/*/models/*/evaluations/*/operations/*}:wait", }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/notebookExecutionJobs/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/notebookRuntimes/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/notebookRuntimeTemplates/*/operations/*}:wait", + }, { "method": "post", "uri": 
"/ui/{name=projects/*/locations/*/studies/*/operations/*}:wait", @@ -2678,6 +2786,18 @@ def operations_client(self) -> operations_v1.AbstractOperationsClient: "method": "post", "uri": "/v1beta1/{name=projects/*/locations/*/models/*/evaluations/*/operations/*}:wait", }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/notebookExecutionJobs/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/notebookRuntimes/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/notebookRuntimeTemplates/*/operations/*}:wait", + }, { "method": "post", "uri": "/v1beta1/{name=projects/*/locations/*/persistentResources/*/operations/*}:wait", @@ -4913,6 +5033,18 @@ def __call__( "method": "post", "uri": "/ui/{name=projects/*/locations/*/models/*/evaluations/*/operations/*}:cancel", }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/notebookExecutionJobs/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/notebookRuntimes/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/notebookRuntimeTemplates/*/operations/*}:cancel", + }, { "method": "post", "uri": "/ui/{name=projects/*/locations/*/persistentResources/*/operations/*}:cancel", @@ -5081,6 +5213,18 @@ def __call__( "method": "post", "uri": "/v1beta1/{name=projects/*/locations/*/models/*/evaluations/*/operations/*}:cancel", }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/notebookExecutionJobs/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/notebookRuntimes/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/notebookRuntimeTemplates/*/operations/*}:cancel", + }, { "method": "post", "uri": "/v1beta1/{name=projects/*/locations/*/persistentResources/*/operations/*}:cancel", @@ -5316,6 +5460,18 @@ def __call__( "method": "delete", "uri": "/ui/{name=projects/*/locations/*/models/*/evaluations/*/operations/*}", }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/notebookExecutionJobs/*/operations/*}", + }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/notebookRuntimes/*/operations/*}", + }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/notebookRuntimeTemplates/*/operations/*}", + }, { "method": "delete", "uri": "/ui/{name=projects/*/locations/*/persistentResources/*/operations/*}", @@ -5504,6 +5660,18 @@ def __call__( "method": "delete", "uri": "/v1beta1/{name=projects/*/locations/*/models/*/evaluations/*/operations/*}", }, + { + "method": "delete", + "uri": "/v1beta1/{name=projects/*/locations/*/notebookExecutionJobs/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1beta1/{name=projects/*/locations/*/notebookRuntimes/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1beta1/{name=projects/*/locations/*/notebookRuntimeTemplates/*/operations/*}", + }, { "method": "delete", "uri": "/v1beta1/{name=projects/*/locations/*/persistentResources/*/operations/*}", @@ -5770,6 +5938,18 @@ def __call__( "method": "get", "uri": "/ui/{name=projects/*/locations/*/models/*/evaluations/*/operations/*}", }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/notebookExecutionJobs/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/notebookRuntimes/*/operations/*}", + }, + { + "method": "get", + "uri": 
"/ui/{name=projects/*/locations/*/notebookRuntimeTemplates/*/operations/*}", + }, { "method": "get", "uri": "/ui/{name=projects/*/locations/*/persistentResources/*/operations/*}", @@ -5958,6 +6138,18 @@ def __call__( "method": "get", "uri": "/v1beta1/{name=projects/*/locations/*/models/*/evaluations/*/operations/*}", }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/notebookExecutionJobs/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/notebookRuntimes/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/notebookRuntimeTemplates/*/operations/*}", + }, { "method": "get", "uri": "/v1beta1/{name=projects/*/locations/*/persistentResources/*/operations/*}", @@ -6221,6 +6413,18 @@ def __call__( "method": "get", "uri": "/ui/{name=projects/*/locations/*/models/*/evaluations/*}/operations", }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/notebookExecutionJobs/*}/operations", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/notebookRuntimes/*}/operations", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/notebookRuntimeTemplates/*}/operations", + }, { "method": "get", "uri": "/ui/{name=projects/*/locations/*/studies/*}/operations", @@ -6409,6 +6613,18 @@ def __call__( "method": "get", "uri": "/v1beta1/{name=projects/*/locations/*/models/*/evaluations/*}/operations", }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/notebookExecutionJobs/*}/operations", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/notebookRuntimes/*}/operations", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/notebookRuntimeTemplates/*}/operations", + }, { "method": "get", "uri": "/v1beta1/{name=projects/*/locations/*/persistentResources/*}/operations", @@ -6672,6 +6888,18 @@ def __call__( "method": "post", "uri": "/ui/{name=projects/*/locations/*/models/*/evaluations/*/operations/*}:wait", }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/notebookExecutionJobs/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/notebookRuntimes/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/notebookRuntimeTemplates/*/operations/*}:wait", + }, { "method": "post", "uri": "/ui/{name=projects/*/locations/*/studies/*/operations/*}:wait", @@ -6860,6 +7088,18 @@ def __call__( "method": "post", "uri": "/v1beta1/{name=projects/*/locations/*/models/*/evaluations/*/operations/*}:wait", }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/notebookExecutionJobs/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/notebookRuntimes/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/notebookRuntimeTemplates/*/operations/*}:wait", + }, { "method": "post", "uri": "/v1beta1/{name=projects/*/locations/*/persistentResources/*/operations/*}:wait", diff --git a/google/cloud/aiplatform_v1beta1/types/__init__.py b/google/cloud/aiplatform_v1beta1/types/__init__.py index 1df59da78a..c00961f1ff 100644 --- a/google/cloud/aiplatform_v1beta1/types/__init__.py +++ b/google/cloud/aiplatform_v1beta1/types/__init__.py @@ -1114,6 +1114,7 @@ from .vertex_rag_data import ( ImportRagFilesConfig, RagCorpus, + RagEmbeddingModelConfig, RagFile, RagFileChunkingConfig, UploadRagFileConfig, @@ -2025,6 +2026,7 @@ 
"Value", "ImportRagFilesConfig", "RagCorpus", + "RagEmbeddingModelConfig", "RagFile", "RagFileChunkingConfig", "UploadRagFileConfig", diff --git a/google/cloud/aiplatform_v1beta1/types/vertex_rag_data.py b/google/cloud/aiplatform_v1beta1/types/vertex_rag_data.py index 39a98a7209..dd0df1e600 100644 --- a/google/cloud/aiplatform_v1beta1/types/vertex_rag_data.py +++ b/google/cloud/aiplatform_v1beta1/types/vertex_rag_data.py @@ -26,6 +26,7 @@ __protobuf__ = proto.module( package="google.cloud.aiplatform.v1beta1", manifest={ + "RagEmbeddingModelConfig", "RagCorpus", "RagFile", "RagFileChunkingConfig", @@ -35,6 +36,64 @@ ) +class RagEmbeddingModelConfig(proto.Message): + r"""Config for the embedding model to use for RAG. + + .. _oneof: https://proto-plus-python.readthedocs.io/en/stable/fields.html#oneofs-mutually-exclusive-fields + + Attributes: + vertex_prediction_endpoint (google.cloud.aiplatform_v1beta1.types.RagEmbeddingModelConfig.VertexPredictionEndpoint): + The Vertex AI Prediction Endpoint that either + refers to a publisher model or an endpoint that + is hosting a 1P fine-tuned text embedding model. + Endpoints hosting non-1P fine-tuned text + embedding models are currently not supported. + + This field is a member of `oneof`_ ``model_config``. + """ + + class VertexPredictionEndpoint(proto.Message): + r"""Config representing a model hosted on Vertex Prediction + Endpoint. + + Attributes: + endpoint (str): + Required. The endpoint resource name. Format: + ``projects/{project}/locations/{location}/publishers/{publisher}/models/{model}`` + or + ``projects/{project}/locations/{location}/endpoints/{endpoint}`` + model (str): + Output only. The resource name of the model that is deployed + on the endpoint. Present only when the endpoint is not a + publisher model. Pattern: + ``projects/{project}/locations/{location}/models/{model}`` + model_version_id (str): + Output only. Version ID of the model that is + deployed on the endpoint. Present only when the + endpoint is not a publisher model. + """ + + endpoint: str = proto.Field( + proto.STRING, + number=1, + ) + model: str = proto.Field( + proto.STRING, + number=2, + ) + model_version_id: str = proto.Field( + proto.STRING, + number=3, + ) + + vertex_prediction_endpoint: VertexPredictionEndpoint = proto.Field( + proto.MESSAGE, + number=1, + oneof="model_config", + message=VertexPredictionEndpoint, + ) + + class RagCorpus(proto.Message): r"""A RagCorpus is a RagFile container and a project can have multiple RagCorpora. @@ -49,6 +108,9 @@ class RagCorpus(proto.Message): can consist of any UTF-8 characters. description (str): Optional. The description of the RagCorpus. + rag_embedding_model_config (google.cloud.aiplatform_v1beta1.types.RagEmbeddingModelConfig): + Optional. Immutable. The embedding model + config of the RagCorpus. create_time (google.protobuf.timestamp_pb2.Timestamp): Output only. Timestamp when this RagCorpus was created. @@ -69,6 +131,11 @@ class RagCorpus(proto.Message): proto.STRING, number=3, ) + rag_embedding_model_config: "RagEmbeddingModelConfig" = proto.Field( + proto.MESSAGE, + number=6, + message="RagEmbeddingModelConfig", + ) create_time: timestamp_pb2.Timestamp = proto.Field( proto.MESSAGE, number=4, @@ -263,6 +330,15 @@ class ImportRagFilesConfig(proto.Message): rag_file_chunking_config (google.cloud.aiplatform_v1beta1.types.RagFileChunkingConfig): Specifies the size and overlap of chunks after importing RagFiles. + max_embedding_requests_per_min (int): + Optional. 
The max number of queries per + minute that this job is allowed to make to the + embedding model specified on the corpus. This + value is specific to this job and not shared + across other import jobs. Consult the Quotas + page on the project to set an appropriate value + here. If unspecified, a default value of 1,000 + QPM would be used. """ gcs_source: io.GcsSource = proto.Field( @@ -282,6 +358,10 @@ class ImportRagFilesConfig(proto.Message): number=4, message="RagFileChunkingConfig", ) + max_embedding_requests_per_min: int = proto.Field( + proto.INT32, + number=5, + ) __all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/tests/unit/gapic/aiplatform_v1/test_dataset_service.py b/tests/unit/gapic/aiplatform_v1/test_dataset_service.py index b126bdad56..48e6112747 100644 --- a/tests/unit/gapic/aiplatform_v1/test_dataset_service.py +++ b/tests/unit/gapic/aiplatform_v1/test_dataset_service.py @@ -2742,13 +2742,13 @@ def test_list_datasets_pager(transport_name: str = "grpc"): RuntimeError, ) - metadata = () - metadata = tuple(metadata) + ( + expected_metadata = () + expected_metadata = tuple(expected_metadata) + ( gapic_v1.routing_header.to_grpc_metadata((("parent", ""),)), ) pager = client.list_datasets(request={}) - assert pager._metadata == metadata + assert pager._metadata == expected_metadata results = list(pager) assert len(results) == 6 @@ -6123,13 +6123,13 @@ def test_list_dataset_versions_pager(transport_name: str = "grpc"): RuntimeError, ) - metadata = () - metadata = tuple(metadata) + ( + expected_metadata = () + expected_metadata = tuple(expected_metadata) + ( gapic_v1.routing_header.to_grpc_metadata((("parent", ""),)), ) pager = client.list_dataset_versions(request={}) - assert pager._metadata == metadata + assert pager._metadata == expected_metadata results = list(pager) assert len(results) == 6 @@ -7087,13 +7087,13 @@ def test_list_data_items_pager(transport_name: str = "grpc"): RuntimeError, ) - metadata = () - metadata = tuple(metadata) + ( + expected_metadata = () + expected_metadata = tuple(expected_metadata) + ( gapic_v1.routing_header.to_grpc_metadata((("parent", ""),)), ) pager = client.list_data_items(request={}) - assert pager._metadata == metadata + assert pager._metadata == expected_metadata results = list(pager) assert len(results) == 6 @@ -7596,13 +7596,13 @@ def test_search_data_items_pager(transport_name: str = "grpc"): RuntimeError, ) - metadata = () - metadata = tuple(metadata) + ( + expected_metadata = () + expected_metadata = tuple(expected_metadata) + ( gapic_v1.routing_header.to_grpc_metadata((("dataset", ""),)), ) pager = client.search_data_items(request={}) - assert pager._metadata == metadata + assert pager._metadata == expected_metadata results = list(pager) assert len(results) == 6 @@ -8192,13 +8192,13 @@ def test_list_saved_queries_pager(transport_name: str = "grpc"): RuntimeError, ) - metadata = () - metadata = tuple(metadata) + ( + expected_metadata = () + expected_metadata = tuple(expected_metadata) + ( gapic_v1.routing_header.to_grpc_metadata((("parent", ""),)), ) pager = client.list_saved_queries(request={}) - assert pager._metadata == metadata + assert pager._metadata == expected_metadata results = list(pager) assert len(results) == 6 @@ -9559,13 +9559,13 @@ def test_list_annotations_pager(transport_name: str = "grpc"): RuntimeError, ) - metadata = () - metadata = tuple(metadata) + ( + expected_metadata = () + expected_metadata = tuple(expected_metadata) + ( gapic_v1.routing_header.to_grpc_metadata((("parent", ""),)), ) pager = 
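# A minimal sketch of how the new RAG data types defined above fit together.
# Field names come from the message definitions in this patch; the import path
# assumes the types are re-exported from the versioned package, and the endpoint
# value is a placeholder publisher-model resource name.
from google.cloud import aiplatform_v1beta1 as aip

embedding_config = aip.RagEmbeddingModelConfig(
    vertex_prediction_endpoint=aip.RagEmbeddingModelConfig.VertexPredictionEndpoint(
        endpoint="projects/my-project/locations/us-central1/publishers/google/models/textembedding-gecko",
    )
)

corpus = aip.RagCorpus(
    display_name="my-rag-corpus",
    rag_embedding_model_config=embedding_config,  # immutable once set on the corpus
)

import_config = aip.ImportRagFilesConfig(
    # Optional throttle for calls to the corpus embedding model; per the docstring
    # above, 1,000 QPM is used when this is left unspecified.
    max_embedding_requests_per_min=600,
)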
client.list_annotations(request={}) - assert pager._metadata == metadata + assert pager._metadata == expected_metadata results = list(pager) assert len(results) == 6 diff --git a/tests/unit/gapic/aiplatform_v1/test_deployment_resource_pool_service.py b/tests/unit/gapic/aiplatform_v1/test_deployment_resource_pool_service.py index b3e433b437..76e45ca692 100644 --- a/tests/unit/gapic/aiplatform_v1/test_deployment_resource_pool_service.py +++ b/tests/unit/gapic/aiplatform_v1/test_deployment_resource_pool_service.py @@ -2535,13 +2535,13 @@ def test_list_deployment_resource_pools_pager(transport_name: str = "grpc"): RuntimeError, ) - metadata = () - metadata = tuple(metadata) + ( + expected_metadata = () + expected_metadata = tuple(expected_metadata) + ( gapic_v1.routing_header.to_grpc_metadata((("parent", ""),)), ) pager = client.list_deployment_resource_pools(request={}) - assert pager._metadata == metadata + assert pager._metadata == expected_metadata results = list(pager) assert len(results) == 6 @@ -3553,15 +3553,15 @@ def test_query_deployed_models_pager(transport_name: str = "grpc"): RuntimeError, ) - metadata = () - metadata = tuple(metadata) + ( + expected_metadata = () + expected_metadata = tuple(expected_metadata) + ( gapic_v1.routing_header.to_grpc_metadata( (("deployment_resource_pool", ""),) ), ) pager = client.query_deployed_models(request={}) - assert pager._metadata == metadata + assert pager._metadata == expected_metadata results = list(pager) assert len(results) == 6 diff --git a/tests/unit/gapic/aiplatform_v1/test_endpoint_service.py b/tests/unit/gapic/aiplatform_v1/test_endpoint_service.py index d91d623d52..dc12d56124 100644 --- a/tests/unit/gapic/aiplatform_v1/test_endpoint_service.py +++ b/tests/unit/gapic/aiplatform_v1/test_endpoint_service.py @@ -2372,13 +2372,13 @@ def test_list_endpoints_pager(transport_name: str = "grpc"): RuntimeError, ) - metadata = () - metadata = tuple(metadata) + ( + expected_metadata = () + expected_metadata = tuple(expected_metadata) + ( gapic_v1.routing_header.to_grpc_metadata((("parent", ""),)), ) pager = client.list_endpoints(request={}) - assert pager._metadata == metadata + assert pager._metadata == expected_metadata results = list(pager) assert len(results) == 6 diff --git a/tests/unit/gapic/aiplatform_v1/test_feature_online_store_admin_service.py b/tests/unit/gapic/aiplatform_v1/test_feature_online_store_admin_service.py index de743576b0..0cfe30d3e6 100644 --- a/tests/unit/gapic/aiplatform_v1/test_feature_online_store_admin_service.py +++ b/tests/unit/gapic/aiplatform_v1/test_feature_online_store_admin_service.py @@ -2569,13 +2569,13 @@ def test_list_feature_online_stores_pager(transport_name: str = "grpc"): RuntimeError, ) - metadata = () - metadata = tuple(metadata) + ( + expected_metadata = () + expected_metadata = tuple(expected_metadata) + ( gapic_v1.routing_header.to_grpc_metadata((("parent", ""),)), ) pager = client.list_feature_online_stores(request={}) - assert pager._metadata == metadata + assert pager._metadata == expected_metadata results = list(pager) assert len(results) == 6 @@ -4845,13 +4845,13 @@ def test_list_feature_views_pager(transport_name: str = "grpc"): RuntimeError, ) - metadata = () - metadata = tuple(metadata) + ( + expected_metadata = () + expected_metadata = tuple(expected_metadata) + ( gapic_v1.routing_header.to_grpc_metadata((("parent", ""),)), ) pager = client.list_feature_views(request={}) - assert pager._metadata == metadata + assert pager._metadata == expected_metadata results = list(pager) assert 
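# A small self-contained sketch of the pager-metadata pattern these tests assert
# on, built with the same routing-header helper the tests use (here the variable
# is named expected_metadata, matching the rename in this patch; the parent value
# is a placeholder, the real tests pass an empty string).
from google.api_core import gapic_v1

expected_metadata = ()
expected_metadata = tuple(expected_metadata) + (
    gapic_v1.routing_header.to_grpc_metadata((("parent", "projects/p/locations/l"),)),
)
# Each list_* pager keeps the per-call metadata it was created with, which the
# tests compare against this tuple via pager._metadata.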
len(results) == 6 @@ -7057,13 +7057,13 @@ def test_list_feature_view_syncs_pager(transport_name: str = "grpc"): RuntimeError, ) - metadata = () - metadata = tuple(metadata) + ( + expected_metadata = () + expected_metadata = tuple(expected_metadata) + ( gapic_v1.routing_header.to_grpc_metadata((("parent", ""),)), ) pager = client.list_feature_view_syncs(request={}) - assert pager._metadata == metadata + assert pager._metadata == expected_metadata results = list(pager) assert len(results) == 6 diff --git a/tests/unit/gapic/aiplatform_v1/test_feature_registry_service.py b/tests/unit/gapic/aiplatform_v1/test_feature_registry_service.py index 1b868f7140..f72da8b815 100644 --- a/tests/unit/gapic/aiplatform_v1/test_feature_registry_service.py +++ b/tests/unit/gapic/aiplatform_v1/test_feature_registry_service.py @@ -2490,13 +2490,13 @@ def test_list_feature_groups_pager(transport_name: str = "grpc"): RuntimeError, ) - metadata = () - metadata = tuple(metadata) + ( + expected_metadata = () + expected_metadata = tuple(expected_metadata) + ( gapic_v1.routing_header.to_grpc_metadata((("parent", ""),)), ) pager = client.list_feature_groups(request={}) - assert pager._metadata == metadata + assert pager._metadata == expected_metadata results = list(pager) assert len(results) == 6 @@ -4673,13 +4673,13 @@ def test_list_features_pager(transport_name: str = "grpc"): RuntimeError, ) - metadata = () - metadata = tuple(metadata) + ( + expected_metadata = () + expected_metadata = tuple(expected_metadata) + ( gapic_v1.routing_header.to_grpc_metadata((("parent", ""),)), ) pager = client.list_features(request={}) - assert pager._metadata == metadata + assert pager._metadata == expected_metadata results = list(pager) assert len(results) == 6 diff --git a/tests/unit/gapic/aiplatform_v1/test_featurestore_service.py b/tests/unit/gapic/aiplatform_v1/test_featurestore_service.py index 28cef80a73..b880e000ce 100644 --- a/tests/unit/gapic/aiplatform_v1/test_featurestore_service.py +++ b/tests/unit/gapic/aiplatform_v1/test_featurestore_service.py @@ -2454,13 +2454,13 @@ def test_list_featurestores_pager(transport_name: str = "grpc"): RuntimeError, ) - metadata = () - metadata = tuple(metadata) + ( + expected_metadata = () + expected_metadata = tuple(expected_metadata) + ( gapic_v1.routing_header.to_grpc_metadata((("parent", ""),)), ) pager = client.list_featurestores(request={}) - assert pager._metadata == metadata + assert pager._metadata == expected_metadata results = list(pager) assert len(results) == 6 @@ -4647,13 +4647,13 @@ def test_list_entity_types_pager(transport_name: str = "grpc"): RuntimeError, ) - metadata = () - metadata = tuple(metadata) + ( + expected_metadata = () + expected_metadata = tuple(expected_metadata) + ( gapic_v1.routing_header.to_grpc_metadata((("parent", ""),)), ) pager = client.list_entity_types(request={}) - assert pager._metadata == metadata + assert pager._metadata == expected_metadata results = list(pager) assert len(results) == 6 @@ -7224,13 +7224,13 @@ def test_list_features_pager(transport_name: str = "grpc"): RuntimeError, ) - metadata = () - metadata = tuple(metadata) + ( + expected_metadata = () + expected_metadata = tuple(expected_metadata) + ( gapic_v1.routing_header.to_grpc_metadata((("parent", ""),)), ) pager = client.list_features(request={}) - assert pager._metadata == metadata + assert pager._metadata == expected_metadata results = list(pager) assert len(results) == 6 @@ -10146,13 +10146,13 @@ def test_search_features_pager(transport_name: str = "grpc"): RuntimeError, ) - 
metadata = () - metadata = tuple(metadata) + ( + expected_metadata = () + expected_metadata = tuple(expected_metadata) + ( gapic_v1.routing_header.to_grpc_metadata((("location", ""),)), ) pager = client.search_features(request={}) - assert pager._metadata == metadata + assert pager._metadata == expected_metadata results = list(pager) assert len(results) == 6 diff --git a/tests/unit/gapic/aiplatform_v1/test_gen_ai_tuning_service.py b/tests/unit/gapic/aiplatform_v1/test_gen_ai_tuning_service.py index cbac28c368..80d47e4cfc 100644 --- a/tests/unit/gapic/aiplatform_v1/test_gen_ai_tuning_service.py +++ b/tests/unit/gapic/aiplatform_v1/test_gen_ai_tuning_service.py @@ -2405,13 +2405,13 @@ def test_list_tuning_jobs_pager(transport_name: str = "grpc"): RuntimeError, ) - metadata = () - metadata = tuple(metadata) + ( + expected_metadata = () + expected_metadata = tuple(expected_metadata) + ( gapic_v1.routing_header.to_grpc_metadata((("parent", ""),)), ) pager = client.list_tuning_jobs(request={}) - assert pager._metadata == metadata + assert pager._metadata == expected_metadata results = list(pager) assert len(results) == 6 diff --git a/tests/unit/gapic/aiplatform_v1/test_index_endpoint_service.py b/tests/unit/gapic/aiplatform_v1/test_index_endpoint_service.py index 22e8bf60a6..78ded036cf 100644 --- a/tests/unit/gapic/aiplatform_v1/test_index_endpoint_service.py +++ b/tests/unit/gapic/aiplatform_v1/test_index_endpoint_service.py @@ -2475,13 +2475,13 @@ def test_list_index_endpoints_pager(transport_name: str = "grpc"): RuntimeError, ) - metadata = () - metadata = tuple(metadata) + ( + expected_metadata = () + expected_metadata = tuple(expected_metadata) + ( gapic_v1.routing_header.to_grpc_metadata((("parent", ""),)), ) pager = client.list_index_endpoints(request={}) - assert pager._metadata == metadata + assert pager._metadata == expected_metadata results = list(pager) assert len(results) == 6 diff --git a/tests/unit/gapic/aiplatform_v1/test_index_service.py b/tests/unit/gapic/aiplatform_v1/test_index_service.py index d936585677..990c487298 100644 --- a/tests/unit/gapic/aiplatform_v1/test_index_service.py +++ b/tests/unit/gapic/aiplatform_v1/test_index_service.py @@ -2291,13 +2291,13 @@ def test_list_indexes_pager(transport_name: str = "grpc"): RuntimeError, ) - metadata = () - metadata = tuple(metadata) + ( + expected_metadata = () + expected_metadata = tuple(expected_metadata) + ( gapic_v1.routing_header.to_grpc_metadata((("parent", ""),)), ) pager = client.list_indexes(request={}) - assert pager._metadata == metadata + assert pager._metadata == expected_metadata results = list(pager) assert len(results) == 6 diff --git a/tests/unit/gapic/aiplatform_v1/test_job_service.py b/tests/unit/gapic/aiplatform_v1/test_job_service.py index d5619fa394..7a1aad040d 100644 --- a/tests/unit/gapic/aiplatform_v1/test_job_service.py +++ b/tests/unit/gapic/aiplatform_v1/test_job_service.py @@ -2332,13 +2332,13 @@ def test_list_custom_jobs_pager(transport_name: str = "grpc"): RuntimeError, ) - metadata = () - metadata = tuple(metadata) + ( + expected_metadata = () + expected_metadata = tuple(expected_metadata) + ( gapic_v1.routing_header.to_grpc_metadata((("parent", ""),)), ) pager = client.list_custom_jobs(request={}) - assert pager._metadata == metadata + assert pager._metadata == expected_metadata results = list(pager) assert len(results) == 6 @@ -4560,13 +4560,13 @@ def test_list_data_labeling_jobs_pager(transport_name: str = "grpc"): RuntimeError, ) - metadata = () - metadata = tuple(metadata) + ( + 
expected_metadata = () + expected_metadata = tuple(expected_metadata) + ( gapic_v1.routing_header.to_grpc_metadata((("parent", ""),)), ) pager = client.list_data_labeling_jobs(request={}) - assert pager._metadata == metadata + assert pager._metadata == expected_metadata results = list(pager) assert len(results) == 6 @@ -6783,13 +6783,13 @@ def test_list_hyperparameter_tuning_jobs_pager(transport_name: str = "grpc"): RuntimeError, ) - metadata = () - metadata = tuple(metadata) + ( + expected_metadata = () + expected_metadata = tuple(expected_metadata) + ( gapic_v1.routing_header.to_grpc_metadata((("parent", ""),)), ) pager = client.list_hyperparameter_tuning_jobs(request={}) - assert pager._metadata == metadata + assert pager._metadata == expected_metadata results = list(pager) assert len(results) == 6 @@ -8898,13 +8898,13 @@ def test_list_nas_jobs_pager(transport_name: str = "grpc"): RuntimeError, ) - metadata = () - metadata = tuple(metadata) + ( + expected_metadata = () + expected_metadata = tuple(expected_metadata) + ( gapic_v1.routing_header.to_grpc_metadata((("parent", ""),)), ) pager = client.list_nas_jobs(request={}) - assert pager._metadata == metadata + assert pager._metadata == expected_metadata results = list(pager) assert len(results) == 6 @@ -10602,13 +10602,13 @@ def test_list_nas_trial_details_pager(transport_name: str = "grpc"): RuntimeError, ) - metadata = () - metadata = tuple(metadata) + ( + expected_metadata = () + expected_metadata = tuple(expected_metadata) + ( gapic_v1.routing_header.to_grpc_metadata((("parent", ""),)), ) pager = client.list_nas_trial_details(request={}) - assert pager._metadata == metadata + assert pager._metadata == expected_metadata results = list(pager) assert len(results) == 6 @@ -12071,13 +12071,13 @@ def test_list_batch_prediction_jobs_pager(transport_name: str = "grpc"): RuntimeError, ) - metadata = () - metadata = tuple(metadata) + ( + expected_metadata = () + expected_metadata = tuple(expected_metadata) + ( gapic_v1.routing_header.to_grpc_metadata((("parent", ""),)), ) pager = client.list_batch_prediction_jobs(request={}) - assert pager._metadata == metadata + assert pager._metadata == expected_metadata results = list(pager) assert len(results) == 6 @@ -13956,15 +13956,15 @@ def test_search_model_deployment_monitoring_stats_anomalies_pager( RuntimeError, ) - metadata = () - metadata = tuple(metadata) + ( + expected_metadata = () + expected_metadata = tuple(expected_metadata) + ( gapic_v1.routing_header.to_grpc_metadata( (("model_deployment_monitoring_job", ""),) ), ) pager = client.search_model_deployment_monitoring_stats_anomalies(request={}) - assert pager._metadata == metadata + assert pager._metadata == expected_metadata results = list(pager) assert len(results) == 6 @@ -15012,13 +15012,13 @@ def test_list_model_deployment_monitoring_jobs_pager(transport_name: str = "grpc RuntimeError, ) - metadata = () - metadata = tuple(metadata) + ( + expected_metadata = () + expected_metadata = tuple(expected_metadata) + ( gapic_v1.routing_header.to_grpc_metadata((("parent", ""),)), ) pager = client.list_model_deployment_monitoring_jobs(request={}) - assert pager._metadata == metadata + assert pager._metadata == expected_metadata results = list(pager) assert len(results) == 6 diff --git a/tests/unit/gapic/aiplatform_v1/test_metadata_service.py b/tests/unit/gapic/aiplatform_v1/test_metadata_service.py index 7072cb69c3..c0858993b9 100644 --- a/tests/unit/gapic/aiplatform_v1/test_metadata_service.py +++ 
b/tests/unit/gapic/aiplatform_v1/test_metadata_service.py @@ -2417,13 +2417,13 @@ def test_list_metadata_stores_pager(transport_name: str = "grpc"): RuntimeError, ) - metadata = () - metadata = tuple(metadata) + ( + expected_metadata = () + expected_metadata = tuple(expected_metadata) + ( gapic_v1.routing_header.to_grpc_metadata((("parent", ""),)), ) pager = client.list_metadata_stores(request={}) - assert pager._metadata == metadata + assert pager._metadata == expected_metadata results = list(pager) assert len(results) == 6 @@ -4207,13 +4207,13 @@ def test_list_artifacts_pager(transport_name: str = "grpc"): RuntimeError, ) - metadata = () - metadata = tuple(metadata) + ( + expected_metadata = () + expected_metadata = tuple(expected_metadata) + ( gapic_v1.routing_header.to_grpc_metadata((("parent", ""),)), ) pager = client.list_artifacts(request={}) - assert pager._metadata == metadata + assert pager._metadata == expected_metadata results = list(pager) assert len(results) == 6 @@ -6736,13 +6736,13 @@ def test_list_contexts_pager(transport_name: str = "grpc"): RuntimeError, ) - metadata = () - metadata = tuple(metadata) + ( + expected_metadata = () + expected_metadata = tuple(expected_metadata) + ( gapic_v1.routing_header.to_grpc_metadata((("parent", ""),)), ) pager = client.list_contexts(request={}) - assert pager._metadata == metadata + assert pager._metadata == expected_metadata results = list(pager) assert len(results) == 6 @@ -10846,13 +10846,13 @@ def test_list_executions_pager(transport_name: str = "grpc"): RuntimeError, ) - metadata = () - metadata = tuple(metadata) + ( + expected_metadata = () + expected_metadata = tuple(expected_metadata) + ( gapic_v1.routing_header.to_grpc_metadata((("parent", ""),)), ) pager = client.list_executions(request={}) - assert pager._metadata == metadata + assert pager._metadata == expected_metadata results = list(pager) assert len(results) == 6 @@ -14229,13 +14229,13 @@ def test_list_metadata_schemas_pager(transport_name: str = "grpc"): RuntimeError, ) - metadata = () - metadata = tuple(metadata) + ( + expected_metadata = () + expected_metadata = tuple(expected_metadata) + ( gapic_v1.routing_header.to_grpc_metadata((("parent", ""),)), ) pager = client.list_metadata_schemas(request={}) - assert pager._metadata == metadata + assert pager._metadata == expected_metadata results = list(pager) assert len(results) == 6 diff --git a/tests/unit/gapic/aiplatform_v1/test_migration_service.py b/tests/unit/gapic/aiplatform_v1/test_migration_service.py index 1af26857c4..72c22f4246 100644 --- a/tests/unit/gapic/aiplatform_v1/test_migration_service.py +++ b/tests/unit/gapic/aiplatform_v1/test_migration_service.py @@ -1597,13 +1597,13 @@ def test_search_migratable_resources_pager(transport_name: str = "grpc"): RuntimeError, ) - metadata = () - metadata = tuple(metadata) + ( + expected_metadata = () + expected_metadata = tuple(expected_metadata) + ( gapic_v1.routing_header.to_grpc_metadata((("parent", ""),)), ) pager = client.search_migratable_resources(request={}) - assert pager._metadata == metadata + assert pager._metadata == expected_metadata results = list(pager) assert len(results) == 6 @@ -3559,19 +3559,22 @@ def test_parse_dataset_path(): def test_dataset_path(): project = "squid" - dataset = "clam" - expected = "projects/{project}/datasets/{dataset}".format( + location = "clam" + dataset = "whelk" + expected = "projects/{project}/locations/{location}/datasets/{dataset}".format( project=project, + location=location, dataset=dataset, ) - actual = 
MigrationServiceClient.dataset_path(project, dataset) + actual = MigrationServiceClient.dataset_path(project, location, dataset) assert expected == actual def test_parse_dataset_path(): expected = { - "project": "whelk", - "dataset": "octopus", + "project": "octopus", + "location": "oyster", + "dataset": "nudibranch", } path = MigrationServiceClient.dataset_path(**expected) @@ -3581,22 +3584,19 @@ def test_parse_dataset_path(): def test_dataset_path(): - project = "oyster" - location = "nudibranch" - dataset = "cuttlefish" - expected = "projects/{project}/locations/{location}/datasets/{dataset}".format( + project = "cuttlefish" + dataset = "mussel" + expected = "projects/{project}/datasets/{dataset}".format( project=project, - location=location, dataset=dataset, ) - actual = MigrationServiceClient.dataset_path(project, location, dataset) + actual = MigrationServiceClient.dataset_path(project, dataset) assert expected == actual def test_parse_dataset_path(): expected = { - "project": "mussel", - "location": "winkle", + "project": "winkle", "dataset": "nautilus", } path = MigrationServiceClient.dataset_path(**expected) diff --git a/tests/unit/gapic/aiplatform_v1/test_model_service.py b/tests/unit/gapic/aiplatform_v1/test_model_service.py index 8198ef26bf..e1e95070aa 100644 --- a/tests/unit/gapic/aiplatform_v1/test_model_service.py +++ b/tests/unit/gapic/aiplatform_v1/test_model_service.py @@ -2380,13 +2380,13 @@ def test_list_models_pager(transport_name: str = "grpc"): RuntimeError, ) - metadata = () - metadata = tuple(metadata) + ( + expected_metadata = () + expected_metadata = tuple(expected_metadata) + ( gapic_v1.routing_header.to_grpc_metadata((("parent", ""),)), ) pager = client.list_models(request={}) - assert pager._metadata == metadata + assert pager._metadata == expected_metadata results = list(pager) assert len(results) == 6 @@ -2969,13 +2969,13 @@ def test_list_model_versions_pager(transport_name: str = "grpc"): RuntimeError, ) - metadata = () - metadata = tuple(metadata) + ( + expected_metadata = () + expected_metadata = tuple(expected_metadata) + ( gapic_v1.routing_header.to_grpc_metadata((("name", ""),)), ) pager = client.list_model_versions(request={}) - assert pager._metadata == metadata + assert pager._metadata == expected_metadata results = list(pager) assert len(results) == 6 @@ -8145,13 +8145,13 @@ def test_list_model_evaluations_pager(transport_name: str = "grpc"): RuntimeError, ) - metadata = () - metadata = tuple(metadata) + ( + expected_metadata = () + expected_metadata = tuple(expected_metadata) + ( gapic_v1.routing_header.to_grpc_metadata((("parent", ""),)), ) pager = client.list_model_evaluations(request={}) - assert pager._metadata == metadata + assert pager._metadata == expected_metadata results = list(pager) assert len(results) == 6 @@ -9138,13 +9138,13 @@ def test_list_model_evaluation_slices_pager(transport_name: str = "grpc"): RuntimeError, ) - metadata = () - metadata = tuple(metadata) + ( + expected_metadata = () + expected_metadata = tuple(expected_metadata) + ( gapic_v1.routing_header.to_grpc_metadata((("parent", ""),)), ) pager = client.list_model_evaluation_slices(request={}) - assert pager._metadata == metadata + assert pager._metadata == expected_metadata results = list(pager) assert len(results) == 6 diff --git a/tests/unit/gapic/aiplatform_v1/test_notebook_service.py b/tests/unit/gapic/aiplatform_v1/test_notebook_service.py index 8a2d653263..ae6d31eb2c 100644 --- a/tests/unit/gapic/aiplatform_v1/test_notebook_service.py +++ 
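# A short sketch of the two dataset resource-name formats exercised by the
# reordered MigrationServiceClient.dataset_path tests above (all values are
# placeholders).
vertex_dataset_name = "projects/{project}/locations/{location}/datasets/{dataset}".format(
    project="my-project", location="us-central1", dataset="1234"
)
legacy_dataset_name = "projects/{project}/datasets/{dataset}".format(
    project="my-project", dataset="5678"
)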
b/tests/unit/gapic/aiplatform_v1/test_notebook_service.py @@ -2463,13 +2463,13 @@ def test_list_notebook_runtime_templates_pager(transport_name: str = "grpc"): RuntimeError, ) - metadata = () - metadata = tuple(metadata) + ( + expected_metadata = () + expected_metadata = tuple(expected_metadata) + ( gapic_v1.routing_header.to_grpc_metadata((("parent", ""),)), ) pager = client.list_notebook_runtime_templates(request={}) - assert pager._metadata == metadata + assert pager._metadata == expected_metadata results = list(pager) assert len(results) == 6 @@ -4799,13 +4799,13 @@ def test_list_notebook_runtimes_pager(transport_name: str = "grpc"): RuntimeError, ) - metadata = () - metadata = tuple(metadata) + ( + expected_metadata = () + expected_metadata = tuple(expected_metadata) + ( gapic_v1.routing_header.to_grpc_metadata((("parent", ""),)), ) pager = client.list_notebook_runtimes(request={}) - assert pager._metadata == metadata + assert pager._metadata == expected_metadata results = list(pager) assert len(results) == 6 diff --git a/tests/unit/gapic/aiplatform_v1/test_persistent_resource_service.py b/tests/unit/gapic/aiplatform_v1/test_persistent_resource_service.py index db08a0eae8..bcd20c37b2 100644 --- a/tests/unit/gapic/aiplatform_v1/test_persistent_resource_service.py +++ b/tests/unit/gapic/aiplatform_v1/test_persistent_resource_service.py @@ -2505,13 +2505,13 @@ def test_list_persistent_resources_pager(transport_name: str = "grpc"): RuntimeError, ) - metadata = () - metadata = tuple(metadata) + ( + expected_metadata = () + expected_metadata = tuple(expected_metadata) + ( gapic_v1.routing_header.to_grpc_metadata((("parent", ""),)), ) pager = client.list_persistent_resources(request={}) - assert pager._metadata == metadata + assert pager._metadata == expected_metadata results = list(pager) assert len(results) == 6 diff --git a/tests/unit/gapic/aiplatform_v1/test_pipeline_service.py b/tests/unit/gapic/aiplatform_v1/test_pipeline_service.py index 0bc897a722..474aa2039c 100644 --- a/tests/unit/gapic/aiplatform_v1/test_pipeline_service.py +++ b/tests/unit/gapic/aiplatform_v1/test_pipeline_service.py @@ -2461,13 +2461,13 @@ def test_list_training_pipelines_pager(transport_name: str = "grpc"): RuntimeError, ) - metadata = () - metadata = tuple(metadata) + ( + expected_metadata = () + expected_metadata = tuple(expected_metadata) + ( gapic_v1.routing_header.to_grpc_metadata((("parent", ""),)), ) pager = client.list_training_pipelines(request={}) - assert pager._metadata == metadata + assert pager._metadata == expected_metadata results = list(pager) assert len(results) == 6 @@ -4682,13 +4682,13 @@ def test_list_pipeline_jobs_pager(transport_name: str = "grpc"): RuntimeError, ) - metadata = () - metadata = tuple(metadata) + ( + expected_metadata = () + expected_metadata = tuple(expected_metadata) + ( gapic_v1.routing_header.to_grpc_metadata((("parent", ""),)), ) pager = client.list_pipeline_jobs(request={}) - assert pager._metadata == metadata + assert pager._metadata == expected_metadata results = list(pager) assert len(results) == 6 diff --git a/tests/unit/gapic/aiplatform_v1/test_schedule_service.py b/tests/unit/gapic/aiplatform_v1/test_schedule_service.py index e2012b2f86..fb8269bb8c 100644 --- a/tests/unit/gapic/aiplatform_v1/test_schedule_service.py +++ b/tests/unit/gapic/aiplatform_v1/test_schedule_service.py @@ -2769,13 +2769,13 @@ def test_list_schedules_pager(transport_name: str = "grpc"): RuntimeError, ) - metadata = () - metadata = tuple(metadata) + ( + expected_metadata = () + 
expected_metadata = tuple(expected_metadata) + ( gapic_v1.routing_header.to_grpc_metadata((("parent", ""),)), ) pager = client.list_schedules(request={}) - assert pager._metadata == metadata + assert pager._metadata == expected_metadata results = list(pager) assert len(results) == 6 diff --git a/tests/unit/gapic/aiplatform_v1/test_specialist_pool_service.py b/tests/unit/gapic/aiplatform_v1/test_specialist_pool_service.py index bc57eb5d36..311449ea7f 100644 --- a/tests/unit/gapic/aiplatform_v1/test_specialist_pool_service.py +++ b/tests/unit/gapic/aiplatform_v1/test_specialist_pool_service.py @@ -2461,13 +2461,13 @@ def test_list_specialist_pools_pager(transport_name: str = "grpc"): RuntimeError, ) - metadata = () - metadata = tuple(metadata) + ( + expected_metadata = () + expected_metadata = tuple(expected_metadata) + ( gapic_v1.routing_header.to_grpc_metadata((("parent", ""),)), ) pager = client.list_specialist_pools(request={}) - assert pager._metadata == metadata + assert pager._metadata == expected_metadata results = list(pager) assert len(results) == 6 diff --git a/tests/unit/gapic/aiplatform_v1/test_tensorboard_service.py b/tests/unit/gapic/aiplatform_v1/test_tensorboard_service.py index 2a251348bd..45658e520c 100644 --- a/tests/unit/gapic/aiplatform_v1/test_tensorboard_service.py +++ b/tests/unit/gapic/aiplatform_v1/test_tensorboard_service.py @@ -2830,13 +2830,13 @@ def test_list_tensorboards_pager(transport_name: str = "grpc"): RuntimeError, ) - metadata = () - metadata = tuple(metadata) + ( + expected_metadata = () + expected_metadata = tuple(expected_metadata) + ( gapic_v1.routing_header.to_grpc_metadata((("parent", ""),)), ) pager = client.list_tensorboards(request={}) - assert pager._metadata == metadata + assert pager._metadata == expected_metadata results = list(pager) assert len(results) == 6 @@ -5880,13 +5880,13 @@ def test_list_tensorboard_experiments_pager(transport_name: str = "grpc"): RuntimeError, ) - metadata = () - metadata = tuple(metadata) + ( + expected_metadata = () + expected_metadata = tuple(expected_metadata) + ( gapic_v1.routing_header.to_grpc_metadata((("parent", ""),)), ) pager = client.list_tensorboard_experiments(request={}) - assert pager._metadata == metadata + assert pager._metadata == expected_metadata results = list(pager) assert len(results) == 6 @@ -8533,13 +8533,13 @@ def test_list_tensorboard_runs_pager(transport_name: str = "grpc"): RuntimeError, ) - metadata = () - metadata = tuple(metadata) + ( + expected_metadata = () + expected_metadata = tuple(expected_metadata) + ( gapic_v1.routing_header.to_grpc_metadata((("parent", ""),)), ) pager = client.list_tensorboard_runs(request={}) - assert pager._metadata == metadata + assert pager._metadata == expected_metadata results = list(pager) assert len(results) == 6 @@ -11275,13 +11275,13 @@ def test_list_tensorboard_time_series_pager(transport_name: str = "grpc"): RuntimeError, ) - metadata = () - metadata = tuple(metadata) + ( + expected_metadata = () + expected_metadata = tuple(expected_metadata) + ( gapic_v1.routing_header.to_grpc_metadata((("parent", ""),)), ) pager = client.list_tensorboard_time_series(request={}) - assert pager._metadata == metadata + assert pager._metadata == expected_metadata results = list(pager) assert len(results) == 6 @@ -14307,15 +14307,15 @@ def test_export_tensorboard_time_series_data_pager(transport_name: str = "grpc") RuntimeError, ) - metadata = () - metadata = tuple(metadata) + ( + expected_metadata = () + expected_metadata = tuple(expected_metadata) + ( 
gapic_v1.routing_header.to_grpc_metadata( (("tensorboard_time_series", ""),) ), ) pager = client.export_tensorboard_time_series_data(request={}) - assert pager._metadata == metadata + assert pager._metadata == expected_metadata results = list(pager) assert len(results) == 6 diff --git a/tests/unit/gapic/aiplatform_v1/test_vizier_service.py b/tests/unit/gapic/aiplatform_v1/test_vizier_service.py index 614f98c528..5fb4cb008d 100644 --- a/tests/unit/gapic/aiplatform_v1/test_vizier_service.py +++ b/tests/unit/gapic/aiplatform_v1/test_vizier_service.py @@ -2306,13 +2306,13 @@ def test_list_studies_pager(transport_name: str = "grpc"): RuntimeError, ) - metadata = () - metadata = tuple(metadata) + ( + expected_metadata = () + expected_metadata = tuple(expected_metadata) + ( gapic_v1.routing_header.to_grpc_metadata((("parent", ""),)), ) pager = client.list_studies(request={}) - assert pager._metadata == metadata + assert pager._metadata == expected_metadata results = list(pager) assert len(results) == 6 @@ -4679,13 +4679,13 @@ def test_list_trials_pager(transport_name: str = "grpc"): RuntimeError, ) - metadata = () - metadata = tuple(metadata) + ( + expected_metadata = () + expected_metadata = tuple(expected_metadata) + ( gapic_v1.routing_header.to_grpc_metadata((("parent", ""),)), ) pager = client.list_trials(request={}) - assert pager._metadata == metadata + assert pager._metadata == expected_metadata results = list(pager) assert len(results) == 6 diff --git a/tests/unit/gapic/aiplatform_v1beta1/test_dataset_service.py b/tests/unit/gapic/aiplatform_v1beta1/test_dataset_service.py index 9a2c3489d7..323f27feb3 100644 --- a/tests/unit/gapic/aiplatform_v1beta1/test_dataset_service.py +++ b/tests/unit/gapic/aiplatform_v1beta1/test_dataset_service.py @@ -2744,13 +2744,13 @@ def test_list_datasets_pager(transport_name: str = "grpc"): RuntimeError, ) - metadata = () - metadata = tuple(metadata) + ( + expected_metadata = () + expected_metadata = tuple(expected_metadata) + ( gapic_v1.routing_header.to_grpc_metadata((("parent", ""),)), ) pager = client.list_datasets(request={}) - assert pager._metadata == metadata + assert pager._metadata == expected_metadata results = list(pager) assert len(results) == 6 @@ -6125,13 +6125,13 @@ def test_list_dataset_versions_pager(transport_name: str = "grpc"): RuntimeError, ) - metadata = () - metadata = tuple(metadata) + ( + expected_metadata = () + expected_metadata = tuple(expected_metadata) + ( gapic_v1.routing_header.to_grpc_metadata((("parent", ""),)), ) pager = client.list_dataset_versions(request={}) - assert pager._metadata == metadata + assert pager._metadata == expected_metadata results = list(pager) assert len(results) == 6 @@ -7089,13 +7089,13 @@ def test_list_data_items_pager(transport_name: str = "grpc"): RuntimeError, ) - metadata = () - metadata = tuple(metadata) + ( + expected_metadata = () + expected_metadata = tuple(expected_metadata) + ( gapic_v1.routing_header.to_grpc_metadata((("parent", ""),)), ) pager = client.list_data_items(request={}) - assert pager._metadata == metadata + assert pager._metadata == expected_metadata results = list(pager) assert len(results) == 6 @@ -7598,13 +7598,13 @@ def test_search_data_items_pager(transport_name: str = "grpc"): RuntimeError, ) - metadata = () - metadata = tuple(metadata) + ( + expected_metadata = () + expected_metadata = tuple(expected_metadata) + ( gapic_v1.routing_header.to_grpc_metadata((("dataset", ""),)), ) pager = client.search_data_items(request={}) - assert pager._metadata == metadata + assert 
pager._metadata == expected_metadata results = list(pager) assert len(results) == 6 @@ -8194,13 +8194,13 @@ def test_list_saved_queries_pager(transport_name: str = "grpc"): RuntimeError, ) - metadata = () - metadata = tuple(metadata) + ( + expected_metadata = () + expected_metadata = tuple(expected_metadata) + ( gapic_v1.routing_header.to_grpc_metadata((("parent", ""),)), ) pager = client.list_saved_queries(request={}) - assert pager._metadata == metadata + assert pager._metadata == expected_metadata results = list(pager) assert len(results) == 6 @@ -9561,13 +9561,13 @@ def test_list_annotations_pager(transport_name: str = "grpc"): RuntimeError, ) - metadata = () - metadata = tuple(metadata) + ( + expected_metadata = () + expected_metadata = tuple(expected_metadata) + ( gapic_v1.routing_header.to_grpc_metadata((("parent", ""),)), ) pager = client.list_annotations(request={}) - assert pager._metadata == metadata + assert pager._metadata == expected_metadata results = list(pager) assert len(results) == 6 diff --git a/tests/unit/gapic/aiplatform_v1beta1/test_deployment_resource_pool_service.py b/tests/unit/gapic/aiplatform_v1beta1/test_deployment_resource_pool_service.py index e47d43dbae..8c0c73f84c 100644 --- a/tests/unit/gapic/aiplatform_v1beta1/test_deployment_resource_pool_service.py +++ b/tests/unit/gapic/aiplatform_v1beta1/test_deployment_resource_pool_service.py @@ -2537,13 +2537,13 @@ def test_list_deployment_resource_pools_pager(transport_name: str = "grpc"): RuntimeError, ) - metadata = () - metadata = tuple(metadata) + ( + expected_metadata = () + expected_metadata = tuple(expected_metadata) + ( gapic_v1.routing_header.to_grpc_metadata((("parent", ""),)), ) pager = client.list_deployment_resource_pools(request={}) - assert pager._metadata == metadata + assert pager._metadata == expected_metadata results = list(pager) assert len(results) == 6 @@ -3555,15 +3555,15 @@ def test_query_deployed_models_pager(transport_name: str = "grpc"): RuntimeError, ) - metadata = () - metadata = tuple(metadata) + ( + expected_metadata = () + expected_metadata = tuple(expected_metadata) + ( gapic_v1.routing_header.to_grpc_metadata( (("deployment_resource_pool", ""),) ), ) pager = client.query_deployed_models(request={}) - assert pager._metadata == metadata + assert pager._metadata == expected_metadata results = list(pager) assert len(results) == 6 diff --git a/tests/unit/gapic/aiplatform_v1beta1/test_endpoint_service.py b/tests/unit/gapic/aiplatform_v1beta1/test_endpoint_service.py index 422e63695f..c4911f7d86 100644 --- a/tests/unit/gapic/aiplatform_v1beta1/test_endpoint_service.py +++ b/tests/unit/gapic/aiplatform_v1beta1/test_endpoint_service.py @@ -2372,13 +2372,13 @@ def test_list_endpoints_pager(transport_name: str = "grpc"): RuntimeError, ) - metadata = () - metadata = tuple(metadata) + ( + expected_metadata = () + expected_metadata = tuple(expected_metadata) + ( gapic_v1.routing_header.to_grpc_metadata((("parent", ""),)), ) pager = client.list_endpoints(request={}) - assert pager._metadata == metadata + assert pager._metadata == expected_metadata results = list(pager) assert len(results) == 6 diff --git a/tests/unit/gapic/aiplatform_v1beta1/test_extension_registry_service.py b/tests/unit/gapic/aiplatform_v1beta1/test_extension_registry_service.py index 09e71a3aa4..9a7ff5be15 100644 --- a/tests/unit/gapic/aiplatform_v1beta1/test_extension_registry_service.py +++ b/tests/unit/gapic/aiplatform_v1beta1/test_extension_registry_service.py @@ -2397,13 +2397,13 @@ def 
test_list_extensions_pager(transport_name: str = "grpc"): RuntimeError, ) - metadata = () - metadata = tuple(metadata) + ( + expected_metadata = () + expected_metadata = tuple(expected_metadata) + ( gapic_v1.routing_header.to_grpc_metadata((("parent", ""),)), ) pager = client.list_extensions(request={}) - assert pager._metadata == metadata + assert pager._metadata == expected_metadata results = list(pager) assert len(results) == 6 diff --git a/tests/unit/gapic/aiplatform_v1beta1/test_feature_online_store_admin_service.py b/tests/unit/gapic/aiplatform_v1beta1/test_feature_online_store_admin_service.py index d07a6565dd..2f703e87ab 100644 --- a/tests/unit/gapic/aiplatform_v1beta1/test_feature_online_store_admin_service.py +++ b/tests/unit/gapic/aiplatform_v1beta1/test_feature_online_store_admin_service.py @@ -2570,13 +2570,13 @@ def test_list_feature_online_stores_pager(transport_name: str = "grpc"): RuntimeError, ) - metadata = () - metadata = tuple(metadata) + ( + expected_metadata = () + expected_metadata = tuple(expected_metadata) + ( gapic_v1.routing_header.to_grpc_metadata((("parent", ""),)), ) pager = client.list_feature_online_stores(request={}) - assert pager._metadata == metadata + assert pager._metadata == expected_metadata results = list(pager) assert len(results) == 6 @@ -4862,13 +4862,13 @@ def test_list_feature_views_pager(transport_name: str = "grpc"): RuntimeError, ) - metadata = () - metadata = tuple(metadata) + ( + expected_metadata = () + expected_metadata = tuple(expected_metadata) + ( gapic_v1.routing_header.to_grpc_metadata((("parent", ""),)), ) pager = client.list_feature_views(request={}) - assert pager._metadata == metadata + assert pager._metadata == expected_metadata results = list(pager) assert len(results) == 6 @@ -7074,13 +7074,13 @@ def test_list_feature_view_syncs_pager(transport_name: str = "grpc"): RuntimeError, ) - metadata = () - metadata = tuple(metadata) + ( + expected_metadata = () + expected_metadata = tuple(expected_metadata) + ( gapic_v1.routing_header.to_grpc_metadata((("parent", ""),)), ) pager = client.list_feature_view_syncs(request={}) - assert pager._metadata == metadata + assert pager._metadata == expected_metadata results = list(pager) assert len(results) == 6 diff --git a/tests/unit/gapic/aiplatform_v1beta1/test_feature_registry_service.py b/tests/unit/gapic/aiplatform_v1beta1/test_feature_registry_service.py index 80e6e4116e..c3893d9463 100644 --- a/tests/unit/gapic/aiplatform_v1beta1/test_feature_registry_service.py +++ b/tests/unit/gapic/aiplatform_v1beta1/test_feature_registry_service.py @@ -2492,13 +2492,13 @@ def test_list_feature_groups_pager(transport_name: str = "grpc"): RuntimeError, ) - metadata = () - metadata = tuple(metadata) + ( + expected_metadata = () + expected_metadata = tuple(expected_metadata) + ( gapic_v1.routing_header.to_grpc_metadata((("parent", ""),)), ) pager = client.list_feature_groups(request={}) - assert pager._metadata == metadata + assert pager._metadata == expected_metadata results = list(pager) assert len(results) == 6 @@ -4675,13 +4675,13 @@ def test_list_features_pager(transport_name: str = "grpc"): RuntimeError, ) - metadata = () - metadata = tuple(metadata) + ( + expected_metadata = () + expected_metadata = tuple(expected_metadata) + ( gapic_v1.routing_header.to_grpc_metadata((("parent", ""),)), ) pager = client.list_features(request={}) - assert pager._metadata == metadata + assert pager._metadata == expected_metadata results = list(pager) assert len(results) == 6 diff --git 
a/tests/unit/gapic/aiplatform_v1beta1/test_featurestore_service.py b/tests/unit/gapic/aiplatform_v1beta1/test_featurestore_service.py index 316c313671..285794b653 100644 --- a/tests/unit/gapic/aiplatform_v1beta1/test_featurestore_service.py +++ b/tests/unit/gapic/aiplatform_v1beta1/test_featurestore_service.py @@ -2455,13 +2455,13 @@ def test_list_featurestores_pager(transport_name: str = "grpc"): RuntimeError, ) - metadata = () - metadata = tuple(metadata) + ( + expected_metadata = () + expected_metadata = tuple(expected_metadata) + ( gapic_v1.routing_header.to_grpc_metadata((("parent", ""),)), ) pager = client.list_featurestores(request={}) - assert pager._metadata == metadata + assert pager._metadata == expected_metadata results = list(pager) assert len(results) == 6 @@ -4648,13 +4648,13 @@ def test_list_entity_types_pager(transport_name: str = "grpc"): RuntimeError, ) - metadata = () - metadata = tuple(metadata) + ( + expected_metadata = () + expected_metadata = tuple(expected_metadata) + ( gapic_v1.routing_header.to_grpc_metadata((("parent", ""),)), ) pager = client.list_entity_types(request={}) - assert pager._metadata == metadata + assert pager._metadata == expected_metadata results = list(pager) assert len(results) == 6 @@ -7225,13 +7225,13 @@ def test_list_features_pager(transport_name: str = "grpc"): RuntimeError, ) - metadata = () - metadata = tuple(metadata) + ( + expected_metadata = () + expected_metadata = tuple(expected_metadata) + ( gapic_v1.routing_header.to_grpc_metadata((("parent", ""),)), ) pager = client.list_features(request={}) - assert pager._metadata == metadata + assert pager._metadata == expected_metadata results = list(pager) assert len(results) == 6 @@ -10147,13 +10147,13 @@ def test_search_features_pager(transport_name: str = "grpc"): RuntimeError, ) - metadata = () - metadata = tuple(metadata) + ( + expected_metadata = () + expected_metadata = tuple(expected_metadata) + ( gapic_v1.routing_header.to_grpc_metadata((("location", ""),)), ) pager = client.search_features(request={}) - assert pager._metadata == metadata + assert pager._metadata == expected_metadata results = list(pager) assert len(results) == 6 diff --git a/tests/unit/gapic/aiplatform_v1beta1/test_gen_ai_cache_service.py b/tests/unit/gapic/aiplatform_v1beta1/test_gen_ai_cache_service.py index 67610b3402..0d4bc4f537 100644 --- a/tests/unit/gapic/aiplatform_v1beta1/test_gen_ai_cache_service.py +++ b/tests/unit/gapic/aiplatform_v1beta1/test_gen_ai_cache_service.py @@ -3218,13 +3218,13 @@ def test_list_cached_contents_pager(transport_name: str = "grpc"): RuntimeError, ) - metadata = () - metadata = tuple(metadata) + ( + expected_metadata = () + expected_metadata = tuple(expected_metadata) + ( gapic_v1.routing_header.to_grpc_metadata((("parent", ""),)), ) pager = client.list_cached_contents(request={}) - assert pager._metadata == metadata + assert pager._metadata == expected_metadata results = list(pager) assert len(results) == 6 diff --git a/tests/unit/gapic/aiplatform_v1beta1/test_gen_ai_tuning_service.py b/tests/unit/gapic/aiplatform_v1beta1/test_gen_ai_tuning_service.py index 7a8d11d0b1..49ba71d4c9 100644 --- a/tests/unit/gapic/aiplatform_v1beta1/test_gen_ai_tuning_service.py +++ b/tests/unit/gapic/aiplatform_v1beta1/test_gen_ai_tuning_service.py @@ -2406,13 +2406,13 @@ def test_list_tuning_jobs_pager(transport_name: str = "grpc"): RuntimeError, ) - metadata = () - metadata = tuple(metadata) + ( + expected_metadata = () + expected_metadata = tuple(expected_metadata) + ( 
gapic_v1.routing_header.to_grpc_metadata((("parent", ""),)), ) pager = client.list_tuning_jobs(request={}) - assert pager._metadata == metadata + assert pager._metadata == expected_metadata results = list(pager) assert len(results) == 6 diff --git a/tests/unit/gapic/aiplatform_v1beta1/test_index_endpoint_service.py b/tests/unit/gapic/aiplatform_v1beta1/test_index_endpoint_service.py index c1c16e7289..9dcadbdcd4 100644 --- a/tests/unit/gapic/aiplatform_v1beta1/test_index_endpoint_service.py +++ b/tests/unit/gapic/aiplatform_v1beta1/test_index_endpoint_service.py @@ -2475,13 +2475,13 @@ def test_list_index_endpoints_pager(transport_name: str = "grpc"): RuntimeError, ) - metadata = () - metadata = tuple(metadata) + ( + expected_metadata = () + expected_metadata = tuple(expected_metadata) + ( gapic_v1.routing_header.to_grpc_metadata((("parent", ""),)), ) pager = client.list_index_endpoints(request={}) - assert pager._metadata == metadata + assert pager._metadata == expected_metadata results = list(pager) assert len(results) == 6 diff --git a/tests/unit/gapic/aiplatform_v1beta1/test_index_service.py b/tests/unit/gapic/aiplatform_v1beta1/test_index_service.py index bbe60b50f0..88a97d8897 100644 --- a/tests/unit/gapic/aiplatform_v1beta1/test_index_service.py +++ b/tests/unit/gapic/aiplatform_v1beta1/test_index_service.py @@ -2293,13 +2293,13 @@ def test_list_indexes_pager(transport_name: str = "grpc"): RuntimeError, ) - metadata = () - metadata = tuple(metadata) + ( + expected_metadata = () + expected_metadata = tuple(expected_metadata) + ( gapic_v1.routing_header.to_grpc_metadata((("parent", ""),)), ) pager = client.list_indexes(request={}) - assert pager._metadata == metadata + assert pager._metadata == expected_metadata results = list(pager) assert len(results) == 6 diff --git a/tests/unit/gapic/aiplatform_v1beta1/test_job_service.py b/tests/unit/gapic/aiplatform_v1beta1/test_job_service.py index 9af7933b22..b8c1a9f647 100644 --- a/tests/unit/gapic/aiplatform_v1beta1/test_job_service.py +++ b/tests/unit/gapic/aiplatform_v1beta1/test_job_service.py @@ -2335,13 +2335,13 @@ def test_list_custom_jobs_pager(transport_name: str = "grpc"): RuntimeError, ) - metadata = () - metadata = tuple(metadata) + ( + expected_metadata = () + expected_metadata = tuple(expected_metadata) + ( gapic_v1.routing_header.to_grpc_metadata((("parent", ""),)), ) pager = client.list_custom_jobs(request={}) - assert pager._metadata == metadata + assert pager._metadata == expected_metadata results = list(pager) assert len(results) == 6 @@ -4563,13 +4563,13 @@ def test_list_data_labeling_jobs_pager(transport_name: str = "grpc"): RuntimeError, ) - metadata = () - metadata = tuple(metadata) + ( + expected_metadata = () + expected_metadata = tuple(expected_metadata) + ( gapic_v1.routing_header.to_grpc_metadata((("parent", ""),)), ) pager = client.list_data_labeling_jobs(request={}) - assert pager._metadata == metadata + assert pager._metadata == expected_metadata results = list(pager) assert len(results) == 6 @@ -6786,13 +6786,13 @@ def test_list_hyperparameter_tuning_jobs_pager(transport_name: str = "grpc"): RuntimeError, ) - metadata = () - metadata = tuple(metadata) + ( + expected_metadata = () + expected_metadata = tuple(expected_metadata) + ( gapic_v1.routing_header.to_grpc_metadata((("parent", ""),)), ) pager = client.list_hyperparameter_tuning_jobs(request={}) - assert pager._metadata == metadata + assert pager._metadata == expected_metadata results = list(pager) assert len(results) == 6 @@ -8901,13 +8901,13 @@ def 
test_list_nas_jobs_pager(transport_name: str = "grpc"): RuntimeError, ) - metadata = () - metadata = tuple(metadata) + ( + expected_metadata = () + expected_metadata = tuple(expected_metadata) + ( gapic_v1.routing_header.to_grpc_metadata((("parent", ""),)), ) pager = client.list_nas_jobs(request={}) - assert pager._metadata == metadata + assert pager._metadata == expected_metadata results = list(pager) assert len(results) == 6 @@ -10605,13 +10605,13 @@ def test_list_nas_trial_details_pager(transport_name: str = "grpc"): RuntimeError, ) - metadata = () - metadata = tuple(metadata) + ( + expected_metadata = () + expected_metadata = tuple(expected_metadata) + ( gapic_v1.routing_header.to_grpc_metadata((("parent", ""),)), ) pager = client.list_nas_trial_details(request={}) - assert pager._metadata == metadata + assert pager._metadata == expected_metadata results = list(pager) assert len(results) == 6 @@ -12074,13 +12074,13 @@ def test_list_batch_prediction_jobs_pager(transport_name: str = "grpc"): RuntimeError, ) - metadata = () - metadata = tuple(metadata) + ( + expected_metadata = () + expected_metadata = tuple(expected_metadata) + ( gapic_v1.routing_header.to_grpc_metadata((("parent", ""),)), ) pager = client.list_batch_prediction_jobs(request={}) - assert pager._metadata == metadata + assert pager._metadata == expected_metadata results = list(pager) assert len(results) == 6 @@ -13959,15 +13959,15 @@ def test_search_model_deployment_monitoring_stats_anomalies_pager( RuntimeError, ) - metadata = () - metadata = tuple(metadata) + ( + expected_metadata = () + expected_metadata = tuple(expected_metadata) + ( gapic_v1.routing_header.to_grpc_metadata( (("model_deployment_monitoring_job", ""),) ), ) pager = client.search_model_deployment_monitoring_stats_anomalies(request={}) - assert pager._metadata == metadata + assert pager._metadata == expected_metadata results = list(pager) assert len(results) == 6 @@ -15015,13 +15015,13 @@ def test_list_model_deployment_monitoring_jobs_pager(transport_name: str = "grpc RuntimeError, ) - metadata = () - metadata = tuple(metadata) + ( + expected_metadata = () + expected_metadata = tuple(expected_metadata) + ( gapic_v1.routing_header.to_grpc_metadata((("parent", ""),)), ) pager = client.list_model_deployment_monitoring_jobs(request={}) - assert pager._metadata == metadata + assert pager._metadata == expected_metadata results = list(pager) assert len(results) == 6 diff --git a/tests/unit/gapic/aiplatform_v1beta1/test_metadata_service.py b/tests/unit/gapic/aiplatform_v1beta1/test_metadata_service.py index 05d69d3d77..931223632a 100644 --- a/tests/unit/gapic/aiplatform_v1beta1/test_metadata_service.py +++ b/tests/unit/gapic/aiplatform_v1beta1/test_metadata_service.py @@ -2419,13 +2419,13 @@ def test_list_metadata_stores_pager(transport_name: str = "grpc"): RuntimeError, ) - metadata = () - metadata = tuple(metadata) + ( + expected_metadata = () + expected_metadata = tuple(expected_metadata) + ( gapic_v1.routing_header.to_grpc_metadata((("parent", ""),)), ) pager = client.list_metadata_stores(request={}) - assert pager._metadata == metadata + assert pager._metadata == expected_metadata results = list(pager) assert len(results) == 6 @@ -4209,13 +4209,13 @@ def test_list_artifacts_pager(transport_name: str = "grpc"): RuntimeError, ) - metadata = () - metadata = tuple(metadata) + ( + expected_metadata = () + expected_metadata = tuple(expected_metadata) + ( gapic_v1.routing_header.to_grpc_metadata((("parent", ""),)), ) pager = client.list_artifacts(request={}) - assert 
pager._metadata == metadata + assert pager._metadata == expected_metadata results = list(pager) assert len(results) == 6 @@ -6738,13 +6738,13 @@ def test_list_contexts_pager(transport_name: str = "grpc"): RuntimeError, ) - metadata = () - metadata = tuple(metadata) + ( + expected_metadata = () + expected_metadata = tuple(expected_metadata) + ( gapic_v1.routing_header.to_grpc_metadata((("parent", ""),)), ) pager = client.list_contexts(request={}) - assert pager._metadata == metadata + assert pager._metadata == expected_metadata results = list(pager) assert len(results) == 6 @@ -10848,13 +10848,13 @@ def test_list_executions_pager(transport_name: str = "grpc"): RuntimeError, ) - metadata = () - metadata = tuple(metadata) + ( + expected_metadata = () + expected_metadata = tuple(expected_metadata) + ( gapic_v1.routing_header.to_grpc_metadata((("parent", ""),)), ) pager = client.list_executions(request={}) - assert pager._metadata == metadata + assert pager._metadata == expected_metadata results = list(pager) assert len(results) == 6 @@ -14231,13 +14231,13 @@ def test_list_metadata_schemas_pager(transport_name: str = "grpc"): RuntimeError, ) - metadata = () - metadata = tuple(metadata) + ( + expected_metadata = () + expected_metadata = tuple(expected_metadata) + ( gapic_v1.routing_header.to_grpc_metadata((("parent", ""),)), ) pager = client.list_metadata_schemas(request={}) - assert pager._metadata == metadata + assert pager._metadata == expected_metadata results = list(pager) assert len(results) == 6 diff --git a/tests/unit/gapic/aiplatform_v1beta1/test_migration_service.py b/tests/unit/gapic/aiplatform_v1beta1/test_migration_service.py index a8d61b1fbd..b3beae1d34 100644 --- a/tests/unit/gapic/aiplatform_v1beta1/test_migration_service.py +++ b/tests/unit/gapic/aiplatform_v1beta1/test_migration_service.py @@ -1599,13 +1599,13 @@ def test_search_migratable_resources_pager(transport_name: str = "grpc"): RuntimeError, ) - metadata = () - metadata = tuple(metadata) + ( + expected_metadata = () + expected_metadata = tuple(expected_metadata) + ( gapic_v1.routing_header.to_grpc_metadata((("parent", ""),)), ) pager = client.search_migratable_resources(request={}) - assert pager._metadata == metadata + assert pager._metadata == expected_metadata results = list(pager) assert len(results) == 6 @@ -3535,22 +3535,19 @@ def test_parse_annotated_dataset_path(): def test_dataset_path(): project = "cuttlefish" - location = "mussel" - dataset = "winkle" - expected = "projects/{project}/locations/{location}/datasets/{dataset}".format( + dataset = "mussel" + expected = "projects/{project}/datasets/{dataset}".format( project=project, - location=location, dataset=dataset, ) - actual = MigrationServiceClient.dataset_path(project, location, dataset) + actual = MigrationServiceClient.dataset_path(project, dataset) assert expected == actual def test_parse_dataset_path(): expected = { - "project": "nautilus", - "location": "scallop", - "dataset": "abalone", + "project": "winkle", + "dataset": "nautilus", } path = MigrationServiceClient.dataset_path(**expected) @@ -3560,19 +3557,22 @@ def test_parse_dataset_path(): def test_dataset_path(): - project = "squid" - dataset = "clam" - expected = "projects/{project}/datasets/{dataset}".format( + project = "scallop" + location = "abalone" + dataset = "squid" + expected = "projects/{project}/locations/{location}/datasets/{dataset}".format( project=project, + location=location, dataset=dataset, ) - actual = MigrationServiceClient.dataset_path(project, dataset) + actual = 
MigrationServiceClient.dataset_path(project, location, dataset) assert expected == actual def test_parse_dataset_path(): expected = { - "project": "whelk", + "project": "clam", + "location": "whelk", "dataset": "octopus", } path = MigrationServiceClient.dataset_path(**expected) diff --git a/tests/unit/gapic/aiplatform_v1beta1/test_model_garden_service.py b/tests/unit/gapic/aiplatform_v1beta1/test_model_garden_service.py index fabcaba801..17d5e4add9 100644 --- a/tests/unit/gapic/aiplatform_v1beta1/test_model_garden_service.py +++ b/tests/unit/gapic/aiplatform_v1beta1/test_model_garden_service.py @@ -2052,13 +2052,13 @@ def test_list_publisher_models_pager(transport_name: str = "grpc"): RuntimeError, ) - metadata = () - metadata = tuple(metadata) + ( + expected_metadata = () + expected_metadata = tuple(expected_metadata) + ( gapic_v1.routing_header.to_grpc_metadata((("parent", ""),)), ) pager = client.list_publisher_models(request={}) - assert pager._metadata == metadata + assert pager._metadata == expected_metadata results = list(pager) assert len(results) == 6 diff --git a/tests/unit/gapic/aiplatform_v1beta1/test_model_monitoring_service.py b/tests/unit/gapic/aiplatform_v1beta1/test_model_monitoring_service.py index 00075dc2c7..c5b19a30e3 100644 --- a/tests/unit/gapic/aiplatform_v1beta1/test_model_monitoring_service.py +++ b/tests/unit/gapic/aiplatform_v1beta1/test_model_monitoring_service.py @@ -2931,13 +2931,13 @@ def test_list_model_monitors_pager(transport_name: str = "grpc"): RuntimeError, ) - metadata = () - metadata = tuple(metadata) + ( + expected_metadata = () + expected_metadata = tuple(expected_metadata) + ( gapic_v1.routing_header.to_grpc_metadata((("parent", ""),)), ) pager = client.list_model_monitors(request={}) - assert pager._metadata == metadata + assert pager._metadata == expected_metadata results = list(pager) assert len(results) == 6 @@ -4754,13 +4754,13 @@ def test_list_model_monitoring_jobs_pager(transport_name: str = "grpc"): RuntimeError, ) - metadata = () - metadata = tuple(metadata) + ( + expected_metadata = () + expected_metadata = tuple(expected_metadata) + ( gapic_v1.routing_header.to_grpc_metadata((("parent", ""),)), ) pager = client.list_model_monitoring_jobs(request={}) - assert pager._metadata == metadata + assert pager._metadata == expected_metadata results = list(pager) assert len(results) == 6 @@ -5750,13 +5750,13 @@ def test_search_model_monitoring_stats_pager(transport_name: str = "grpc"): RuntimeError, ) - metadata = () - metadata = tuple(metadata) + ( + expected_metadata = () + expected_metadata = tuple(expected_metadata) + ( gapic_v1.routing_header.to_grpc_metadata((("model_monitor", ""),)), ) pager = client.search_model_monitoring_stats(request={}) - assert pager._metadata == metadata + assert pager._metadata == expected_metadata results = list(pager) assert len(results) == 6 @@ -6367,13 +6367,13 @@ def test_search_model_monitoring_alerts_pager(transport_name: str = "grpc"): RuntimeError, ) - metadata = () - metadata = tuple(metadata) + ( + expected_metadata = () + expected_metadata = tuple(expected_metadata) + ( gapic_v1.routing_header.to_grpc_metadata((("model_monitor", ""),)), ) pager = client.search_model_monitoring_alerts(request={}) - assert pager._metadata == metadata + assert pager._metadata == expected_metadata results = list(pager) assert len(results) == 6 diff --git a/tests/unit/gapic/aiplatform_v1beta1/test_model_service.py b/tests/unit/gapic/aiplatform_v1beta1/test_model_service.py index 3796525e89..a49ffdfd91 100644 --- 
a/tests/unit/gapic/aiplatform_v1beta1/test_model_service.py +++ b/tests/unit/gapic/aiplatform_v1beta1/test_model_service.py @@ -2377,13 +2377,13 @@ def test_list_models_pager(transport_name: str = "grpc"): RuntimeError, ) - metadata = () - metadata = tuple(metadata) + ( + expected_metadata = () + expected_metadata = tuple(expected_metadata) + ( gapic_v1.routing_header.to_grpc_metadata((("parent", ""),)), ) pager = client.list_models(request={}) - assert pager._metadata == metadata + assert pager._metadata == expected_metadata results = list(pager) assert len(results) == 6 @@ -2966,13 +2966,13 @@ def test_list_model_versions_pager(transport_name: str = "grpc"): RuntimeError, ) - metadata = () - metadata = tuple(metadata) + ( + expected_metadata = () + expected_metadata = tuple(expected_metadata) + ( gapic_v1.routing_header.to_grpc_metadata((("name", ""),)), ) pager = client.list_model_versions(request={}) - assert pager._metadata == metadata + assert pager._metadata == expected_metadata results = list(pager) assert len(results) == 6 @@ -8112,13 +8112,13 @@ def test_list_model_evaluations_pager(transport_name: str = "grpc"): RuntimeError, ) - metadata = () - metadata = tuple(metadata) + ( + expected_metadata = () + expected_metadata = tuple(expected_metadata) + ( gapic_v1.routing_header.to_grpc_metadata((("parent", ""),)), ) pager = client.list_model_evaluations(request={}) - assert pager._metadata == metadata + assert pager._metadata == expected_metadata results = list(pager) assert len(results) == 6 @@ -9105,13 +9105,13 @@ def test_list_model_evaluation_slices_pager(transport_name: str = "grpc"): RuntimeError, ) - metadata = () - metadata = tuple(metadata) + ( + expected_metadata = () + expected_metadata = tuple(expected_metadata) + ( gapic_v1.routing_header.to_grpc_metadata((("parent", ""),)), ) pager = client.list_model_evaluation_slices(request={}) - assert pager._metadata == metadata + assert pager._metadata == expected_metadata results = list(pager) assert len(results) == 6 diff --git a/tests/unit/gapic/aiplatform_v1beta1/test_notebook_service.py b/tests/unit/gapic/aiplatform_v1beta1/test_notebook_service.py index cb3dcdb104..86ea55606c 100644 --- a/tests/unit/gapic/aiplatform_v1beta1/test_notebook_service.py +++ b/tests/unit/gapic/aiplatform_v1beta1/test_notebook_service.py @@ -2474,13 +2474,13 @@ def test_list_notebook_runtime_templates_pager(transport_name: str = "grpc"): RuntimeError, ) - metadata = () - metadata = tuple(metadata) + ( + expected_metadata = () + expected_metadata = tuple(expected_metadata) + ( gapic_v1.routing_header.to_grpc_metadata((("parent", ""),)), ) pager = client.list_notebook_runtime_templates(request={}) - assert pager._metadata == metadata + assert pager._metadata == expected_metadata results = list(pager) assert len(results) == 6 @@ -4810,13 +4810,13 @@ def test_list_notebook_runtimes_pager(transport_name: str = "grpc"): RuntimeError, ) - metadata = () - metadata = tuple(metadata) + ( + expected_metadata = () + expected_metadata = tuple(expected_metadata) + ( gapic_v1.routing_header.to_grpc_metadata((("parent", ""),)), ) pager = client.list_notebook_runtimes(request={}) - assert pager._metadata == metadata + assert pager._metadata == expected_metadata results = list(pager) assert len(results) == 6 @@ -7436,13 +7436,13 @@ def test_list_notebook_execution_jobs_pager(transport_name: str = "grpc"): RuntimeError, ) - metadata = () - metadata = tuple(metadata) + ( + expected_metadata = () + expected_metadata = tuple(expected_metadata) + ( 
gapic_v1.routing_header.to_grpc_metadata((("parent", ""),)), ) pager = client.list_notebook_execution_jobs(request={}) - assert pager._metadata == metadata + assert pager._metadata == expected_metadata results = list(pager) assert len(results) == 6 diff --git a/tests/unit/gapic/aiplatform_v1beta1/test_persistent_resource_service.py b/tests/unit/gapic/aiplatform_v1beta1/test_persistent_resource_service.py index b319e7c8a7..b654b0e7c3 100644 --- a/tests/unit/gapic/aiplatform_v1beta1/test_persistent_resource_service.py +++ b/tests/unit/gapic/aiplatform_v1beta1/test_persistent_resource_service.py @@ -2507,13 +2507,13 @@ def test_list_persistent_resources_pager(transport_name: str = "grpc"): RuntimeError, ) - metadata = () - metadata = tuple(metadata) + ( + expected_metadata = () + expected_metadata = tuple(expected_metadata) + ( gapic_v1.routing_header.to_grpc_metadata((("parent", ""),)), ) pager = client.list_persistent_resources(request={}) - assert pager._metadata == metadata + assert pager._metadata == expected_metadata results = list(pager) assert len(results) == 6 diff --git a/tests/unit/gapic/aiplatform_v1beta1/test_pipeline_service.py b/tests/unit/gapic/aiplatform_v1beta1/test_pipeline_service.py index 57acfe6058..2dcbd54875 100644 --- a/tests/unit/gapic/aiplatform_v1beta1/test_pipeline_service.py +++ b/tests/unit/gapic/aiplatform_v1beta1/test_pipeline_service.py @@ -2465,13 +2465,13 @@ def test_list_training_pipelines_pager(transport_name: str = "grpc"): RuntimeError, ) - metadata = () - metadata = tuple(metadata) + ( + expected_metadata = () + expected_metadata = tuple(expected_metadata) + ( gapic_v1.routing_header.to_grpc_metadata((("parent", ""),)), ) pager = client.list_training_pipelines(request={}) - assert pager._metadata == metadata + assert pager._metadata == expected_metadata results = list(pager) assert len(results) == 6 @@ -4696,13 +4696,13 @@ def test_list_pipeline_jobs_pager(transport_name: str = "grpc"): RuntimeError, ) - metadata = () - metadata = tuple(metadata) + ( + expected_metadata = () + expected_metadata = tuple(expected_metadata) + ( gapic_v1.routing_header.to_grpc_metadata((("parent", ""),)), ) pager = client.list_pipeline_jobs(request={}) - assert pager._metadata == metadata + assert pager._metadata == expected_metadata results = list(pager) assert len(results) == 6 diff --git a/tests/unit/gapic/aiplatform_v1beta1/test_reasoning_engine_service.py b/tests/unit/gapic/aiplatform_v1beta1/test_reasoning_engine_service.py index 0e7e3e3d47..74e92a6569 100644 --- a/tests/unit/gapic/aiplatform_v1beta1/test_reasoning_engine_service.py +++ b/tests/unit/gapic/aiplatform_v1beta1/test_reasoning_engine_service.py @@ -2459,13 +2459,13 @@ def test_list_reasoning_engines_pager(transport_name: str = "grpc"): RuntimeError, ) - metadata = () - metadata = tuple(metadata) + ( + expected_metadata = () + expected_metadata = tuple(expected_metadata) + ( gapic_v1.routing_header.to_grpc_metadata((("parent", ""),)), ) pager = client.list_reasoning_engines(request={}) - assert pager._metadata == metadata + assert pager._metadata == expected_metadata results = list(pager) assert len(results) == 6 diff --git a/tests/unit/gapic/aiplatform_v1beta1/test_schedule_service.py b/tests/unit/gapic/aiplatform_v1beta1/test_schedule_service.py index 28568799ee..1f7e9b05fc 100644 --- a/tests/unit/gapic/aiplatform_v1beta1/test_schedule_service.py +++ b/tests/unit/gapic/aiplatform_v1beta1/test_schedule_service.py @@ -2785,13 +2785,13 @@ def test_list_schedules_pager(transport_name: str = "grpc"): 
RuntimeError, ) - metadata = () - metadata = tuple(metadata) + ( + expected_metadata = () + expected_metadata = tuple(expected_metadata) + ( gapic_v1.routing_header.to_grpc_metadata((("parent", ""),)), ) pager = client.list_schedules(request={}) - assert pager._metadata == metadata + assert pager._metadata == expected_metadata results = list(pager) assert len(results) == 6 diff --git a/tests/unit/gapic/aiplatform_v1beta1/test_specialist_pool_service.py b/tests/unit/gapic/aiplatform_v1beta1/test_specialist_pool_service.py index 689cf9e3ca..d11905a6ea 100644 --- a/tests/unit/gapic/aiplatform_v1beta1/test_specialist_pool_service.py +++ b/tests/unit/gapic/aiplatform_v1beta1/test_specialist_pool_service.py @@ -2461,13 +2461,13 @@ def test_list_specialist_pools_pager(transport_name: str = "grpc"): RuntimeError, ) - metadata = () - metadata = tuple(metadata) + ( + expected_metadata = () + expected_metadata = tuple(expected_metadata) + ( gapic_v1.routing_header.to_grpc_metadata((("parent", ""),)), ) pager = client.list_specialist_pools(request={}) - assert pager._metadata == metadata + assert pager._metadata == expected_metadata results = list(pager) assert len(results) == 6 diff --git a/tests/unit/gapic/aiplatform_v1beta1/test_tensorboard_service.py b/tests/unit/gapic/aiplatform_v1beta1/test_tensorboard_service.py index 1a1159a0e7..997cf10337 100644 --- a/tests/unit/gapic/aiplatform_v1beta1/test_tensorboard_service.py +++ b/tests/unit/gapic/aiplatform_v1beta1/test_tensorboard_service.py @@ -2827,13 +2827,13 @@ def test_list_tensorboards_pager(transport_name: str = "grpc"): RuntimeError, ) - metadata = () - metadata = tuple(metadata) + ( + expected_metadata = () + expected_metadata = tuple(expected_metadata) + ( gapic_v1.routing_header.to_grpc_metadata((("parent", ""),)), ) pager = client.list_tensorboards(request={}) - assert pager._metadata == metadata + assert pager._metadata == expected_metadata results = list(pager) assert len(results) == 6 @@ -5877,13 +5877,13 @@ def test_list_tensorboard_experiments_pager(transport_name: str = "grpc"): RuntimeError, ) - metadata = () - metadata = tuple(metadata) + ( + expected_metadata = () + expected_metadata = tuple(expected_metadata) + ( gapic_v1.routing_header.to_grpc_metadata((("parent", ""),)), ) pager = client.list_tensorboard_experiments(request={}) - assert pager._metadata == metadata + assert pager._metadata == expected_metadata results = list(pager) assert len(results) == 6 @@ -8530,13 +8530,13 @@ def test_list_tensorboard_runs_pager(transport_name: str = "grpc"): RuntimeError, ) - metadata = () - metadata = tuple(metadata) + ( + expected_metadata = () + expected_metadata = tuple(expected_metadata) + ( gapic_v1.routing_header.to_grpc_metadata((("parent", ""),)), ) pager = client.list_tensorboard_runs(request={}) - assert pager._metadata == metadata + assert pager._metadata == expected_metadata results = list(pager) assert len(results) == 6 @@ -11272,13 +11272,13 @@ def test_list_tensorboard_time_series_pager(transport_name: str = "grpc"): RuntimeError, ) - metadata = () - metadata = tuple(metadata) + ( + expected_metadata = () + expected_metadata = tuple(expected_metadata) + ( gapic_v1.routing_header.to_grpc_metadata((("parent", ""),)), ) pager = client.list_tensorboard_time_series(request={}) - assert pager._metadata == metadata + assert pager._metadata == expected_metadata results = list(pager) assert len(results) == 6 @@ -14304,15 +14304,15 @@ def test_export_tensorboard_time_series_data_pager(transport_name: str = "grpc") RuntimeError, ) - 
metadata = () - metadata = tuple(metadata) + ( + expected_metadata = () + expected_metadata = tuple(expected_metadata) + ( gapic_v1.routing_header.to_grpc_metadata( (("tensorboard_time_series", ""),) ), ) pager = client.export_tensorboard_time_series_data(request={}) - assert pager._metadata == metadata + assert pager._metadata == expected_metadata results = list(pager) assert len(results) == 6 diff --git a/tests/unit/gapic/aiplatform_v1beta1/test_vertex_rag_data_service.py b/tests/unit/gapic/aiplatform_v1beta1/test_vertex_rag_data_service.py index c2091a1cfe..21611f7c90 100644 --- a/tests/unit/gapic/aiplatform_v1beta1/test_vertex_rag_data_service.py +++ b/tests/unit/gapic/aiplatform_v1beta1/test_vertex_rag_data_service.py @@ -2397,13 +2397,13 @@ def test_list_rag_corpora_pager(transport_name: str = "grpc"): RuntimeError, ) - metadata = () - metadata = tuple(metadata) + ( + expected_metadata = () + expected_metadata = tuple(expected_metadata) + ( gapic_v1.routing_header.to_grpc_metadata((("parent", ""),)), ) pager = client.list_rag_corpora(request={}) - assert pager._metadata == metadata + assert pager._metadata == expected_metadata results = list(pager) assert len(results) == 6 @@ -4551,13 +4551,13 @@ def test_list_rag_files_pager(transport_name: str = "grpc"): RuntimeError, ) - metadata = () - metadata = tuple(metadata) + ( + expected_metadata = () + expected_metadata = tuple(expected_metadata) + ( gapic_v1.routing_header.to_grpc_metadata((("parent", ""),)), ) pager = client.list_rag_files(request={}) - assert pager._metadata == metadata + assert pager._metadata == expected_metadata results = list(pager) assert len(results) == 6 @@ -5093,6 +5093,13 @@ def test_create_rag_corpus_rest(request_type): "name": "name_value", "display_name": "display_name_value", "description": "description_value", + "rag_embedding_model_config": { + "vertex_prediction_endpoint": { + "endpoint": "endpoint_value", + "model": "model_value", + "model_version_id": "model_version_id_value", + } + }, "create_time": {"seconds": 751, "nanos": 543}, "update_time": {}, } @@ -8743,6 +8750,58 @@ def test_vertex_rag_data_service_grpc_lro_async_client(): assert transport.operations_client is transport.operations_client +def test_endpoint_path(): + project = "squid" + location = "clam" + endpoint = "whelk" + expected = "projects/{project}/locations/{location}/endpoints/{endpoint}".format( + project=project, + location=location, + endpoint=endpoint, + ) + actual = VertexRagDataServiceClient.endpoint_path(project, location, endpoint) + assert expected == actual + + +def test_parse_endpoint_path(): + expected = { + "project": "octopus", + "location": "oyster", + "endpoint": "nudibranch", + } + path = VertexRagDataServiceClient.endpoint_path(**expected) + + # Check that the path construction is reversible. + actual = VertexRagDataServiceClient.parse_endpoint_path(path) + assert expected == actual + + +def test_model_path(): + project = "cuttlefish" + location = "mussel" + model = "winkle" + expected = "projects/{project}/locations/{location}/models/{model}".format( + project=project, + location=location, + model=model, + ) + actual = VertexRagDataServiceClient.model_path(project, location, model) + assert expected == actual + + +def test_parse_model_path(): + expected = { + "project": "nautilus", + "location": "scallop", + "model": "abalone", + } + path = VertexRagDataServiceClient.model_path(**expected) + + # Check that the path construction is reversible. 
+ actual = VertexRagDataServiceClient.parse_model_path(path) + assert expected == actual + + def test_rag_corpus_path(): project = "squid" location = "clam" diff --git a/tests/unit/gapic/aiplatform_v1beta1/test_vizier_service.py b/tests/unit/gapic/aiplatform_v1beta1/test_vizier_service.py index 025f07d83e..31f0f2dd3b 100644 --- a/tests/unit/gapic/aiplatform_v1beta1/test_vizier_service.py +++ b/tests/unit/gapic/aiplatform_v1beta1/test_vizier_service.py @@ -2308,13 +2308,13 @@ def test_list_studies_pager(transport_name: str = "grpc"): RuntimeError, ) - metadata = () - metadata = tuple(metadata) + ( + expected_metadata = () + expected_metadata = tuple(expected_metadata) + ( gapic_v1.routing_header.to_grpc_metadata((("parent", ""),)), ) pager = client.list_studies(request={}) - assert pager._metadata == metadata + assert pager._metadata == expected_metadata results = list(pager) assert len(results) == 6 @@ -4681,13 +4681,13 @@ def test_list_trials_pager(transport_name: str = "grpc"): RuntimeError, ) - metadata = () - metadata = tuple(metadata) + ( + expected_metadata = () + expected_metadata = tuple(expected_metadata) + ( gapic_v1.routing_header.to_grpc_metadata((("parent", ""),)), ) pager = client.list_trials(request={}) - assert pager._metadata == metadata + assert pager._metadata == expected_metadata results = list(pager) assert len(results) == 6 From 65f8111d7ccb8f1dbd46c61993825896ca3ff797 Mon Sep 17 00:00:00 2001 From: Matthew Tang Date: Wed, 5 Jun 2024 14:28:10 -0700 Subject: [PATCH 17/36] chore: Change RuntimeError to warning for tool context error. PiperOrigin-RevId: 640650412 --- google/cloud/aiplatform/telemetry.py | 13 ++++++++++--- 1 file changed, 10 insertions(+), 3 deletions(-) diff --git a/google/cloud/aiplatform/telemetry.py b/google/cloud/aiplatform/telemetry.py index f8002de007..efe37b4649 100644 --- a/google/cloud/aiplatform/telemetry.py +++ b/google/cloud/aiplatform/telemetry.py @@ -16,6 +16,10 @@ # import contextlib +from google.cloud.aiplatform import base + + +_LOGGER = base.Logger(__name__) _tool_names_to_append = [] @@ -49,12 +53,15 @@ def tool_context_manager(tool_name: str) -> None: def _append_tool_name(tool_name: str) -> None: - _tool_names_to_append.append(tool_name) + if _tool_names_to_append[-1] != tool_name: + _tool_names_to_append.append(tool_name) def _pop_tool_name(tool_name: str) -> None: if not _tool_names_to_append or _tool_names_to_append[-1] != tool_name: - raise RuntimeError( - "Tool context error detected. This can occur due to parallelization." + _LOGGER.warning( + "Gapic client context issue detected." + + "This can occur due to parallelization." 
) + return _tool_names_to_append.pop() From c21beeff427b5c4a8b7750f7a4237176c97bc98f Mon Sep 17 00:00:00 2001 From: Zhenyi Qi Date: Wed, 5 Jun 2024 16:08:48 -0700 Subject: [PATCH 18/36] feat: Add cache module and CachedContent class PiperOrigin-RevId: 640681342 --- google/cloud/aiplatform/compat/__init__.py | 2 + .../aiplatform/compat/services/__init__.py | 3 + .../cloud/aiplatform/compat/types/__init__.py | 2 + google/cloud/aiplatform/initializer.py | 8 +- google/cloud/aiplatform/utils/__init__.py | 13 ++ tests/unit/vertexai/test_caching.py | 135 +++++++++++ vertexai/preview/caching.py | 210 ++++++++++++++++++ vertexai/preview/generative_models.py | 1 - 8 files changed, 372 insertions(+), 2 deletions(-) create mode 100644 tests/unit/vertexai/test_caching.py create mode 100644 vertexai/preview/caching.py diff --git a/google/cloud/aiplatform/compat/__init__.py b/google/cloud/aiplatform/compat/__init__.py index b7f48a3a06..5583d68ed1 100644 --- a/google/cloud/aiplatform/compat/__init__.py +++ b/google/cloud/aiplatform/compat/__init__.py @@ -43,6 +43,7 @@ services.featurestore_online_serving_service_client_v1beta1 ) services.featurestore_service_client = services.featurestore_service_client_v1beta1 + services.gen_ai_cache_service_client = services.gen_ai_cache_service_client_v1beta1 services.job_service_client = services.job_service_client_v1beta1 services.model_service_client = services.model_service_client_v1beta1 services.model_garden_service_client = services.model_garden_service_client_v1beta1 @@ -69,6 +70,7 @@ types.annotation_spec = types.annotation_spec_v1beta1 types.artifact = types.artifact_v1beta1 types.batch_prediction_job = types.batch_prediction_job_v1beta1 + types.cached_content = types.cached_content_v1beta1 types.completion_stats = types.completion_stats_v1beta1 types.context = types.context_v1beta1 types.custom_job = types.custom_job_v1beta1 diff --git a/google/cloud/aiplatform/compat/services/__init__.py b/google/cloud/aiplatform/compat/services/__init__.py index 4d6c2aef9d..497e762403 100644 --- a/google/cloud/aiplatform/compat/services/__init__.py +++ b/google/cloud/aiplatform/compat/services/__init__.py @@ -45,6 +45,9 @@ from google.cloud.aiplatform_v1beta1.services.featurestore_service import ( client as featurestore_service_client_v1beta1, ) +from google.cloud.aiplatform_v1beta1.services.gen_ai_cache_service import ( + client as gen_ai_cache_service_client_v1beta1, +) from google.cloud.aiplatform_v1beta1.services.index_service import ( client as index_service_client_v1beta1, ) diff --git a/google/cloud/aiplatform/compat/types/__init__.py b/google/cloud/aiplatform/compat/types/__init__.py index 470d72088d..20bf0de92a 100644 --- a/google/cloud/aiplatform/compat/types/__init__.py +++ b/google/cloud/aiplatform/compat/types/__init__.py @@ -21,6 +21,7 @@ annotation_spec as annotation_spec_v1beta1, artifact as artifact_v1beta1, batch_prediction_job as batch_prediction_job_v1beta1, + cached_content as cached_content_v1beta1, completion_stats as completion_stats_v1beta1, context as context_v1beta1, custom_job as custom_job_v1beta1, @@ -55,6 +56,7 @@ featurestore_monitoring as featurestore_monitoring_v1beta1, featurestore_online_service as featurestore_online_service_v1beta1, featurestore_service as featurestore_service_v1beta1, + gen_ai_cache_service as gen_ai_cache_service_v1beta1, index as index_v1beta1, index_endpoint as index_endpoint_v1beta1, hyperparameter_tuning_job as hyperparameter_tuning_job_v1beta1, diff --git a/google/cloud/aiplatform/initializer.py 
b/google/cloud/aiplatform/initializer.py index 7a7d784b16..a615baa4bb 100644 --- a/google/cloud/aiplatform/initializer.py +++ b/google/cloud/aiplatform/initializer.py @@ -21,7 +21,7 @@ import logging import os import types -from typing import Iterator, List, Optional, Type, TypeVar, Union +from typing import Iterator, List, Optional, Type, TypeVar, Tuple, Union from google.api_core import client_options from google.api_core import gapic_v1 @@ -519,6 +519,12 @@ def create_client( return client_class(**kwargs) + def _get_default_project_and_location(self) -> Tuple[str, str]: + return ( + self.project, + self.location, + ) + # global config to store init parameters: ie, aiplatform.init(project=..., location=...) global_config = _Config() diff --git a/google/cloud/aiplatform/utils/__init__.py b/google/cloud/aiplatform/utils/__init__.py index 63639aed9d..73cded31ce 100644 --- a/google/cloud/aiplatform/utils/__init__.py +++ b/google/cloud/aiplatform/utils/__init__.py @@ -46,6 +46,7 @@ feature_registry_service_client_v1beta1, featurestore_online_serving_service_client_v1beta1, featurestore_service_client_v1beta1, + gen_ai_cache_service_client_v1beta1, index_service_client_v1beta1, index_endpoint_service_client_v1beta1, job_service_client_v1beta1, @@ -734,6 +735,18 @@ class FeaturestoreOnlineServingClientWithOverride(ClientWithOverride): ) +class GenAiCacheServiceClientWithOverride(ClientWithOverride): + _is_temporary = True + # TODO(b/342585299): Switch to compat.DEFAULT_VERSION once v1 is available. + _default_version = "v1beta1" + _version_map = ( + ( + compat.V1BETA1, + gen_ai_cache_service_client_v1beta1.GenAiCacheServiceClient, + ), + ) + + class JobClientWithOverride(ClientWithOverride): _is_temporary = True _default_version = compat.DEFAULT_VERSION diff --git a/tests/unit/vertexai/test_caching.py b/tests/unit/vertexai/test_caching.py new file mode 100644 index 0000000000..6ecc83ee98 --- /dev/null +++ b/tests/unit/vertexai/test_caching.py @@ -0,0 +1,135 @@ +# -*- coding: utf-8 -*- + +# Copyright 2024 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# + + +import datetime +import pytest +import mock +from vertexai.preview import caching +from google.cloud.aiplatform import initializer +import vertexai +from google.cloud.aiplatform_v1beta1.types.cached_content import ( + CachedContent as GapicCachedContent, +) +from google.cloud.aiplatform_v1beta1.types.content import ( + Content as GapicContent, + Part as GapicPart, +) +from google.cloud.aiplatform_v1beta1.types.tool import ( + ToolConfig as GapicToolConfig, +) +from google.cloud.aiplatform_v1beta1.services import ( + gen_ai_cache_service, +) + + +_TEST_PROJECT = "test-project" +_TEST_LOCATION = "us-central1" +_CREATED_CONTENT_ID = "contents-id-mocked" + + +@pytest.fixture +def mock_create_cached_content(): + """Mocks GenAiCacheServiceClient.create_cached_content().""" + + def create_cached_content(self, request): + response = GapicCachedContent( + name=f"{request.parent}/cachedContents/{_CREATED_CONTENT_ID}", + ) + return response + + with mock.patch.object( + gen_ai_cache_service.client.GenAiCacheServiceClient, + "create_cached_content", + new=create_cached_content, + ) as create_cached_content: + yield create_cached_content + + +@pytest.fixture +def mock_get_cached_content(): + """Mocks GenAiCacheServiceClient.get_cached_content().""" + + def get_cached_content(self, name, retry): + del self, retry + response = GapicCachedContent( + name=f"{name}", + ) + return response + + with mock.patch.object( + gen_ai_cache_service.client.GenAiCacheServiceClient, + "get_cached_content", + new=get_cached_content, + ) as get_cached_content: + yield get_cached_content + + +@pytest.mark.usefixtures("google_auth_mock") +class TestCaching: + """Unit tests for caching.CachedContent.""" + + def setup_method(self): + vertexai.init( + project=_TEST_PROJECT, + location=_TEST_LOCATION, + ) + + def teardown_method(self): + initializer.global_pool.shutdown(wait=True) + + def test_constructor_with_full_resource_name(self, mock_get_cached_content): + full_resource_name = ( + "projects/123/locations/europe-west1/cachedContents/contents-id" + ) + cache = caching.CachedContent( + cached_content_name=full_resource_name, + ) + + assert cache.name == "contents-id" + assert cache.resource_name == full_resource_name + + def test_constructor_with_only_content_id(self, mock_get_cached_content): + partial_resource_name = "contents-id" + cache = caching.CachedContent( + cached_content_name=partial_resource_name, + ) + assert cache.name == "contents-id" + assert cache.resource_name == ( + f"projects/{_TEST_PROJECT}/locations/{_TEST_LOCATION}/cachedContents/contents-id" + ) + + def test_create_with_real_payload( + self, mock_create_cached_content, mock_get_cached_content + ): + cache = caching.CachedContent.create( + model_name="model-name", + system_instruction=GapicContent( + role="system", parts=[GapicPart(text="system instruction")] + ), + tools=[], + tool_config=GapicToolConfig(), + contents=[GapicContent(role="user")], + ttl=datetime.timedelta(days=1), + ) + + # parent is automantically set to align with the current project and location. 
+ # _CREATED_CONTENT_ID is from the mock + assert cache.resource_name == ( + f"projects/{_TEST_PROJECT}/locations/{_TEST_LOCATION}/cachedContents/{_CREATED_CONTENT_ID}" + ) + assert cache.name == _CREATED_CONTENT_ID diff --git a/vertexai/preview/caching.py b/vertexai/preview/caching.py new file mode 100644 index 0000000000..ce5240d440 --- /dev/null +++ b/vertexai/preview/caching.py @@ -0,0 +1,210 @@ +# Copyright 2024 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +import datetime +from typing import Optional, List + +from google.cloud.aiplatform import base as aiplatform_base +from google.cloud.aiplatform import initializer as aiplatform_initializer +from google.cloud.aiplatform import utils as aiplatform_utils +from google.cloud.aiplatform.compat.types import ( + cached_content_v1beta1 as gca_cached_content, +) +from google.cloud.aiplatform_v1beta1.services import gen_ai_cache_service +from google.cloud.aiplatform_v1beta1.types.cached_content import ( + CachedContent as GapicCachedContent, +) +from google.cloud.aiplatform_v1beta1.types.content import ( + Content, +) +from google.cloud.aiplatform_v1beta1.types.tool import ( + Tool, + ToolConfig, +) +from google.cloud.aiplatform_v1beta1.types.gen_ai_cache_service import ( + CreateCachedContentRequest, + GetCachedContentRequest, +) + + +def _prepare_create_request( + model_name: str, + *, + system_instruction: Optional[Content] = None, + tools: Optional[List[Tool]] = None, + tool_config: Optional[ToolConfig] = None, + contents: Optional[List[Content]] = None, + expire_time: Optional[datetime.datetime] = None, + ttl: Optional[datetime.timedelta] = None, +) -> CreateCachedContentRequest: + """Prepares the request create_cached_content RPC.""" + ( + project, + location, + ) = aiplatform_initializer.global_config._get_default_project_and_location() + + if ttl and expire_time: + raise ValueError("Only one of ttl and expire_time can be set.") + + request = CreateCachedContentRequest( + parent=f"projects/{project}/locations/{location}", + cached_content=GapicCachedContent( + model=model_name, + system_instruction=system_instruction, + tools=tools, + tool_config=tool_config, + contents=contents, + expire_time=expire_time, + ttl=ttl, + ), + ) + return request + + +def _prepare_get_cached_content_request(name: str) -> GetCachedContentRequest: + return GetCachedContentRequest(name=name) + + +class CachedContent(aiplatform_base._VertexAiResourceNounPlus): + """A cached content resource.""" + + _resource_noun = "cachedContent" + _getter_method = "get_cached_content" + _list_method = "list_cached_contents" + _delete_method = "delete_cached_content" + _parse_resource_name_method = "parse_cached_content_path" + _format_resource_name_method = "cached_content_path" + + client_class = aiplatform_utils.GenAiCacheServiceClientWithOverride + + _gen_ai_cache_service_client_value: Optional[ + gen_ai_cache_service.GenAiCacheServiceClient + ] = None + + def __init__(self, cached_content_name: str): + """Represents a cached 
content resource. + + This resource can be used with vertexai.generative_models.GenerativeModel + to cache the prefix so it can be used across multiple generate_content + requests. + + Args: + cached_content_name (str): + Required. The name of the cached content resource. It could be a + fully-qualified CachedContent resource name or a CachedContent + ID. Example: "projects/.../locations/../cachedContents/456" or + "456". + """ + super().__init__(resource_name=cached_content_name) + + resource_name = aiplatform_utils.full_resource_name( + resource_name=cached_content_name, + resource_noun=self._resource_noun, + parse_resource_name_method=self._parse_resource_name, + format_resource_name_method=self._format_resource_name, + project=self.project, + location=self.location, + parent_resource_name_fields=None, + resource_id_validator=self._resource_id_validator, + ) + self._gca_resource = gca_cached_content.CachedContent(name=resource_name) + + @property + def _raw_cached_content(self) -> gca_cached_content.CachedContent: + return self._gca_resource + + @property + def model_name(self) -> str: + if not self._raw_cached_content.model: + self._sync_gca_resource() + return self._raw_cached_content.model + + @classmethod + def create( + cls, + *, + model_name: str, + # TODO: allow the types to be wrapped types + system_instruction: Optional[Content] = None, + tools: Optional[List[Tool]] = None, + tool_config: Optional[ToolConfig] = None, + contents: Optional[List[Content]] = None, + expire_time: Optional[datetime.datetime] = None, + ttl: Optional[datetime.timedelta] = None, + ) -> "CachedContent": + """Creates a new cached content through the gen ai cache service. + + Usage: + + Args: + model: + Immutable. The name of the publisher model to use for cached + content. + Allowed formats: + + projects/{project}/locations/{location}/publishers/{publisher}/models/{model}, or + publishers/{publisher}/models/{model}, or + a single model name. + system_instruction: + Optional. Immutable. Developer-set system instruction. + Currently, text only. + contents: + Optional. Immutable. The content to cache as a list of Content. + tools: + Optional. Immutable. A list of ``Tools`` the model may use to + generate the next response. + tool_config: + Optional. Immutable. Tool config. This config is shared for all + tools. + expire_time: + Timestamp of when this resource is considered expired. + + At most one of expire_time and ttl can be set. If neither is set, + default TTL on the API side will be used (currently 1 hour). + ttl: + The TTL for this resource. If provided, the expiration time is + computed: created_time + TTL. + + At most one of expire_time and ttl can be set. If neither is set, + default TTL on the API side will be used (currently 1 hour). + Returns: + A CachedContent object with only name and model_name specified. + Raises: + ValueError: If both expire_time and ttl are set. 
+ """ + project = aiplatform_initializer.global_config.project + location = aiplatform_initializer.global_config.location + if model_name.startswith("publishers/"): + model_name = f"projects/{project}/locations/{location}/{model_name}" + elif not model_name.startswith("projects/"): + model_name = f"projects/{project}/locations/{location}/publishers/google/models/{model_name}" + + if ttl and expire_time: + raise ValueError("Only one of ttl and expire_time can be set.") + + request = _prepare_create_request( + model_name=model_name, + system_instruction=system_instruction, + tools=tools, + tool_config=tool_config, + contents=contents, + expire_time=expire_time, + ttl=ttl, + ) + client = cls._instantiate_client(location=location) + cached_content_resource = client.create_cached_content(request) + obj = cls(cached_content_resource.name) + obj._gca_resource = cached_content_resource + return obj diff --git a/vertexai/preview/generative_models.py b/vertexai/preview/generative_models.py index 187cecdbe1..cb70d23d5f 100644 --- a/vertexai/preview/generative_models.py +++ b/vertexai/preview/generative_models.py @@ -69,5 +69,4 @@ class ChatSession(_PreviewChatSession): "SafetySetting", "Tool", "ToolConfig", - # ] From 52d3e255ce89372c100a943ecc54dce8014c6e9b Mon Sep 17 00:00:00 2001 From: Zhenyi Qi Date: Wed, 5 Jun 2024 17:07:55 -0700 Subject: [PATCH 19/36] feat: GenAI - Explicit Caching - Allow CachedContent to take wrapped types PiperOrigin-RevId: 640697873 --- tests/unit/vertexai/test_caching.py | 25 ++ vertexai/generative_models/__init__.py | 6 +- .../generative_models/_generative_models.py | 238 +++++++++++++----- vertexai/preview/caching.py | 48 +++- 4 files changed, 246 insertions(+), 71 deletions(-) diff --git a/tests/unit/vertexai/test_caching.py b/tests/unit/vertexai/test_caching.py index 6ecc83ee98..a6e7c75c2b 100644 --- a/tests/unit/vertexai/test_caching.py +++ b/tests/unit/vertexai/test_caching.py @@ -49,6 +49,7 @@ def mock_create_cached_content(): def create_cached_content(self, request): response = GapicCachedContent( name=f"{request.parent}/cachedContents/{_CREATED_CONTENT_ID}", + model=f"{request.cached_content.model}", ) return response @@ -133,3 +134,27 @@ def test_create_with_real_payload( f"projects/{_TEST_PROJECT}/locations/{_TEST_LOCATION}/cachedContents/{_CREATED_CONTENT_ID}" ) assert cache.name == _CREATED_CONTENT_ID + assert ( + cache.model_name + == f"projects/{_TEST_PROJECT}/locations/{_TEST_LOCATION}/publishers/google/models/model-name" + ) + + def test_create_with_real_payload_and_wrapped_type( + self, mock_create_cached_content, mock_get_cached_content + ): + cache = caching.CachedContent.create( + model_name="model-name", + system_instruction="Please answer my questions with cool", + tools=[], + tool_config=GapicToolConfig(), + contents=["user content"], + ttl=datetime.timedelta(days=1), + ) + + # parent is automantically set to align with the current project and location. 
+ # _CREATED_CONTENT_ID is from the mock + assert ( + cache.model_name + == f"projects/{_TEST_PROJECT}/locations/{_TEST_LOCATION}/publishers/google/models/model-name" + ) + assert cache.name == _CREATED_CONTENT_ID diff --git a/vertexai/generative_models/__init__.py b/vertexai/generative_models/__init__.py index 71f0d07cab..a4d6d29298 100644 --- a/vertexai/generative_models/__init__.py +++ b/vertexai/generative_models/__init__.py @@ -17,14 +17,14 @@ # We just want to re-export certain classes # pylint: disable=g-multiple-import,g-importing-member from vertexai.generative_models._generative_models import ( - GenerativeModel, - GenerationConfig, - GenerationResponse, Candidate, ChatSession, Content, FinishReason, FunctionDeclaration, + GenerationConfig, + GenerativeModel, + GenerationResponse, HarmCategory, HarmBlockThreshold, Image, diff --git a/vertexai/generative_models/_generative_models.py b/vertexai/generative_models/_generative_models.py index 85afc688a5..08ae92d486 100644 --- a/vertexai/generative_models/_generative_models.py +++ b/vertexai/generative_models/_generative_models.py @@ -1,4 +1,4 @@ -# Copyright 2023 Google LLC +# Copyright 2024 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -31,6 +31,7 @@ Optional, Sequence, Union, + TYPE_CHECKING, ) from google.cloud.aiplatform import initializer as aiplatform_initializer @@ -45,9 +46,12 @@ ) from google.cloud.aiplatform_v1beta1.types import tool as gapic_tool_types from google.protobuf import json_format -from vertexai.preview import rag import warnings +if TYPE_CHECKING: + from vertexai.preview import rag + from vertexai.preview import caching + try: from PIL import Image as PIL_Image # pylint: disable=g-import-not-at-top except ImportError: @@ -98,6 +102,169 @@ ] +def _reconcile_model_name(model_name: str, project: str, location: str) -> str: + """Returns a model name that's one of the following: + 1. A full resource name starting with projects/ + 2. A partial resource name starting with publishers/ + """ + if "/" not in model_name: + return f"publishers/google/models/{model_name}" + elif model_name.startswith("models/"): + return f"projects/{project}/locations/{location}/publishers/google/{model_name}" + elif model_name.startswith("publishers/") or model_name.startswith("projects/"): + return model_name + else: + raise ValueError( + "model_name must be either a Model Garden model ID or a full resource name." + f"recieved model_name {model_name}" + ) + + +def _get_resource_name_from_model_name( + model_name: str, project: str, location: str +) -> str: + """Returns the full resource name starting with projects/ given a model name.""" + if model_name.startswith("publishers/"): + return f"projects/{project}/locations/{location}/{model_name}" + elif model_name.startswith("projects/"): + return model_name + else: + raise ValueError( + "model_name must be either a Model Garden model ID or a full resource name." 
+ ) + + +def _validate_generate_content_parameters( + contents: ContentsType, + *, + generation_config: Optional[GenerationConfigType] = None, + safety_settings: Optional[SafetySettingsType] = None, + tools: Optional[List["Tool"]] = None, + tool_config: Optional["ToolConfig"] = None, + system_instruction: Optional[PartsType] = None, + cached_content: Optional["caching.CachedContent"] = None, +) -> None: + """Validates the parameters for a generate_content call.""" + if not contents: + raise TypeError("contents must not be empty") + + _validate_contents_type_as_valid_sequence(contents) + + if cached_content and any([tools, tool_config, system_instruction]): + raise ValueError( + "When using cached_content, tools, tool_config, and system_instruction must be None." + ) + + if safety_settings: + _validate_safety_settings_type_as_valid_sequence(safety_settings) + + if generation_config: + if not isinstance( + generation_config, + (gapic_content_types.GenerationConfig, GenerationConfig, Dict), + ): + raise TypeError( + "generation_config must either be a GenerationConfig object or a dictionary representation of it." + ) + + if tools: + _validate_tools_type_as_valid_sequence(tools) + + if tool_config: + _validate_tool_config_type(tool_config) + + +def _validate_contents_type_as_valid_sequence(contents: ContentsType) -> None: + """Makes sure that individual elements of contents are of valid type.""" + # contents can either be a list of Content objects (most generic case) + if isinstance(contents, Sequence) and any( + isinstance(c, gapic_content_types.Content) for c in contents + ): + if not all(isinstance(c, gapic_content_types.Content) for c in contents): + raise TypeError( + "When passing a list with Content objects, every item in a " + + "list must be a Content object." + ) + elif isinstance(contents, Sequence) and any( + isinstance(c, Content) for c in contents + ): + if not all(isinstance(c, Content) for c in contents): + raise TypeError( + "When passing a list with Content objects, every item in a " + + "list must be a Content object." + ) + elif isinstance(contents, Sequence) and any(isinstance(c, dict) for c in contents): + if not all(isinstance(c, dict) for c in contents): + raise TypeError( + "When passing a list with Content dict objects, every item in " + + "a list must be a Content dict object." + ) + + +def _validate_safety_settings_type_as_valid_sequence( + safety_settings: SafetySettingsType, +) -> None: + if not isinstance(safety_settings, (Sequence, Dict)): + raise TypeError( + "safety_settings must either be a SafetySetting object or a " + + "dictionary mapping from HarmCategory to HarmBlockThreshold." + ) + if isinstance(safety_settings, Sequence): + for safety_setting in safety_settings: + if not isinstance( + safety_setting, + (gapic_content_types.SafetySetting, SafetySetting), + ): + raise TypeError( + "When passing a list with SafetySettings objects, every " + + "item in a list must be a SafetySetting object." 
+ ) + + +def _validate_tools_type_as_valid_sequence(tools: List["Tool"]): + for tool in tools: + if not isinstance(tool, (gapic_tool_types.Tool, Tool)): + raise TypeError(f"Unexpected tool type: {tool}.") + + +def _validate_tool_config_type(tool_config: "ToolConfig"): + if not isinstance(tool_config, ToolConfig): + raise TypeError("tool_config must be a ToolConfig object.") + + +def _content_types_to_gapic_contents( + contents: ContentsType, +) -> List[gapic_content_types.Content]: + """Converts a list of Content objects to a list of gapic_content_types.Content objects.""" + if isinstance(contents, Sequence) and any( + isinstance(c, gapic_content_types.Content) for c in contents + ): + return contents + elif isinstance(contents, Sequence) and any( + isinstance(c, Content) for c in contents + ): + return [content._raw_content for content in contents] + elif isinstance(contents, Sequence) and any(isinstance(c, dict) for c in contents): + return [gapic_content_types.Content(content_dict) for content_dict in contents] + # or a value that can be converted to a *single* Content object + else: + return [_to_content(contents)] + + +def _tool_types_to_gapic_tools( + tools: Optional[List["Tool"]], +) -> List[gapic_tool_types.Tool]: + """Converts a list of Tool objects to a list of gapic_tool_types.Tool objects.""" + gapic_tools = [] + if tools: + for tool in tools: + if isinstance(tool, gapic_tool_types.Tool): + gapic_tools.append(tool) + elif isinstance(tool, Tool): + gapic_tools.append(tool._raw_tool) + return gapic_tools + + class _GenerativeModel: r"""A model that can generate content. @@ -194,7 +361,7 @@ def __init__( self._system_instruction = system_instruction # Validating the parameters - self._prepare_request( + _validate_generate_content_parameters( contents="test", generation_config=generation_config, safety_settings=safety_settings, @@ -251,35 +418,16 @@ def _prepare_request( tool_config = tool_config or self._tool_config system_instruction = system_instruction or self._system_instruction - # contents can either be a list of Content objects (most generic case) - if isinstance(contents, Sequence) and any( - isinstance(c, gapic_content_types.Content) for c in contents - ): - if not all(isinstance(c, gapic_content_types.Content) for c in contents): - raise TypeError( - "When passing a list with Content objects, every item in a list must be a Content object." - ) - elif isinstance(contents, Sequence) and any( - isinstance(c, Content) for c in contents - ): - if not all(isinstance(c, Content) for c in contents): - raise TypeError( - "When passing a list with Content objects, every item in a list must be a Content object." - ) - contents = [content._raw_content for content in contents] - elif isinstance(contents, Sequence) and any( - isinstance(c, dict) for c in contents - ): - if not all(isinstance(c, dict) for c in contents): - raise TypeError( - "When passing a list with Content dict objects, every item in a list must be a Content dict object." 
- ) - contents = [ - gapic_content_types.Content(content_dict) for content_dict in contents - ] - # or a value that can be converted to a *single* Content object - else: - contents = [_to_content(contents)] + _validate_generate_content_parameters( + contents=contents, + generation_config=generation_config, + safety_settings=safety_settings, + tools=tools, + tool_config=tool_config, + system_instruction=system_instruction, + ) + + contents = _content_types_to_gapic_contents(contents) gapic_system_instruction: Optional[gapic_content_types.Content] = None if system_instruction: @@ -295,10 +443,6 @@ def _prepare_request( gapic_generation_config = gapic_content_types.GenerationConfig( **generation_config ) - else: - raise TypeError( - "generation_config must either be a GenerationConfig object or a dictionary representation of it." - ) gapic_safety_settings = None if safety_settings: @@ -309,10 +453,6 @@ def _prepare_request( gapic_safety_settings.append(safety_setting) elif isinstance(safety_setting, SafetySetting): gapic_safety_settings.append(safety_setting._raw_safety_setting) - else: - raise TypeError( - "When passing a list with SafetySettings objects, every item in a list must be a SafetySetting object." - ) elif isinstance(safety_settings, dict): gapic_safety_settings = [ gapic_content_types.SafetySetting( @@ -323,28 +463,14 @@ def _prepare_request( ) for category, threshold in safety_settings.items() ] - else: - raise TypeError( - "safety_settings must either be a list of SafetySettings objects or a dictionary mapping from HarmCategory to HarmBlockThreshold." - ) gapic_tools = None if tools: - gapic_tools = [] - for tool in tools: - if isinstance(tool, gapic_tool_types.Tool): - gapic_tools.append(tool) - elif isinstance(tool, Tool): - gapic_tools.append(tool._raw_tool) - else: - raise TypeError(f"Unexpected tool type: {tool}.") + gapic_tools = _tool_types_to_gapic_tools(tools) gapic_tool_config = None if tool_config: - if isinstance(tool_config, ToolConfig): - gapic_tool_config = tool_config._gapic_tool_config - else: - raise TypeError("tool_config must be a ToolConfig object.") + gapic_tool_config = tool_config._gapic_tool_config return gapic_prediction_service_types.GenerateContentRequest( # The `model` parameter now needs to be set for the vision models. 
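The helpers factored out above (content, tool, and safety-setting validation plus the GAPIC conversion functions) are what the caching module below reuses, so `CachedContent.create` can accept the same wrapped or plain types as `GenerativeModel`. A minimal sketch of that behavior, mirroring the new unit test in this change; the model name and TTL are illustrative placeholders:

```
import datetime
from vertexai.preview import caching

# Plain strings are converted to GAPIC Content objects by the shared helpers
# above, so no explicit Content/Part construction is needed here.
cache = caching.CachedContent.create(
    model_name="gemini-1.5-pro-preview-0514",  # placeholder model ID
    system_instruction="Please answer my questions with cool",
    contents=["user content"],
    ttl=datetime.timedelta(days=1),
)
print(cache.resource_name, cache.model_name)
```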
diff --git a/vertexai/preview/caching.py b/vertexai/preview/caching.py index ce5240d440..6caa0bb44a 100644 --- a/vertexai/preview/caching.py +++ b/vertexai/preview/caching.py @@ -26,26 +26,30 @@ from google.cloud.aiplatform_v1beta1.types.cached_content import ( CachedContent as GapicCachedContent, ) -from google.cloud.aiplatform_v1beta1.types.content import ( - Content, -) -from google.cloud.aiplatform_v1beta1.types.tool import ( - Tool, - ToolConfig, +from google.cloud.aiplatform_v1beta1.types import ( + content as gapic_content_types, ) from google.cloud.aiplatform_v1beta1.types.gen_ai_cache_service import ( CreateCachedContentRequest, GetCachedContentRequest, ) +from vertexai.generative_models import _generative_models +from vertexai.generative_models._generative_models import ( + Content, + PartsType, + Tool, + ToolConfig, + ContentsType, +) def _prepare_create_request( model_name: str, *, - system_instruction: Optional[Content] = None, + system_instruction: Optional[PartsType] = None, tools: Optional[List[Tool]] = None, tool_config: Optional[ToolConfig] = None, - contents: Optional[List[Content]] = None, + contents: Optional[ContentsType] = None, expire_time: Optional[datetime.datetime] = None, ttl: Optional[datetime.timedelta] = None, ) -> CreateCachedContentRequest: @@ -54,6 +58,27 @@ def _prepare_create_request( project, location, ) = aiplatform_initializer.global_config._get_default_project_and_location() + if contents: + _generative_models._validate_contents_type_as_valid_sequence(contents) + if tools: + _generative_models._validate_tools_type_as_valid_sequence(tools) + if tool_config: + _generative_models._validate_tool_config_type(tool_config) + + # contents can either be a list of Content objects (most generic case) + contents = _generative_models._content_types_to_gapic_contents(contents) + + gapic_system_instruction: Optional[gapic_content_types.Content] = None + if system_instruction: + gapic_system_instruction = _generative_models._to_content(system_instruction) + + gapic_tools = None + if tools: + gapic_tools = _generative_models._tool_types_to_gapic_tools(tools) + + gapic_tool_config = None + if tool_config: + gapic_tool_config = tool_config._gapic_tool_config if ttl and expire_time: raise ValueError("Only one of ttl and expire_time can be set.") @@ -62,9 +87,9 @@ def _prepare_create_request( parent=f"projects/{project}/locations/{location}", cached_content=GapicCachedContent( model=model_name, - system_instruction=system_instruction, - tools=tools, - tool_config=tool_config, + system_instruction=gapic_system_instruction, + tools=gapic_tools, + tool_config=gapic_tool_config, contents=contents, expire_time=expire_time, ttl=ttl, @@ -136,7 +161,6 @@ def create( cls, *, model_name: str, - # TODO: allow the types to be wrapped types system_instruction: Optional[Content] = None, tools: Optional[List[Tool]] = None, tool_config: Optional[ToolConfig] = None, From 24dbdf153da0b0b5e9e3f88ecea63a81defb161b Mon Sep 17 00:00:00 2001 From: Zhenyi Qi Date: Wed, 5 Jun 2024 18:47:11 -0700 Subject: [PATCH 20/36] feat: add update, expire_time and delete method to CachedContent PiperOrigin-RevId: 640721964 --- vertexai/preview/caching.py | 39 +++++++++++++++++++++++++++++++++++++ 1 file changed, 39 insertions(+) diff --git a/vertexai/preview/caching.py b/vertexai/preview/caching.py index 6caa0bb44a..df0118fde6 100644 --- a/vertexai/preview/caching.py +++ b/vertexai/preview/caching.py @@ -32,6 +32,7 @@ from google.cloud.aiplatform_v1beta1.types.gen_ai_cache_service import ( 
CreateCachedContentRequest, GetCachedContentRequest, + UpdateCachedContentRequest, ) from vertexai.generative_models import _generative_models from vertexai.generative_models._generative_models import ( @@ -41,6 +42,7 @@ ToolConfig, ContentsType, ) +from google.protobuf import field_mask_pb2 def _prepare_create_request( @@ -232,3 +234,40 @@ def create( obj = cls(cached_content_resource.name) obj._gca_resource = cached_content_resource return obj + + def update( + self, + *, + expire_time: Optional[datetime.datetime] = None, + ttl: Optional[datetime.timedelta] = None, + ): + """Updates the expiration time of the cached content.""" + if expire_time and ttl: + raise ValueError("Only one of ttl and expire_time can be set.") + update_mask: List[str] = [] + + if ttl: + update_mask.append("description") + + if expire_time: + update_mask.append("expire_time") + + update_mask = field_mask_pb2.FieldMask(paths=update_mask) + request = UpdateCachedContentRequest( + cached_content=GapicCachedContent( + name=self.resource_name, + expire_time=expire_time, + ttl=ttl, + ), + update_mask=update_mask, + ) + self.api_client.update_cached_content(request) + + @property + def expire_time(self) -> datetime.datetime: + """Time this resource was last updated.""" + self._sync_gca_resource() + return self._gca_resource.expire_time + + def delete(self): + self._delete() From c118557fedba55f16886b51a5244d12346be6ca4 Mon Sep 17 00:00:00 2001 From: Zhenyi Qi Date: Wed, 5 Jun 2024 20:31:16 -0700 Subject: [PATCH 21/36] fix: GenAI-ExplicitCaching use the right update_mask for ttl update PiperOrigin-RevId: 640746573 --- vertexai/preview/caching.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/vertexai/preview/caching.py b/vertexai/preview/caching.py index df0118fde6..324bdbf4ba 100644 --- a/vertexai/preview/caching.py +++ b/vertexai/preview/caching.py @@ -247,7 +247,7 @@ def update( update_mask: List[str] = [] if ttl: - update_mask.append("description") + update_mask.append("ttl") if expire_time: update_mask.append("expire_time") From 9d3561738d577129cb222417bf208166825d8043 Mon Sep 17 00:00:00 2001 From: Lingyin Wu Date: Wed, 5 Jun 2024 20:49:01 -0700 Subject: [PATCH 22/36] feat: Add hybrid search for public find_neighbors() call. PiperOrigin-RevId: 640750317 --- .../matching_engine_index_endpoint.py | 196 ++++++++++++------ .../test_matching_engine_index_endpoint.py | 88 +++++++- 2 files changed, 225 insertions(+), 59 deletions(-) diff --git a/google/cloud/aiplatform/matching_engine/matching_engine_index_endpoint.py b/google/cloud/aiplatform/matching_engine/matching_engine_index_endpoint.py index ff2b914f88..33eec35f58 100644 --- a/google/cloud/aiplatform/matching_engine/matching_engine_index_endpoint.py +++ b/google/cloud/aiplatform/matching_engine/matching_engine_index_endpoint.py @@ -16,7 +16,7 @@ # from dataclasses import dataclass, field -from typing import Dict, List, Optional, Sequence, Tuple +from typing import Dict, List, Optional, Sequence, Tuple, Union from google.auth import credentials as auth_credentials from google.cloud.aiplatform import base @@ -148,6 +148,37 @@ def __post_init__(self): ) +@dataclass +class HybridQuery: + """ + Hyrbid query. Could be used for dense-only or sparse-only or hybrid queries. + + dense_embedding (List[float]): + Optional. The dense part of the hybrid queries. + sparse_embedding_values (List[float]): + Optional. The sparse values of the sparse part of the queries. + + sparse_embedding_dimensions (List[int]): + Optional. 
The corresponding dimensions of the sparse values. + For example, values [1,2,3] with dimensions [4,5,6] means value 1 is of the + 4th dimension, value 2 is of the 4th dimension, and value 3 is of the 6th + dimension. + + rrf_ranking_alpha (float): + Optional. This should not be specified for dense-only or sparse-only queries. + A value between 0 and 1 for ranking algorithm RRF, representing + the ratio for sparse v.s. dense embeddings returned in the query result. + If the alpha is 0, only sparse embeddings are being returned, and no dense + embedding is being returned. When alhpa is 1, only dense embeddings are being + returned, and no sparse embedding is being returned. + """ + + dense_embedding: List[float] = None + sparse_embedding_values: List[float] = None + sparse_embedding_dimensions: List[int] = None + rrf_ranking_alpha: float = None + + @dataclass class MatchNeighbor: """The id and distance of a nearest neighbor match for a given query embedding. @@ -157,7 +188,7 @@ class MatchNeighbor: Required. The id of the neighbor. distance (float): Required. The distance to the query embedding. - feature_vector (List(float)): + feature_vector (List[float]): Optional. The feature vector of the matching datapoint. crowding_tag (Optional[str]): Optional. Crowding tag of the datapoint, the @@ -167,6 +198,14 @@ class MatchNeighbor: Optional. The restricts of the matching datapoint. numeric_restricts: Optional. The numeric restricts of the matching datapoint. + sparse_embedding_values (List[float]): + Optional. The sparse values of the sparse part of the matching + datapoint. + sparse_embedding_dimensions (List[int]): + Optional. The corresponding dimensions of the sparse values. + For example, values [1,2,3] with dimensions [4,5,6] means value 1 is + of the 4th dimension, value 2 is of the 4th dimension, and value 3 is + of the 6th dimension. 
""" @@ -176,6 +215,8 @@ class MatchNeighbor: crowding_tag: Optional[str] = None restricts: Optional[List[Namespace]] = None numeric_restricts: Optional[List[NumericNamespace]] = None + sparse_embedding_values: Optional[List[float]] = None + sparse_embedding_dimensions: Optional[List[int]] = None def from_index_datapoint( self, index_datapoint: gca_index_v1beta1.IndexDatapoint @@ -207,22 +248,31 @@ def from_index_datapoint( ] if index_datapoint.numeric_restricts is not None: self.numeric_restricts = [] - for restrict in index_datapoint.numeric_restricts: - numeric_namespace = None - restrict_value_type = restrict._pb.WhichOneof("Value") - if restrict_value_type == "value_int": - numeric_namespace = NumericNamespace( - name=restrict.namespace, value_int=restrict.value_int - ) - elif restrict_value_type == "value_float": - numeric_namespace = NumericNamespace( - name=restrict.namespace, value_float=restrict.value_float - ) - elif restrict_value_type == "value_double": - numeric_namespace = NumericNamespace( - name=restrict.namespace, value_double=restrict.value_double - ) - self.numeric_restricts.append(numeric_namespace) + for restrict in index_datapoint.numeric_restricts: + numeric_namespace = None + restrict_value_type = restrict._pb.WhichOneof("Value") + if restrict_value_type == "value_int": + numeric_namespace = NumericNamespace( + name=restrict.namespace, value_int=restrict.value_int + ) + elif restrict_value_type == "value_float": + numeric_namespace = NumericNamespace( + name=restrict.namespace, value_float=restrict.value_float + ) + elif restrict_value_type == "value_double": + numeric_namespace = NumericNamespace( + name=restrict.namespace, value_double=restrict.value_double + ) + self.numeric_restricts.append(numeric_namespace) + # sparse embeddings + if ( + index_datapoint.sparse_embedding is not None + and index_datapoint.sparse_embedding.values is not None + ): + self.sparse_embedding_values = index_datapoint.sparse_embedding.values + self.sparse_embedding_dimensions = ( + index_datapoint.sparse_embedding.dimensions + ) return self def from_embedding(self, embedding: match_service_pb2.Embedding) -> "MatchNeighbor": @@ -250,22 +300,22 @@ def from_embedding(self, embedding: match_service_pb2.Embedding) -> "MatchNeighb ] if embedding.numeric_restricts: self.numeric_restricts = [] - for restrict in embedding.numeric_restricts: - numeric_namespace = None - restrict_value_type = restrict.WhichOneof("Value") - if restrict_value_type == "value_int": - numeric_namespace = NumericNamespace( - name=restrict.name, value_int=restrict.value_int - ) - elif restrict_value_type == "value_float": - numeric_namespace = NumericNamespace( - name=restrict.name, value_float=restrict.value_float - ) - elif restrict_value_type == "value_double": - numeric_namespace = NumericNamespace( - name=restrict.name, value_double=restrict.value_double - ) - self.numeric_restricts.append(numeric_namespace) + for restrict in embedding.numeric_restricts: + numeric_namespace = None + restrict_value_type = restrict.WhichOneof("Value") + if restrict_value_type == "value_int": + numeric_namespace = NumericNamespace( + name=restrict.name, value_int=restrict.value_int + ) + elif restrict_value_type == "value_float": + numeric_namespace = NumericNamespace( + name=restrict.name, value_float=restrict.value_float + ) + elif restrict_value_type == "value_double": + numeric_namespace = NumericNamespace( + name=restrict.name, value_double=restrict.value_double + ) + self.numeric_restricts.append(numeric_namespace) return self @@ 
-1322,7 +1372,7 @@ def find_neighbors( self, *, deployed_index_id: str, - queries: Optional[List[List[float]]] = None, + queries: Optional[Union[List[List[float]], List[HybridQuery]]] = None, num_neighbors: int = 10, filter: Optional[List[Namespace]] = None, per_crowding_attribute_neighbor_count: Optional[int] = None, @@ -1346,8 +1396,15 @@ def find_neighbors( Args: deployed_index_id (str): Required. The ID of the DeployedIndex to match the queries against. - queries (List[List[float]]): - Required. A list of queries. Each query is a list of floats, representing a single embedding. + queries (Union[List[List[float]], List[HybridQuery]]): + Optional. A list of queries. + + For regular dense-only queries, each query is a list of floats, + representing a single embedding. + + For hybrid queries, each query is a hybrid query of type + aiplatform.matching_engine.matching_engine_index_endpoint.HybridQuery. + num_neighbors (int): Required. The number of nearest neighbors to be retrieved from database for each query. @@ -1381,7 +1438,7 @@ def find_neighbors( Note that returning full datapoint will significantly increase the latency and cost of the query. - numeric_filter (list[NumericNamespace]): + numeric_filter (List[NumericNamespace]): Optional. A list of NumericNamespaces for filtering the matching results. For example: [NumericNamespace(name="cost", value_int=5, op="GREATER")] @@ -1437,30 +1494,54 @@ def find_neighbors( numeric_restrict.value_double = numeric_namespace.value_double numeric_restricts.append(numeric_restrict) # Queries - query_by_id = False if queries else True - queries = queries if queries else embedding_ids - if queries: - for query in queries: - find_neighbors_query = gca_match_service_v1beta1.FindNeighborsRequest.Query( - neighbor_count=num_neighbors, - per_crowding_attribute_neighbor_count=per_crowding_attribute_neighbor_count, - approximate_neighbor_count=approx_num_neighbors, - fraction_leaf_nodes_to_search_override=fraction_leaf_nodes_to_search_override, - ) - datapoint = gca_index_v1beta1.IndexDatapoint( - datapoint_id=query if query_by_id else None, - feature_vector=None if query_by_id else query, - ) - datapoint.restricts.extend(restricts) - datapoint.numeric_restricts.extend(numeric_restricts) - find_neighbors_query.datapoint = datapoint - find_neighbors_request.queries.append(find_neighbors_query) + query_by_id = False + query_is_hybrid = False + if embedding_ids: + query_by_id = True + query_iterators: list[str] = embedding_ids + elif queries: + query_is_hybrid = isinstance(queries[0], HybridQuery) + query_iterators = queries else: raise ValueError( "To find neighbors using matching engine," - "please specify `queries` or `embedding_ids`" + "please specify `queries` or `embedding_ids` or `hybrid_queries`" ) + for query in query_iterators: + find_neighbors_query = gca_match_service_v1beta1.FindNeighborsRequest.Query( + neighbor_count=num_neighbors, + per_crowding_attribute_neighbor_count=per_crowding_attribute_neighbor_count, + approximate_neighbor_count=approx_num_neighbors, + fraction_leaf_nodes_to_search_override=fraction_leaf_nodes_to_search_override, + ) + if query_by_id: + datapoint = gca_index_v1beta1.IndexDatapoint( + datapoint_id=query, + ) + elif query_is_hybrid: + datapoint = gca_index_v1beta1.IndexDatapoint( + feature_vector=query.dense_embedding, + sparse_embedding=gca_index_v1beta1.IndexDatapoint.SparseEmbedding( + values=query.sparse_embedding_values, + dimensions=query.sparse_embedding_dimensions, + ), + ) + if query.rrf_ranking_alpha: + 
find_neighbors_query.rrf = ( + gca_match_service_v1beta1.FindNeighborsRequest.Query.RRF( + alpha=query.rrf_ranking_alpha, + ) + ) + else: + datapoint = gca_index_v1beta1.IndexDatapoint( + feature_vector=query, + ) + datapoint.restricts.extend(restricts) + datapoint.numeric_restricts.extend(numeric_restricts) + find_neighbors_query.datapoint = datapoint + find_neighbors_request.queries.append(find_neighbors_query) + response = self._public_match_client.find_neighbors(find_neighbors_request) # Wrap the results in MatchNeighbor objects and return @@ -1543,7 +1624,6 @@ def read_index_datapoints( read_index_datapoints_request ) - # Wrap the results and return return response.datapoints def _batch_get_embeddings( diff --git a/tests/unit/aiplatform/test_matching_engine_index_endpoint.py b/tests/unit/aiplatform/test_matching_engine_index_endpoint.py index 8fc693a23c..a2658700b5 100644 --- a/tests/unit/aiplatform/test_matching_engine_index_endpoint.py +++ b/tests/unit/aiplatform/test_matching_engine_index_endpoint.py @@ -29,6 +29,7 @@ Namespace, NumericNamespace, MatchNeighbor, + HybridQuery, ) from google.cloud.aiplatform.compat.types import ( matching_engine_deployed_index_ref as gca_matching_engine_deployed_index_ref, @@ -232,6 +233,18 @@ ] ] _TEST_QUERY_IDS = ["1", "2"] +_TEST_HYBRID_QUERIES = [ + HybridQuery( + sparse_embedding_dimensions=[1, 2, 3], + sparse_embedding_values=[0.1, 0.2, 0.3], + rrf_ranking_alpha=0.2, + ), + HybridQuery( + dense_embedding=_TEST_QUERIES[0], + sparse_embedding_dimensions=[1, 2, 3], + sparse_embedding_values=[0.1, 0.2, 0.3], + ), +] _TEST_NUM_NEIGHBOURS = 1 _TEST_FILTER = [ Namespace(name="class", allow_tokens=["token_1"], deny_tokens=["token_2"]) @@ -1278,7 +1291,7 @@ def test_index_private_service_connect_endpoint_match_queries( index_endpoint_match_queries_mock.assert_called_with(batch_request) @pytest.mark.usefixtures("get_index_public_endpoint_mock") - def test_index_public_endpoint_find_neighbors_queries( + def test_index_public_endpoint_find_neighbors_queries_backward_compatibility( self, index_public_endpoint_match_queries_mock ): aiplatform.init(project=_TEST_PROJECT) @@ -1326,6 +1339,79 @@ def test_index_public_endpoint_find_neighbors_queries( find_neighbors_request ) + @pytest.mark.usefixtures("get_index_public_endpoint_mock") + def test_index_public_endpoint_find_neighbors_queries( + self, index_public_endpoint_match_queries_mock + ): + aiplatform.init(project=_TEST_PROJECT) + + my_public_index_endpoint = aiplatform.MatchingEngineIndexEndpoint( + index_endpoint_name=_TEST_INDEX_ENDPOINT_ID + ) + + my_public_index_endpoint.find_neighbors( + deployed_index_id=_TEST_DEPLOYED_INDEX_ID, + num_neighbors=_TEST_NUM_NEIGHBOURS, + filter=_TEST_FILTER, + per_crowding_attribute_neighbor_count=_TEST_PER_CROWDING_ATTRIBUTE_NUM_NEIGHBOURS, + approx_num_neighbors=_TEST_APPROX_NUM_NEIGHBORS, + fraction_leaf_nodes_to_search_override=_TEST_FRACTION_LEAF_NODES_TO_SEARCH_OVERRIDE, + return_full_datapoint=_TEST_RETURN_FULL_DATAPOINT, + queries=_TEST_HYBRID_QUERIES, + ) + + find_neighbors_request = gca_match_service_v1beta1.FindNeighborsRequest( + index_endpoint=my_public_index_endpoint.resource_name, + deployed_index_id=_TEST_DEPLOYED_INDEX_ID, + queries=[ + gca_match_service_v1beta1.FindNeighborsRequest.Query( + neighbor_count=_TEST_NUM_NEIGHBOURS, + datapoint=gca_index_v1beta1.IndexDatapoint( + restricts=[ + gca_index_v1beta1.IndexDatapoint.Restriction( + namespace="class", + allow_list=["token_1"], + deny_list=["token_2"], + ) + ], + 
sparse_embedding=gca_index_v1beta1.IndexDatapoint.SparseEmbedding( + values=[0.1, 0.2, 0.3], dimensions=[1, 2, 3] + ), + ), + rrf=gca_match_service_v1beta1.FindNeighborsRequest.Query.RRF( + alpha=0.2, + ), + per_crowding_attribute_neighbor_count=_TEST_PER_CROWDING_ATTRIBUTE_NUM_NEIGHBOURS, + approximate_neighbor_count=_TEST_APPROX_NUM_NEIGHBORS, + fraction_leaf_nodes_to_search_override=_TEST_FRACTION_LEAF_NODES_TO_SEARCH_OVERRIDE, + ), + gca_match_service_v1beta1.FindNeighborsRequest.Query( + neighbor_count=_TEST_NUM_NEIGHBOURS, + datapoint=gca_index_v1beta1.IndexDatapoint( + feature_vector=_TEST_QUERIES[0], + restricts=[ + gca_index_v1beta1.IndexDatapoint.Restriction( + namespace="class", + allow_list=["token_1"], + deny_list=["token_2"], + ) + ], + sparse_embedding=gca_index_v1beta1.IndexDatapoint.SparseEmbedding( + values=[0.1, 0.2, 0.3], dimensions=[1, 2, 3] + ), + ), + per_crowding_attribute_neighbor_count=_TEST_PER_CROWDING_ATTRIBUTE_NUM_NEIGHBOURS, + approximate_neighbor_count=_TEST_APPROX_NUM_NEIGHBORS, + fraction_leaf_nodes_to_search_override=_TEST_FRACTION_LEAF_NODES_TO_SEARCH_OVERRIDE, + ), + ], + return_full_datapoint=_TEST_RETURN_FULL_DATAPOINT, + ) + + index_public_endpoint_match_queries_mock.assert_called_with( + find_neighbors_request + ) + @pytest.mark.usefixtures("get_index_public_endpoint_mock") def test_index_public_endpoint_find_neiggbor_query_by_id( self, index_public_endpoint_match_queries_mock From ea666de525eee3d56c1c05f327634615810efe96 Mon Sep 17 00:00:00 2001 From: Alexey Volkov Date: Wed, 5 Jun 2024 21:24:08 -0700 Subject: [PATCH 23/36] feat: Added support for adding request metadata When using the HTTP transport, this metadata is sent as HTTP headers. Usage: ``` vertexai.init( request_metadata=[("param1", "value1")], ) ``` PiperOrigin-RevId: 640757200 --- google/cloud/aiplatform/initializer.py | 66 ++++++++++++++++++- .../system/vertexai/test_generative_models.py | 9 +++ tests/unit/aiplatform/test_initializer.py | 57 ++++++++++++++++ 3 files changed, 130 insertions(+), 2 deletions(-) diff --git a/google/cloud/aiplatform/initializer.py b/google/cloud/aiplatform/initializer.py index a615baa4bb..34b53fc26f 100644 --- a/google/cloud/aiplatform/initializer.py +++ b/google/cloud/aiplatform/initializer.py @@ -17,11 +17,12 @@ from concurrent import futures +import functools import inspect import logging import os import types -from typing import Iterator, List, Optional, Type, TypeVar, Tuple, Union +from typing import Iterator, List, Optional, Sequence, Tuple, Type, TypeVar, Union from google.api_core import client_options from google.api_core import gapic_v1 @@ -108,6 +109,7 @@ def __init__(self): self._service_account = None self._api_endpoint = None self._api_transport = None + self._request_metadata = None def init( self, @@ -126,6 +128,7 @@ def init( service_account: Optional[str] = None, api_endpoint: Optional[str] = None, api_transport: Optional[str] = None, + request_metadata: Optional[Sequence[Tuple[str, str]]] = None, ): """Updates common initialization parameters with provided options. @@ -188,6 +191,8 @@ def init( Optional. The transport method which is either 'grpc' or 'rest'. NOTE: "rest" transport functionality is currently in a beta state (preview). + request_metadata: + Optional. Additional gRPC metadata to send with every client request. Raises: ValueError: If experiment_description is provided but experiment is not. 
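The docstring entry above is the contract for the new option; the wrapper classes added in the following hunks implement it by appending per-request metadata after these defaults. A small usage sketch, essentially the same flow as the system test added in this change; project and location are placeholders:

```
import vertexai
from vertexai.generative_models import GenerativeModel

# Clients created after init() attach ("foo", "bar") to every request; on the
# REST transport the pairs are sent as HTTP headers.
vertexai.init(
    project="my-project",    # placeholder
    location="us-central1",  # placeholder
    request_metadata=[("foo", "bar")],
)

model = GenerativeModel("gemini-1.0-pro-002")
response = model.generate_content("Why is sky blue?")
print(response.text)
```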
@@ -235,6 +240,8 @@ def init( self._network = network if service_account is not None: self._service_account = service_account + if request_metadata is not None: + self._request_metadata = request_metadata # Finally, perform secondary state updates if experiment_tensorboard and not isinstance(experiment_tensorboard, bool): @@ -517,7 +524,11 @@ def create_client( else: kwargs["transport"] = self._api_transport - return client_class(**kwargs) + client = client_class(**kwargs) + # We only wrap the client if the request_metadata is set at the creation time. + if self._request_metadata: + client = _ClientWrapperThatAddsDefaultMetadata(client) + return client def _get_default_project_and_location(self) -> Tuple[str, str]: return ( @@ -526,6 +537,57 @@ def _get_default_project_and_location(self) -> Tuple[str, str]: ) +# Helper classes for adding default metadata to API requests. +# We're solving multiple non-trivial issues here. +# Intended behavior. +# The first big question is whether calling `vertexai.init(request_metadata=...)` +# should change the existing clients. +# This question is non-trivial. Client's client options are immutable. +# But changes to default project, location and credentials affect SDK calls immediately. +# It can be argued that default metadata should affect previously created clients. +# Implementation. +# There are 3 kinds of clients: +# 1) Raw GAPIC client (there are also different transports like "grpc" and "rest") +# 2) ClientWithOverride with _is_temporary=True +# 3) ClientWithOverride with _is_temporary=False +# While a raw client or a non-temporary ClientWithOverride object can be patched once +# (`callable._metadata for callable in client._transport._wrapped_methods.values()`), +# a temporary `ClientWithOverride` creates new client at every call and they +# need to be dynamically patched. +# The temporary `ClientWithOverride` case requires dynamic wrapping/patching. +# A client wrapper, that dynamically wraps methods to add metadata, solves all 3 cases. +class _ClientWrapperThatAddsDefaultMetadata: + """A client wrapper that dynamically wraps methods to add default metadata.""" + + def __init__(self, client): + self._client = client + + def __getattr__(self, name: str): + result = getattr(self._client, name) + if global_config._request_metadata and callable(result): + func = result + if "metadata" in inspect.signature(func).parameters: + return _FunctionWrapperThatAddsDefaultMetadata(func) + return result + + +class _FunctionWrapperThatAddsDefaultMetadata: + """A function wrapper that wraps a function/method to add default metadata.""" + + def __init__(self, func): + self._func = func + functools.update_wrapper(self, func) + + def __call__(self, *args, **kwargs): + # Start with default metadata (copy it) + metadata_list = list(global_config._request_metadata or []) + # Add per-request metadata (overrides defaults) + # The "metadata" argument is removed from "kwargs" + metadata_list.extend(kwargs.pop("metadata", [])) + # Call the wrapped function with extra metadata + return self._func(*args, **kwargs, metadata=metadata_list) + + # global config to store init parameters: ie, aiplatform.init(project=..., location=...) 
global_config = _Config() diff --git a/tests/system/vertexai/test_generative_models.py b/tests/system/vertexai/test_generative_models.py index 1282e99c26..292f8a0dfa 100644 --- a/tests/system/vertexai/test_generative_models.py +++ b/tests/system/vertexai/test_generative_models.py @@ -429,3 +429,12 @@ def test_chat_automatic_function_calling(self): assert chat.history[-3].parts[0].function_call.name == "get_current_weather" assert chat.history[-2].parts[0].function_response assert chat.history[-2].parts[0].function_response.name == "get_current_weather" + + def test_additional_request_metadata(self): + aiplatform.init(request_metadata=[("foo", "bar")]) + model = generative_models.GenerativeModel(GEMINI_MODEL_NAME) + response = model.generate_content( + "Why is sky blue?", + generation_config=generative_models.GenerationConfig(temperature=0), + ) + assert response diff --git a/tests/unit/aiplatform/test_initializer.py b/tests/unit/aiplatform/test_initializer.py index d3993014f0..455ee0791a 100644 --- a/tests/unit/aiplatform/test_initializer.py +++ b/tests/unit/aiplatform/test_initializer.py @@ -32,6 +32,7 @@ from google.cloud.aiplatform.utils import resource_manager_utils from google.cloud.aiplatform.compat.services import ( model_service_client, + prediction_service_client_v1beta1, ) import constants as test_constants @@ -451,6 +452,62 @@ def test_init_with_only_project_does_not_override_set_creds(self): initializer.global_config.init(project=_TEST_PROJECT_2) assert initializer.global_config.credentials is creds + def test_create_client_with_request_metadata_model_service(self): + global_metadata = [ + ("global_param", "value1"), + ] + request_metadata = [ + ("request_param", "value2"), + ] + initializer.global_config.init( + project=_TEST_PROJECT, + location=_TEST_LOCATION, + request_metadata=global_metadata, + api_transport="rest", + ) + client = initializer.global_config.create_client( + client_class=utils.ModelClientWithOverride + ) + model_name = client.model_path( + project=_TEST_PROJECT, + location=_TEST_LOCATION, + model="model_id", + ) + with patch("requests.sessions.Session.get") as mock_get: + mock_get.return_value.status_code = 200 + mock_get.return_value.content = "{}" + client.get_model(name=model_name, metadata=request_metadata) + call_kwargs = mock_get.call_args_list[0][1] + headers = call_kwargs["headers"] + for metadata_key in ["global_param", "request_param"]: + assert metadata_key in headers + + def test_create_client_with_request_metadata_prediction_service(self): + global_metadata = [ + ("global_param", "value1"), + ] + request_metadata = [ + ("request_param", "value2"), + ] + initializer.global_config.init( + project=_TEST_PROJECT, + location=_TEST_LOCATION, + request_metadata=global_metadata, + api_transport="rest", + ) + client = initializer.global_config.create_client( + client_class=prediction_service_client_v1beta1.PredictionServiceClient + ) + model_name = f"projects/{_TEST_PROJECT}/locations/{_TEST_LOCATION}/publishers/google/models/gemini-1.0-pro" + with patch("requests.sessions.Session.post") as mock_post: + mock_post.return_value.status_code = 200 + mock_post.return_value.content = "{}" + client.generate_content(model=model_name, metadata=request_metadata) + call_kwargs = mock_post.call_args_list[0][1] + headers = call_kwargs["headers"] + for metadata_key in ["global_param", "request_param"]: + assert metadata_key in headers + class TestThreadPool: def teardown_method(self): From 70168eb815b3ecdf53351b884c0d116437e9e518 Mon Sep 17 00:00:00 2001 From: Zhenyi 
Qi Date: Wed, 5 Jun 2024 21:40:30 -0700 Subject: [PATCH 24/36] feat: GenAI - Explicit Caching - Allow `GenerativeModel` to be created with `CachedContent` PiperOrigin-RevId: 640760761 --- google/cloud/aiplatform/compat/__init__.py | 4 + google/cloud/aiplatform/utils/__init__.py | 8 +- tests/unit/vertexai/test_caching.py | 6 +- tests/unit/vertexai/test_generative_models.py | 108 ++++++++++++++++++ .../generative_models/_generative_models.py | 46 +++++--- 5 files changed, 152 insertions(+), 20 deletions(-) diff --git a/google/cloud/aiplatform/compat/__init__.py b/google/cloud/aiplatform/compat/__init__.py index 5583d68ed1..b3fee8f643 100644 --- a/google/cloud/aiplatform/compat/__init__.py +++ b/google/cloud/aiplatform/compat/__init__.py @@ -173,6 +173,8 @@ services.featurestore_online_serving_service_client_v1 ) services.featurestore_service_client = services.featurestore_service_client_v1 + # TODO(b/342585299): Temporary code. Switch to v1 once v1 is available. + services.gen_ai_cache_service_client = services.gen_ai_cache_service_client_v1beta1 services.job_service_client = services.job_service_client_v1 services.model_garden_service_client = services.model_garden_service_client_v1 services.model_service_client = services.model_service_client_v1 @@ -193,6 +195,8 @@ types.annotation_spec = types.annotation_spec_v1 types.artifact = types.artifact_v1 types.batch_prediction_job = types.batch_prediction_job_v1 + # TODO(b/342585299): Temporary code. Switch to v1 once v1 is available. + types.cached_content = types.cached_content_v1beta1 types.completion_stats = types.completion_stats_v1 types.context = types.context_v1 types.custom_job = types.custom_job_v1 diff --git a/google/cloud/aiplatform/utils/__init__.py b/google/cloud/aiplatform/utils/__init__.py index 73cded31ce..371602efa5 100644 --- a/google/cloud/aiplatform/utils/__init__.py +++ b/google/cloud/aiplatform/utils/__init__.py @@ -737,9 +737,13 @@ class FeaturestoreOnlineServingClientWithOverride(ClientWithOverride): class GenAiCacheServiceClientWithOverride(ClientWithOverride): _is_temporary = True - # TODO(b/342585299): Switch to compat.DEFAULT_VERSION once v1 is available. - _default_version = "v1beta1" + _default_version = compat.DEFAULT_VERSION _version_map = ( + ( + compat.V1, + # TODO(b/342585299): Temporary code. Switch to v1 once v1 is available. 
+ gen_ai_cache_service_client_v1beta1.GenAiCacheServiceClient, + ), ( compat.V1BETA1, gen_ai_cache_service_client_v1beta1.GenAiCacheServiceClient, diff --git a/tests/unit/vertexai/test_caching.py b/tests/unit/vertexai/test_caching.py index a6e7c75c2b..d3906865be 100644 --- a/tests/unit/vertexai/test_caching.py +++ b/tests/unit/vertexai/test_caching.py @@ -65,10 +65,11 @@ def create_cached_content(self, request): def mock_get_cached_content(): """Mocks GenAiCacheServiceClient.get_cached_content().""" - def get_cached_content(self, name, retry): + def get_cached_content(self, name, retry=None): del self, retry response = GapicCachedContent( name=f"{name}", + model="model-name", ) return response @@ -106,13 +107,16 @@ def test_constructor_with_full_resource_name(self, mock_get_cached_content): def test_constructor_with_only_content_id(self, mock_get_cached_content): partial_resource_name = "contents-id" + cache = caching.CachedContent( cached_content_name=partial_resource_name, ) + assert cache.name == "contents-id" assert cache.resource_name == ( f"projects/{_TEST_PROJECT}/locations/{_TEST_LOCATION}/cachedContents/contents-id" ) + assert cache.model_name == "model-name" def test_create_with_real_payload( self, mock_create_cached_content, mock_get_cached_content diff --git a/tests/unit/vertexai/test_generative_models.py b/tests/unit/vertexai/test_generative_models.py index 948d50f12f..6ad0c01319 100644 --- a/tests/unit/vertexai/test_generative_models.py +++ b/tests/unit/vertexai/test_generative_models.py @@ -33,7 +33,14 @@ gapic_content_types, gapic_tool_types, ) +from google.cloud.aiplatform_v1beta1.types.cached_content import ( + CachedContent as GapicCachedContent, +) +from google.cloud.aiplatform_v1beta1.services import ( + gen_ai_cache_service, +) from vertexai.generative_models import _function_calling_utils +from vertexai.preview import caching _TEST_PROJECT = "test-project" @@ -296,6 +303,18 @@ def mock_generate_content( return response +@pytest.fixture +def mock_generate_content_fixture(): + """Mocks PredictionServiceClient.generate_content().""" + + with mock.patch.object( + prediction_service.PredictionServiceClient, + "generate_content", + new=mock_generate_content, + ) as generate_content: + yield generate_content + + def mock_stream_generate_content( self, request: gapic_prediction_service_types.GenerateContentRequest, @@ -331,6 +350,26 @@ def mock_stream_generate_content( yield blocked_chunk +@pytest.fixture +def mock_get_cached_content_fixture(): + """Mocks GenAiCacheServiceClient.get_cached_content().""" + + def get_cached_content(self, name, retry=None): + del self, retry + response = GapicCachedContent( + name=f"{name}", + model="gemini-pro-from-mock-get-cached-content", + ) + return response + + with mock.patch.object( + gen_ai_cache_service.client.GenAiCacheServiceClient, + "get_cached_content", + new=get_cached_content, + ) as get_cached_content: + yield get_cached_content + + def get_current_weather(location: str, unit: Optional[str] = "centigrade"): """Gets weather in the specified location. 
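Read together with the caching module introduced earlier in this series, the new `from_cached_content` constructor exercised by the tests below is meant to be used roughly as follows. This is a hedged sketch based on the unit and system tests; the cached-content ID and prompt are placeholders:

```
from vertexai.preview import caching
from vertexai.preview import generative_models as preview_generative_models

# Look up an existing cache by ID or full resource name; the model name is
# read back from the CachedContent resource itself.
cached_content = caching.CachedContent("cached-content-id")  # placeholder ID

model = preview_generative_models.GenerativeModel.from_cached_content(
    cached_content=cached_content
)

# Requests made through this model reference the cache instead of resending
# the cached system instruction and contents.
response = model.generate_content("Why is sky blue?")
print(response.text)
```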
@@ -405,6 +444,40 @@ def test_generative_model_constructor_model_name( with pytest.raises(ValueError): generative_models.GenerativeModel("foo/bar/models/gemini-pro") + def test_generative_model_from_cached_content( + self, mock_get_cached_content_fixture + ): + project_location_prefix = ( + f"projects/{_TEST_PROJECT}/locations/{_TEST_LOCATION}/" + ) + cached_content = caching.CachedContent( + "cached-content-id-in-from-cached-content-test" + ) + + model = preview_generative_models.GenerativeModel.from_cached_content( + cached_content=cached_content + ) + + assert ( + model._prediction_resource_name + == project_location_prefix + + "publishers/google/models/" + + "gemini-pro-from-mock-get-cached-content" + ) + assert ( + model._cached_content.model_name + == "gemini-pro-from-mock-get-cached-content" + ) + assert ( + model._cached_content.resource_name + == f"projects/{_TEST_PROJECT}/locations/{_TEST_LOCATION}/" + "cachedContents/cached-content-id-in-from-cached-content-test" + ) + assert ( + model._cached_content.name + == "cached-content-id-in-from-cached-content-test" + ) + @mock.patch.object( target=prediction_service.PredictionServiceClient, attribute="generate_content", @@ -482,6 +555,41 @@ def test_generate_content(self, generative_models: generative_models): ) assert response3.text + @mock.patch.object( + target=prediction_service.PredictionServiceClient, + attribute="generate_content", + new=lambda self, request: gapic_prediction_service_types.GenerateContentResponse( + candidates=[ + gapic_content_types.Candidate( + index=0, + content=gapic_content_types.Content( + role="model", + parts=[ + gapic_content_types.Part( + {"text": f"response to {request.cached_content}"} + ) + ], + ), + ), + ], + ), + ) + def test_generate_content_with_cached_content( + self, + mock_get_cached_content_fixture, + ): + cached_content = caching.CachedContent( + "cached-content-id-in-from-cached-content-test" + ) + + model = preview_generative_models.GenerativeModel.from_cached_content( + cached_content=cached_content + ) + + response = model.generate_content("Why is sky blue?") + + assert response.text == "response to " + cached_content.resource_name + @mock.patch.object( target=prediction_service.PredictionServiceClient, attribute="stream_generate_content", diff --git a/vertexai/generative_models/_generative_models.py b/vertexai/generative_models/_generative_models.py index 08ae92d486..7dddf8e649 100644 --- a/vertexai/generative_models/_generative_models.py +++ b/vertexai/generative_models/_generative_models.py @@ -326,26 +326,13 @@ def __init__( Note: Only text should be used in parts. Content of each part will become a separate paragraph. """ - if not model_name: - raise ValueError("model_name must not be empty") - if "/" not in model_name: - model_name = "publishers/google/models/" + model_name - if model_name.startswith("models/"): - model_name = "publishers/google/" + model_name - project = aiplatform_initializer.global_config.project location = aiplatform_initializer.global_config.location + model_name = _reconcile_model_name(model_name, project, location) - if model_name.startswith("publishers/"): - prediction_resource_name = ( - f"projects/{project}/locations/{location}/{model_name}" - ) - elif model_name.startswith("projects/"): - prediction_resource_name = model_name - else: - raise ValueError( - "model_name must be either a Model Garden model ID or a full resource name." 
- ) + prediction_resource_name = _get_resource_name_from_model_name( + model_name, project, location + ) location = aiplatform_utils.extract_project_and_location_from_parent( prediction_resource_name @@ -359,6 +346,7 @@ def __init__( self._tools = tools self._tool_config = tool_config self._system_instruction = system_instruction + self._cached_content: Optional["caching.CachedContent"] = None # Validating the parameters _validate_generate_content_parameters( @@ -417,6 +405,7 @@ def _prepare_request( tools = tools or self._tools tool_config = tool_config or self._tool_config system_instruction = system_instruction or self._system_instruction + cached_content = self._cached_content _validate_generate_content_parameters( contents=contents, @@ -425,6 +414,7 @@ def _prepare_request( tools=tools, tool_config=tool_config, system_instruction=system_instruction, + cached_content=cached_content, ) contents = _content_types_to_gapic_contents(contents) @@ -483,6 +473,7 @@ def _prepare_request( tools=gapic_tools, tool_config=gapic_tool_config, system_instruction=gapic_system_instruction, + cached_content=cached_content.resource_name if cached_content else None, ) def _parse_response( @@ -2609,3 +2600,24 @@ def start_chat( response_validation=response_validation, responder=responder, ) + + @classmethod + def from_cached_content( + cls, + cached_content: "caching.CachedContent", + *, + generation_config: Optional[GenerationConfigType] = None, + safety_settings: Optional[SafetySettingsType] = None, + ) -> "_GenerativeModel": + model_name = cached_content.model_name + model = cls( + model_name=model_name, + generation_config=generation_config, + safety_settings=safety_settings, + tools=None, + tool_config=None, + system_instruction=None, + ) + model._cached_content = cached_content + + return model From c23c0ada07146f0e5ce6a787c8255313f7c4a06c Mon Sep 17 00:00:00 2001 From: Zhenyi Qi Date: Wed, 5 Jun 2024 22:04:14 -0700 Subject: [PATCH 25/36] fix: Allow non-lro delete method PiperOrigin-RevId: 640765520 --- google/cloud/aiplatform/base.py | 14 +++++++++----- 1 file changed, 9 insertions(+), 5 deletions(-) diff --git a/google/cloud/aiplatform/base.py b/google/cloud/aiplatform/base.py index 8f595d9d1a..a0789f226f 100644 --- a/google/cloud/aiplatform/base.py +++ b/google/cloud/aiplatform/base.py @@ -1199,12 +1199,16 @@ def _list_with_local_order( def _delete(self) -> None: """Deletes this Vertex AI resource. 
WARNING: This deletion is permanent.""" _LOGGER.log_action_start_against_resource("Deleting", "", self) - lro = getattr(self.api_client, self._delete_method)(name=self.resource_name) - _LOGGER.log_action_started_against_resource_with_lro( - "Delete", "", self.__class__, lro + possible_lro = getattr(self.api_client, self._delete_method)( + name=self.resource_name ) - lro.result() - _LOGGER.log_action_completed_against_resource("deleted.", "", self) + + if possible_lro: + _LOGGER.log_action_started_against_resource_with_lro( + "Delete", "", self.__class__, possible_lro + ) + possible_lro.result() + _LOGGER.log_action_completed_against_resource("deleted.", "", self) class VertexAiResourceNounWithFutureManager(_VertexAiResourceNounPlus, FutureManager): From b7056e74bcd180d8c07b6ac4ba566e3d3b6ce356 Mon Sep 17 00:00:00 2001 From: Zhenyi Qi Date: Thu, 6 Jun 2024 07:00:19 -0700 Subject: [PATCH 26/36] feat: GenAI - ExplicitCaching - add public list() with no parameters PiperOrigin-RevId: 640883756 --- vertexai/preview/caching.py | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/vertexai/preview/caching.py b/vertexai/preview/caching.py index 324bdbf4ba..00bab94fe8 100644 --- a/vertexai/preview/caching.py +++ b/vertexai/preview/caching.py @@ -271,3 +271,9 @@ def expire_time(self) -> datetime.datetime: def delete(self): self._delete() + + @classmethod + def list(cls): + # TODO(b/345326114): Make list() interface richer after aligning with + # Google AI SDK + return cls._list() From 48769a8aa8242857fdc7282e6744b3dfe01142f0 Mon Sep 17 00:00:00 2001 From: Zhenyi Qi Date: Thu, 6 Jun 2024 07:33:33 -0700 Subject: [PATCH 27/36] chore: GenAI - Explicit Caching - Add integration test for cached content feature PiperOrigin-RevId: 640892237 --- .../system/vertexai/test_generative_models.py | 41 +++++++++++++++++++ 1 file changed, 41 insertions(+) diff --git a/tests/system/vertexai/test_generative_models.py b/tests/system/vertexai/test_generative_models.py index 292f8a0dfa..4fb9c877e0 100644 --- a/tests/system/vertexai/test_generative_models.py +++ b/tests/system/vertexai/test_generative_models.py @@ -25,13 +25,16 @@ from google.cloud import aiplatform from tests.system.aiplatform import e2e_base from vertexai import generative_models +from vertexai.generative_models import Content from vertexai.preview import ( generative_models as preview_generative_models, ) +from vertexai.preview import caching GEMINI_MODEL_NAME = "gemini-1.0-pro-002" GEMINI_VISION_MODEL_NAME = "gemini-1.0-pro-vision" GEMINI_15_MODEL_NAME = "gemini-1.5-pro-preview-0409" +GEMINI_15_0514_MODEL_NAME = "gemini-1.5-pro-preview-0514" # A dummy function for function calling @@ -97,6 +100,44 @@ def setup_method(self): credentials=credentials, ) + def test_generate_content_with_cached_content_from_text(self): + cached_content = caching.CachedContent.create( + model_name=GEMINI_15_0514_MODEL_NAME, + system_instruction="Please answer all the questions like a pirate.", + contents=[ + Content.from_dict( + { + "role": "user", + "parts": [ + { + "file_data": { + "mime_type": "application/pdf", + "file_uri": "gs://ucaip-samples-us-central1/sdk_system_test_resources/megatro-llm.pdf", + } + } + for _ in range(10) + ] + + [ + {"text": "Please try to summarize the previous contents."}, + ], + } + ) + ], + ) + + model = generative_models.GenerativeModel.from_cached_content( + cached_content=cached_content + ) + + response = model.generate_content( + "Why is sky blue?", + generation_config=generative_models.GenerationConfig(temperature=0), + ) + try: + 
assert response.text + finally: + cached_content.delete() + def test_generate_content_from_text(self): model = generative_models.GenerativeModel(GEMINI_MODEL_NAME) response = model.generate_content( From bd4c09ca05479546e6974573c1fd77975a218b40 Mon Sep 17 00:00:00 2001 From: lingyinw Date: Thu, 6 Jun 2024 07:56:38 -0700 Subject: [PATCH 28/36] Copybara import of the project: -- bd20eddda3bfb962422c1b7402140590fe0d5a6c by lingyinw : fix: regenerate pb2 files using grpcio-tools, add hybrid search COPYBARA_INTEGRATE_REVIEW=https://github.com/googleapis/python-aiplatform/pull/3665 from lingyinw:main caa6b044a340e21f64a0e5276b50ae10fe4a56c8 PiperOrigin-RevId: 640897890 --- .../_protos/match_service.proto | 28 +++++++ .../_protos/match_service_pb2.py | 82 ++++++++++++------- 2 files changed, 82 insertions(+), 28 deletions(-) diff --git a/google/cloud/aiplatform/matching_engine/_protos/match_service.proto b/google/cloud/aiplatform/matching_engine/_protos/match_service.proto index c20a6cd003..6224d1b0fc 100644 --- a/google/cloud/aiplatform/matching_engine/_protos/match_service.proto +++ b/google/cloud/aiplatform/matching_engine/_protos/match_service.proto @@ -34,6 +34,34 @@ message MatchRequest { // The embedding values. repeated float float_val = 2; + // Feature embedding vector for sparse index. An array of numbers whose values + // are located in the specified dimensions. + message SparseEmbedding { + + // The list of embedding values of the sparse vector. + repeated float float_val = 1; + + // The list of indexes for the embedding values of the sparse vector. + repeated int64 dimension = 2; + } + + // Feature embedding vector for sparse index. + SparseEmbedding sparse_embedding = 12; + + // Parameters for RRF algorithm that combines search results. + message RRF { + + // Users can provide an alpha value to give more weight to sparse vs dense. + // For example, if the alpha is 0, we don't return dense at all, if it's 1, + // we don't return sparse at all. + float alpha = 1; + } + + oneof ranking { + // Represents RRF algorithm that combines search results. + RRF rrf = 13; + } + // The number of nearest neighbors to be retrieved from database for // each query. If not set, will use the default from // the service configuration. 
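
A minimal usage sketch of the new hybrid-search fields added to MatchRequest above, assuming the regenerated match_service_pb2 module from this change is importable; the deployed index ID and the embedding values are placeholders, not values from this patch:

    from google.cloud.aiplatform.matching_engine._protos import match_service_pb2

    # Hybrid query: dense values go in float_val, sparse values and their
    # dimensions in the new SparseEmbedding message, combined via the RRF
    # ranking option introduced in this proto change.
    request = match_service_pb2.MatchRequest(
        deployed_index_id="my-deployed-index",  # placeholder index ID
        float_val=[0.1, 0.2, 0.3],              # dense embedding values
        sparse_embedding=match_service_pb2.MatchRequest.SparseEmbedding(
            float_val=[0.5, 0.25],              # sparse embedding values
            dimension=[7, 42],                  # indexes of those values
        ),
        rrf=match_service_pb2.MatchRequest.RRF(alpha=0.3),  # weight of dense vs. sparse
        num_neighbors=10,
    )
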
diff --git a/google/cloud/aiplatform/matching_engine/_protos/match_service_pb2.py b/google/cloud/aiplatform/matching_engine/_protos/match_service_pb2.py index e5cfbd16f5..f84d6f397d 100644 --- a/google/cloud/aiplatform/matching_engine/_protos/match_service_pb2.py +++ b/google/cloud/aiplatform/matching_engine/_protos/match_service_pb2.py @@ -32,11 +32,13 @@ DESCRIPTOR = _descriptor_pool.Default().AddSerializedFile( - b'\nCgoogle/cloud/aiplatform/matching_engine/_protos/match_service.proto\x12$google.cloud.aiplatform.container.v1\x1a\x17google/rpc/status.proto"\xc6\x03\n\x0cMatchRequest\x12\x19\n\x11\x64\x65ployed_index_id\x18\x01 \x01(\t\x12\x11\n\tfloat_val\x18\x02 \x03(\x02\x12\x15\n\rnum_neighbors\x18\x03 \x01(\x05\x12\x42\n\trestricts\x18\x04 \x03(\x0b\x32/.google.cloud.aiplatform.container.v1.Namespace\x12Q\n\x11numeric_restricts\x18\x0b \x03(\x0b\x32\x36.google.cloud.aiplatform.container.v1.NumericNamespace\x12,\n$per_crowding_attribute_num_neighbors\x18\x05 \x01(\x05\x12\x1c\n\x14\x61pprox_num_neighbors\x18\x06 \x01(\x05\x12-\n%leaf_nodes_to_search_percent_override\x18\x07 \x01(\x05\x12.\n&fraction_leaf_nodes_to_search_override\x18\t \x01(\x01\x12\x19\n\x11\x65mbedding_enabled\x18\x08 \x01(\x08\x12\x14\n\x0c\x65mbedding_id\x18\n \x01(\t"\xdd\x01\n\tEmbedding\x12\n\n\x02id\x18\x01 \x01(\t\x12\x11\n\tfloat_val\x18\x02 \x03(\x02\x12\x42\n\trestricts\x18\x03 \x03(\x0b\x32/.google.cloud.aiplatform.container.v1.Namespace\x12Q\n\x11numeric_restricts\x18\x05 \x03(\x0b\x32\x36.google.cloud.aiplatform.container.v1.NumericNamespace\x12\x1a\n\x12\x63rowding_attribute\x18\x04 \x01(\x03"\xea\x01\n\rMatchResponse\x12N\n\x08neighbor\x18\x01 \x03(\x0b\x32<.google.cloud.aiplatform.container.v1.MatchResponse.Neighbor\x12\x43\n\nembeddings\x18\x02 \x03(\x0b\x32/.google.cloud.aiplatform.container.v1.Embedding\x1a\x44\n\x08Neighbor\x12\n\n\x02id\x18\x01 \x01(\t\x12\x10\n\x08\x64istance\x18\x02 \x01(\x01\x12\x1a\n\x12\x63rowding_attribute\x18\x03 \x01(\x03"B\n\x19\x42\x61tchGetEmbeddingsRequest\x12\x19\n\x11\x64\x65ployed_index_id\x18\x01 \x01(\t\x12\n\n\x02id\x18\x02 \x03(\t"a\n\x1a\x42\x61tchGetEmbeddingsResponse\x12\x43\n\nembeddings\x18\x01 \x03(\x0b\x32/.google.cloud.aiplatform.container.v1.Embedding"\x95\x02\n\x11\x42\x61tchMatchRequest\x12\x63\n\x08requests\x18\x01 \x03(\x0b\x32Q.google.cloud.aiplatform.container.v1.BatchMatchRequest.BatchMatchRequestPerIndex\x1a\x9a\x01\n\x19\x42\x61tchMatchRequestPerIndex\x12\x19\n\x11\x64\x65ployed_index_id\x18\x01 \x01(\t\x12\x44\n\x08requests\x18\x02 \x03(\x0b\x32\x32.google.cloud.aiplatform.container.v1.MatchRequest\x12\x1c\n\x14low_level_batch_size\x18\x03 \x01(\x05"\xa2\x02\n\x12\x42\x61tchMatchResponse\x12\x66\n\tresponses\x18\x01 \x03(\x0b\x32S.google.cloud.aiplatform.container.v1.BatchMatchResponse.BatchMatchResponsePerIndex\x1a\xa3\x01\n\x1a\x42\x61tchMatchResponsePerIndex\x12\x19\n\x11\x64\x65ployed_index_id\x18\x01 \x01(\t\x12\x46\n\tresponses\x18\x02 \x03(\x0b\x32\x33.google.cloud.aiplatform.container.v1.MatchResponse\x12"\n\x06status\x18\x03 \x01(\x0b\x32\x12.google.rpc.Status"D\n\tNamespace\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\x14\n\x0c\x61llow_tokens\x18\x02 \x03(\t\x12\x13\n\x0b\x64\x65ny_tokens\x18\x03 \x03(\t"\xb4\x02\n\x10NumericNamespace\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\x13\n\tvalue_int\x18\x02 \x01(\x03H\x00\x12\x15\n\x0bvalue_float\x18\x03 \x01(\x02H\x00\x12\x16\n\x0cvalue_double\x18\x04 \x01(\x01H\x00\x12K\n\x02op\x18\x05 
\x01(\x0e\x32?.google.cloud.aiplatform.container.v1.NumericNamespace.Operator"x\n\x08Operator\x12\x18\n\x14OPERATOR_UNSPECIFIED\x10\x00\x12\x08\n\x04LESS\x10\x01\x12\x0e\n\nLESS_EQUAL\x10\x02\x12\t\n\x05\x45QUAL\x10\x03\x12\x11\n\rGREATER_EQUAL\x10\x04\x12\x0b\n\x07GREATER\x10\x05\x12\r\n\tNOT_EQUAL\x10\x06\x42\x07\n\x05Value2\xa2\x03\n\x0cMatchService\x12r\n\x05Match\x12\x32.google.cloud.aiplatform.container.v1.MatchRequest\x1a\x33.google.cloud.aiplatform.container.v1.MatchResponse"\x00\x12\x81\x01\n\nBatchMatch\x12\x37.google.cloud.aiplatform.container.v1.BatchMatchRequest\x1a\x38.google.cloud.aiplatform.container.v1.BatchMatchResponse"\x00\x12\x99\x01\n\x12\x42\x61tchGetEmbeddings\x12?.google.cloud.aiplatform.container.v1.BatchGetEmbeddingsRequest\x1a@.google.cloud.aiplatform.container.v1.BatchGetEmbeddingsResponse"\x00\x62\x06proto3' + b'\nCgoogle/cloud/aiplatform/matching_engine/_protos/match_service.proto\x12$google.cloud.aiplatform.container.v1\x1a\x17google/rpc/status.proto"\xc5\x05\n\x0cMatchRequest\x12\x19\n\x11\x64\x65ployed_index_id\x18\x01 \x01(\t\x12\x11\n\tfloat_val\x18\x02 \x03(\x02\x12\\\n\x10sparse_embedding\x18\x0c \x01(\x0b\x32\x42.google.cloud.aiplatform.container.v1.MatchRequest.SparseEmbedding\x12\x45\n\x03rrf\x18\r \x01(\x0b\x32\x36.google.cloud.aiplatform.container.v1.MatchRequest.RRFH\x00\x12\x15\n\rnum_neighbors\x18\x03 \x01(\x05\x12\x42\n\trestricts\x18\x04 \x03(\x0b\x32/.google.cloud.aiplatform.container.v1.Namespace\x12Q\n\x11numeric_restricts\x18\x0b \x03(\x0b\x32\x36.google.cloud.aiplatform.container.v1.NumericNamespace\x12,\n$per_crowding_attribute_num_neighbors\x18\x05 \x01(\x05\x12\x1c\n\x14\x61pprox_num_neighbors\x18\x06 \x01(\x05\x12-\n%leaf_nodes_to_search_percent_override\x18\x07 \x01(\x05\x12.\n&fraction_leaf_nodes_to_search_override\x18\t \x01(\x01\x12\x19\n\x11\x65mbedding_enabled\x18\x08 \x01(\x08\x12\x14\n\x0c\x65mbedding_id\x18\n \x01(\t\x1a\x37\n\x0fSparseEmbedding\x12\x11\n\tfloat_val\x18\x01 \x03(\x02\x12\x11\n\tdimension\x18\x02 \x03(\x03\x1a\x14\n\x03RRF\x12\r\n\x05\x61lpha\x18\x01 \x01(\x02\x42\t\n\x07ranking"\xdd\x01\n\tEmbedding\x12\n\n\x02id\x18\x01 \x01(\t\x12\x11\n\tfloat_val\x18\x02 \x03(\x02\x12\x42\n\trestricts\x18\x03 \x03(\x0b\x32/.google.cloud.aiplatform.container.v1.Namespace\x12Q\n\x11numeric_restricts\x18\x05 \x03(\x0b\x32\x36.google.cloud.aiplatform.container.v1.NumericNamespace\x12\x1a\n\x12\x63rowding_attribute\x18\x04 \x01(\x03"\xea\x01\n\rMatchResponse\x12N\n\x08neighbor\x18\x01 \x03(\x0b\x32<.google.cloud.aiplatform.container.v1.MatchResponse.Neighbor\x12\x43\n\nembeddings\x18\x02 \x03(\x0b\x32/.google.cloud.aiplatform.container.v1.Embedding\x1a\x44\n\x08Neighbor\x12\n\n\x02id\x18\x01 \x01(\t\x12\x10\n\x08\x64istance\x18\x02 \x01(\x01\x12\x1a\n\x12\x63rowding_attribute\x18\x03 \x01(\x03"B\n\x19\x42\x61tchGetEmbeddingsRequest\x12\x19\n\x11\x64\x65ployed_index_id\x18\x01 \x01(\t\x12\n\n\x02id\x18\x02 \x03(\t"a\n\x1a\x42\x61tchGetEmbeddingsResponse\x12\x43\n\nembeddings\x18\x01 \x03(\x0b\x32/.google.cloud.aiplatform.container.v1.Embedding"\x95\x02\n\x11\x42\x61tchMatchRequest\x12\x63\n\x08requests\x18\x01 \x03(\x0b\x32Q.google.cloud.aiplatform.container.v1.BatchMatchRequest.BatchMatchRequestPerIndex\x1a\x9a\x01\n\x19\x42\x61tchMatchRequestPerIndex\x12\x19\n\x11\x64\x65ployed_index_id\x18\x01 \x01(\t\x12\x44\n\x08requests\x18\x02 \x03(\x0b\x32\x32.google.cloud.aiplatform.container.v1.MatchRequest\x12\x1c\n\x14low_level_batch_size\x18\x03 \x01(\x05"\xa2\x02\n\x12\x42\x61tchMatchResponse\x12\x66\n\tresponses\x18\x01 
\x03(\x0b\x32S.google.cloud.aiplatform.container.v1.BatchMatchResponse.BatchMatchResponsePerIndex\x1a\xa3\x01\n\x1a\x42\x61tchMatchResponsePerIndex\x12\x19\n\x11\x64\x65ployed_index_id\x18\x01 \x01(\t\x12\x46\n\tresponses\x18\x02 \x03(\x0b\x32\x33.google.cloud.aiplatform.container.v1.MatchResponse\x12"\n\x06status\x18\x03 \x01(\x0b\x32\x12.google.rpc.Status"D\n\tNamespace\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\x14\n\x0c\x61llow_tokens\x18\x02 \x03(\t\x12\x13\n\x0b\x64\x65ny_tokens\x18\x03 \x03(\t"\xb4\x02\n\x10NumericNamespace\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\x13\n\tvalue_int\x18\x02 \x01(\x03H\x00\x12\x15\n\x0bvalue_float\x18\x03 \x01(\x02H\x00\x12\x16\n\x0cvalue_double\x18\x04 \x01(\x01H\x00\x12K\n\x02op\x18\x05 \x01(\x0e\x32?.google.cloud.aiplatform.container.v1.NumericNamespace.Operator"x\n\x08Operator\x12\x18\n\x14OPERATOR_UNSPECIFIED\x10\x00\x12\x08\n\x04LESS\x10\x01\x12\x0e\n\nLESS_EQUAL\x10\x02\x12\t\n\x05\x45QUAL\x10\x03\x12\x11\n\rGREATER_EQUAL\x10\x04\x12\x0b\n\x07GREATER\x10\x05\x12\r\n\tNOT_EQUAL\x10\x06\x42\x07\n\x05Value2\xa2\x03\n\x0cMatchService\x12r\n\x05Match\x12\x32.google.cloud.aiplatform.container.v1.MatchRequest\x1a\x33.google.cloud.aiplatform.container.v1.MatchResponse"\x00\x12\x81\x01\n\nBatchMatch\x12\x37.google.cloud.aiplatform.container.v1.BatchMatchRequest\x1a\x38.google.cloud.aiplatform.container.v1.BatchMatchResponse"\x00\x12\x99\x01\n\x12\x42\x61tchGetEmbeddings\x12?.google.cloud.aiplatform.container.v1.BatchGetEmbeddingsRequest\x1a@.google.cloud.aiplatform.container.v1.BatchGetEmbeddingsResponse"\x00\x62\x06proto3' ) _MATCHREQUEST = DESCRIPTOR.message_types_by_name["MatchRequest"] +_MATCHREQUEST_SPARSEEMBEDDING = _MATCHREQUEST.nested_types_by_name["SparseEmbedding"] +_MATCHREQUEST_RRF = _MATCHREQUEST.nested_types_by_name["RRF"] _EMBEDDING = DESCRIPTOR.message_types_by_name["Embedding"] _MATCHRESPONSE = DESCRIPTOR.message_types_by_name["MatchResponse"] _MATCHRESPONSE_NEIGHBOR = _MATCHRESPONSE.nested_types_by_name["Neighbor"] @@ -61,12 +63,32 @@ "MatchRequest", (_message.Message,), { + "SparseEmbedding": _reflection.GeneratedProtocolMessageType( + "SparseEmbedding", + (_message.Message,), + { + "DESCRIPTOR": _MATCHREQUEST_SPARSEEMBEDDING, + "__module__": "google.cloud.aiplatform.matching_engine._protos.match_service_pb2" + # @@protoc_insertion_point(class_scope:google.cloud.aiplatform.container.v1.MatchRequest.SparseEmbedding) + }, + ), + "RRF": _reflection.GeneratedProtocolMessageType( + "RRF", + (_message.Message,), + { + "DESCRIPTOR": _MATCHREQUEST_RRF, + "__module__": "google.cloud.aiplatform.matching_engine._protos.match_service_pb2" + # @@protoc_insertion_point(class_scope:google.cloud.aiplatform.container.v1.MatchRequest.RRF) + }, + ), "DESCRIPTOR": _MATCHREQUEST, "__module__": "google.cloud.aiplatform.matching_engine._protos.match_service_pb2" # @@protoc_insertion_point(class_scope:google.cloud.aiplatform.container.v1.MatchRequest) }, ) _sym_db.RegisterMessage(MatchRequest) +_sym_db.RegisterMessage(MatchRequest.SparseEmbedding) +_sym_db.RegisterMessage(MatchRequest.RRF) Embedding = _reflection.GeneratedProtocolMessageType( "Embedding", @@ -191,31 +213,35 @@ DESCRIPTOR._options = None _MATCHREQUEST._serialized_start = 135 - _MATCHREQUEST._serialized_end = 589 - _EMBEDDING._serialized_start = 592 - _EMBEDDING._serialized_end = 813 - _MATCHRESPONSE._serialized_start = 816 - _MATCHRESPONSE._serialized_end = 1050 - _MATCHRESPONSE_NEIGHBOR._serialized_start = 982 - _MATCHRESPONSE_NEIGHBOR._serialized_end = 1050 - 
_BATCHGETEMBEDDINGSREQUEST._serialized_start = 1052 - _BATCHGETEMBEDDINGSREQUEST._serialized_end = 1118 - _BATCHGETEMBEDDINGSRESPONSE._serialized_start = 1120 - _BATCHGETEMBEDDINGSRESPONSE._serialized_end = 1217 - _BATCHMATCHREQUEST._serialized_start = 1220 - _BATCHMATCHREQUEST._serialized_end = 1497 - _BATCHMATCHREQUEST_BATCHMATCHREQUESTPERINDEX._serialized_start = 1343 - _BATCHMATCHREQUEST_BATCHMATCHREQUESTPERINDEX._serialized_end = 1497 - _BATCHMATCHRESPONSE._serialized_start = 1500 - _BATCHMATCHRESPONSE._serialized_end = 1790 - _BATCHMATCHRESPONSE_BATCHMATCHRESPONSEPERINDEX._serialized_start = 1627 - _BATCHMATCHRESPONSE_BATCHMATCHRESPONSEPERINDEX._serialized_end = 1790 - _NAMESPACE._serialized_start = 1792 - _NAMESPACE._serialized_end = 1860 - _NUMERICNAMESPACE._serialized_start = 1863 - _NUMERICNAMESPACE._serialized_end = 2171 - _NUMERICNAMESPACE_OPERATOR._serialized_start = 2042 - _NUMERICNAMESPACE_OPERATOR._serialized_end = 2162 - _MATCHSERVICE._serialized_start = 2174 - _MATCHSERVICE._serialized_end = 2592 + _MATCHREQUEST._serialized_end = 844 + _MATCHREQUEST_SPARSEEMBEDDING._serialized_start = 756 + _MATCHREQUEST_SPARSEEMBEDDING._serialized_end = 811 + _MATCHREQUEST_RRF._serialized_start = 813 + _MATCHREQUEST_RRF._serialized_end = 833 + _EMBEDDING._serialized_start = 847 + _EMBEDDING._serialized_end = 1068 + _MATCHRESPONSE._serialized_start = 1071 + _MATCHRESPONSE._serialized_end = 1305 + _MATCHRESPONSE_NEIGHBOR._serialized_start = 1237 + _MATCHRESPONSE_NEIGHBOR._serialized_end = 1305 + _BATCHGETEMBEDDINGSREQUEST._serialized_start = 1307 + _BATCHGETEMBEDDINGSREQUEST._serialized_end = 1373 + _BATCHGETEMBEDDINGSRESPONSE._serialized_start = 1375 + _BATCHGETEMBEDDINGSRESPONSE._serialized_end = 1472 + _BATCHMATCHREQUEST._serialized_start = 1475 + _BATCHMATCHREQUEST._serialized_end = 1752 + _BATCHMATCHREQUEST_BATCHMATCHREQUESTPERINDEX._serialized_start = 1598 + _BATCHMATCHREQUEST_BATCHMATCHREQUESTPERINDEX._serialized_end = 1752 + _BATCHMATCHRESPONSE._serialized_start = 1755 + _BATCHMATCHRESPONSE._serialized_end = 2045 + _BATCHMATCHRESPONSE_BATCHMATCHRESPONSEPERINDEX._serialized_start = 1882 + _BATCHMATCHRESPONSE_BATCHMATCHRESPONSEPERINDEX._serialized_end = 2045 + _NAMESPACE._serialized_start = 2047 + _NAMESPACE._serialized_end = 2115 + _NUMERICNAMESPACE._serialized_start = 2118 + _NUMERICNAMESPACE._serialized_end = 2426 + _NUMERICNAMESPACE_OPERATOR._serialized_start = 2297 + _NUMERICNAMESPACE_OPERATOR._serialized_end = 2417 + _MATCHSERVICE._serialized_start = 2429 + _MATCHSERVICE._serialized_end = 2847 # @@protoc_insertion_point(module_scope) From d689331af5172cdfe7428333536954e8339f8ab4 Mon Sep 17 00:00:00 2001 From: Zhenyi Qi Date: Thu, 6 Jun 2024 08:57:09 -0700 Subject: [PATCH 29/36] fix: ensure model starts with publishers/ when users provide resource path from models/ PiperOrigin-RevId: 640914707 --- tests/unit/vertexai/test_generative_models.py | 4 ++++ vertexai/generative_models/_generative_models.py | 2 +- 2 files changed, 5 insertions(+), 1 deletion(-) diff --git a/tests/unit/vertexai/test_generative_models.py b/tests/unit/vertexai/test_generative_models.py index 6ad0c01319..fd988dd67a 100644 --- a/tests/unit/vertexai/test_generative_models.py +++ b/tests/unit/vertexai/test_generative_models.py @@ -422,6 +422,7 @@ def test_generative_model_constructor_model_name( model1._prediction_resource_name == project_location_prefix + "publishers/google/models/" + model_name1 ) + assert model1._model_name == "publishers/google/models/gemini-pro" model_name2 = "models/gemini-pro" model2 = 
generative_models.GenerativeModel(model_name2) @@ -429,10 +430,12 @@ def test_generative_model_constructor_model_name( model2._prediction_resource_name == project_location_prefix + "publishers/google/" + model_name2 ) + assert model2._model_name == "publishers/google/models/gemini-pro" model_name3 = "publishers/some_publisher/models/some_model" model3 = generative_models.GenerativeModel(model_name3) assert model3._prediction_resource_name == project_location_prefix + model_name3 + assert model3._model_name == "publishers/some_publisher/models/some_model" model_name4 = ( f"projects/{_TEST_PROJECT2}/locations/{_TEST_LOCATION2}/endpoints/endpoint1" @@ -440,6 +443,7 @@ def test_generative_model_constructor_model_name( model4 = generative_models.GenerativeModel(model_name4) assert model4._prediction_resource_name == model_name4 assert _TEST_LOCATION2 in model4._prediction_client._api_endpoint + assert model4._model_name == model_name4 with pytest.raises(ValueError): generative_models.GenerativeModel("foo/bar/models/gemini-pro") diff --git a/vertexai/generative_models/_generative_models.py b/vertexai/generative_models/_generative_models.py index 7dddf8e649..168c327bcd 100644 --- a/vertexai/generative_models/_generative_models.py +++ b/vertexai/generative_models/_generative_models.py @@ -110,7 +110,7 @@ def _reconcile_model_name(model_name: str, project: str, location: str) -> str: if "/" not in model_name: return f"publishers/google/models/{model_name}" elif model_name.startswith("models/"): - return f"projects/{project}/locations/{location}/publishers/google/{model_name}" + return f"publishers/google/{model_name}" elif model_name.startswith("publishers/") or model_name.startswith("projects/"): return model_name else: From afdb282a1ff920056af736868b4bd13a27ac3956 Mon Sep 17 00:00:00 2001 From: Alexey Volkov Date: Thu, 6 Jun 2024 09:00:40 -0700 Subject: [PATCH 30/36] chore: Remove unnecessary module re-export from the `vertexai` module PiperOrigin-RevId: 640915678 --- samples/model-builder/conftest.py | 8 ++++---- .../create_bigtable_feature_online_store_sample.py | 4 ++-- ...create_optimized_public_feature_online_store_sample.py | 4 ++-- vertexai/__init__.py | 2 -- vertexai/resources/__init__.py | 2 -- 5 files changed, 8 insertions(+), 12 deletions(-) diff --git a/samples/model-builder/conftest.py b/samples/model-builder/conftest.py index 949b620d22..9350bec29b 100644 --- a/samples/model-builder/conftest.py +++ b/samples/model-builder/conftest.py @@ -15,7 +15,7 @@ from unittest.mock import MagicMock, patch from google.cloud import aiplatform -import vertexai +from vertexai.resources import preview as preview_resources import pytest @@ -694,14 +694,14 @@ def mock_write_feature_values(mock_entity_type): @pytest.fixture def mock_feature_online_store(): - mock = MagicMock(vertexai.resources.preview.FeatureOnlineStore) + mock = MagicMock(preview_resources.FeatureOnlineStore) yield mock @pytest.fixture def mock_create_feature_online_store(mock_feature_online_store): with patch.object( - vertexai.resources.preview.FeatureOnlineStore, "create_bigtable_store" + preview_resources.FeatureOnlineStore, "create_bigtable_store" ) as mock_create_feature_online_store: mock_create_feature_online_store.return_value = mock_feature_online_store yield mock_create_feature_online_store @@ -710,7 +710,7 @@ def mock_create_feature_online_store(mock_feature_online_store): @pytest.fixture def mock_create_optimized_public_online_store(mock_feature_online_store): with patch.object( - 
vertexai.resources.preview.FeatureOnlineStore, "create_optimized_store" + preview_resources.FeatureOnlineStore, "create_optimized_store" ) as mock_create_optimized_store: mock_create_optimized_store.return_value = mock_feature_online_store yield mock_create_optimized_store diff --git a/samples/model-builder/feature_store/create_bigtable_feature_online_store_sample.py b/samples/model-builder/feature_store/create_bigtable_feature_online_store_sample.py index 69e883ee70..8d69285962 100644 --- a/samples/model-builder/feature_store/create_bigtable_feature_online_store_sample.py +++ b/samples/model-builder/feature_store/create_bigtable_feature_online_store_sample.py @@ -15,7 +15,7 @@ # [START aiplatform_sdk_create_bigtable_feature_online_store_sample] from google.cloud import aiplatform -import vertexai +from vertexai.resources.preview import feature_store def create_bigtable_feature_online_store_sample( @@ -24,7 +24,7 @@ def create_bigtable_feature_online_store_sample( feature_online_store_id: str, ): aiplatform.init(project=project, location=location) - fos = vertexai.resources.preview.FeatureOnlineStore.create_bigtable_store( + fos = feature_store.FeatureOnlineStore.create_bigtable_store( feature_online_store_id ) return fos diff --git a/samples/model-builder/feature_store/create_optimized_public_feature_online_store_sample.py b/samples/model-builder/feature_store/create_optimized_public_feature_online_store_sample.py index 45a3f177dd..94100b2a1c 100644 --- a/samples/model-builder/feature_store/create_optimized_public_feature_online_store_sample.py +++ b/samples/model-builder/feature_store/create_optimized_public_feature_online_store_sample.py @@ -15,7 +15,7 @@ # [START aiplatform_sdk_create_optimized_public_feature_online_store_sample] from google.cloud import aiplatform -import vertexai +from vertexai.resources.preview import feature_store def create_optimized_public_feature_online_store_sample( @@ -24,7 +24,7 @@ def create_optimized_public_feature_online_store_sample( feature_online_store_id: str, ): aiplatform.init(project=project, location=location) - fos = vertexai.resources.preview.FeatureOnlineStore.create_optimized_store( + fos = feature_store.FeatureOnlineStore.create_optimized_store( feature_online_store_id ) return fos diff --git a/vertexai/__init__.py b/vertexai/__init__.py index c2558bad09..8f73185c54 100644 --- a/vertexai/__init__.py +++ b/vertexai/__init__.py @@ -20,10 +20,8 @@ from google.cloud.aiplatform import init from vertexai import preview -from vertexai import resources __all__ = [ "init", "preview", - "resources", ] diff --git a/vertexai/resources/__init__.py b/vertexai/resources/__init__.py index 17b546129d..f3b85e0c45 100644 --- a/vertexai/resources/__init__.py +++ b/vertexai/resources/__init__.py @@ -17,7 +17,6 @@ """The vertexai resources module.""" from google.cloud.aiplatform import initializer -from vertexai.resources import preview from google.cloud.aiplatform.datasets import ( ImageDataset, @@ -178,5 +177,4 @@ "TimeSeriesDataset", "TimeSeriesDenseEncoderForecastingTrainingJob", "VideoDataset", - "preview", ) From 5ceb8f767a1b84bd9469ed5274ecbff35f2c3d17 Mon Sep 17 00:00:00 2001 From: Sasha Sobran Date: Thu, 6 Jun 2024 09:54:44 -0700 Subject: [PATCH 31/36] chore: remove data science package tests PiperOrigin-RevId: 640931952 --- .../system/aiplatform/test_language_models.py | 7 +- .../system/vertexai/test_bigframes_sklearn.py | 190 --------------- .../vertexai/test_bigframes_tensorflow.py | 130 ----------- tests/system/vertexai/test_pytorch.py | 216 
------------------ tests/system/vertexai/test_sklearn.py | 203 ---------------- tests/system/vertexai/test_tensorflow.py | 182 --------------- 6 files changed, 3 insertions(+), 925 deletions(-) delete mode 100644 tests/system/vertexai/test_bigframes_sklearn.py delete mode 100644 tests/system/vertexai/test_bigframes_tensorflow.py delete mode 100644 tests/system/vertexai/test_pytorch.py delete mode 100644 tests/system/vertexai/test_sklearn.py delete mode 100644 tests/system/vertexai/test_tensorflow.py diff --git a/tests/system/aiplatform/test_language_models.py b/tests/system/aiplatform/test_language_models.py index 7327a28d9c..6756ef651c 100644 --- a/tests/system/aiplatform/test_language_models.py +++ b/tests/system/aiplatform/test_language_models.py @@ -24,7 +24,6 @@ from google.cloud.aiplatform.compat.types import ( job_state as gca_job_state, ) -import vertexai from tests.system.aiplatform import e2e_base from google.cloud.aiplatform.utils import gcs_utils from vertexai import language_models @@ -128,15 +127,15 @@ def test_text_generation_streaming(self, api_transport): assert response.text or response.is_blocked @pytest.mark.parametrize("api_transport", ["grpc", "rest"]) - def test_preview_text_embedding_top_level_from_pretrained(self, api_transport): + def test_preview_text_embedding_from_pretrained(self, api_transport): aiplatform.init( project=e2e_base._PROJECT, location=e2e_base._LOCATION, api_transport=api_transport, ) - model = vertexai.preview.from_pretrained( - foundation_model_name="google/text-bison@001" + model = preview_language_models.TextEmbeddingModel.from_pretrained( + "google/text-bison@001" ) response = model.predict( diff --git a/tests/system/vertexai/test_bigframes_sklearn.py b/tests/system/vertexai/test_bigframes_sklearn.py deleted file mode 100644 index addc4f8363..0000000000 --- a/tests/system/vertexai/test_bigframes_sklearn.py +++ /dev/null @@ -1,190 +0,0 @@ -# -*- coding: utf-8 -*- - -# Copyright 2023 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-# - -import os -from unittest import mock - -from google.cloud import aiplatform -import vertexai -from tests.system.aiplatform import e2e_base -from vertexai.preview._workflow.executor import training -from vertexai.preview._workflow.serialization_engine import ( - serializers, -) -import numpy as np - -import pytest -from sklearn.linear_model import LogisticRegression -from sklearn.preprocessing import StandardScaler - -import bigframes.pandas as bf -from bigframes.ml.model_selection import train_test_split as bf_train_test_split - - -bf.options.bigquery.location = "us" # Dataset is in 'us' not 'us-central1' -bf.options.bigquery.project = e2e_base._PROJECT - - -# Wrap classes -StandardScaler = vertexai.preview.remote(StandardScaler) -LogisticRegression = vertexai.preview.remote(LogisticRegression) - - -@mock.patch.object( - training, - "VERTEX_AI_DEPENDENCY_PATH", - "google-cloud-aiplatform[preview] @ git+https://github.com/googleapis/" - f"python-aiplatform.git@{os.environ['KOKORO_GIT_COMMIT']}" - if os.environ.get("KOKORO_GIT_COMMIT") - else "google-cloud-aiplatform[preview] @ git+https://github.com/googleapis/python-aiplatform.git@main", -) -@mock.patch.object( - training, - "VERTEX_AI_DEPENDENCY_PATH_AUTOLOGGING", - "google-cloud-aiplatform[preview,autologging] @ git+https://github.com/googleapis/" - f"python-aiplatform.git@{os.environ['KOKORO_GIT_COMMIT']}" - if os.environ.get("KOKORO_GIT_COMMIT") - else "google-cloud-aiplatform[preview,autologging] @ git+https://github.com/googleapis/python-aiplatform.git@main", -) -# To avoid flaky test due to autolog enabled in parallel tests -@mock.patch.object(vertexai.preview.initializer._Config, "autolog", False) -@pytest.mark.usefixtures( - "prepare_staging_bucket", "delete_staging_bucket", "tear_down_resources" -) -class TestRemoteExecutionBigframesSklearn(e2e_base.TestEndToEnd): - - _temp_prefix = "temp-vertexai-remote-execution" - - def test_remote_execution_sklearn(self, shared_state): - # Initialize vertexai - vertexai.init( - project=e2e_base._PROJECT, - location=e2e_base._LOCATION, - staging_bucket=f"gs://{shared_state['staging_bucket_name']}", - ) - - # Prepare dataset - df = bf.read_gbq("bigquery-public-data.ml_datasets.iris") - species_categories = { - "versicolor": 0, - "virginica": 1, - "setosa": 2, - } - df["species"] = df["species"].map(species_categories) - index_col = "index" - df.index.name = index_col - feature_columns = df[ - ["sepal_length", "sepal_width", "petal_length", "petal_width"] - ] - label_columns = df[["species"]] - train_X, test_X, train_y, _ = bf_train_test_split( - feature_columns, label_columns, test_size=0.2 - ) - - # Remote fit_transform on bf train dataset - vertexai.preview.init(remote=True) - transformer = StandardScaler() - transformer.fit_transform.vertex.remote_config.display_name = ( - self._make_display_name("bigframes-fit-transform") - ) - X_train = transformer.fit_transform(train_X) - - # Assert the right serializer is being used - remote_job = aiplatform.CustomJob.list( - filter=f'display_name="{transformer.fit_transform.vertex.remote_config.display_name}"' - )[0] - base_path = remote_job.job_spec.base_output_directory.output_uri_prefix - - input_estimator_metadata = serializers._get_metadata( - os.path.join(base_path, "input/input_estimator") - ) - assert input_estimator_metadata["serializer"] == "SklearnEstimatorSerializer" - - output_estimator_metadata = serializers._get_metadata( - 
os.path.join(base_path, "output/output_estimator") - ) - assert output_estimator_metadata["serializer"] == "SklearnEstimatorSerializer" - - train_x_metadata = serializers._get_metadata(os.path.join(base_path, "input/X")) - assert train_x_metadata["serializer"] == "BigframeSerializer" - assert train_x_metadata["framework"] == "sklearn" - - shared_state["resources"] = [remote_job] - - assert type(X_train) is np.ndarray - assert X_train.shape == (120, 4) - - # Remote transform on bf test dataset - transformer.transform.vertex.remote_config.display_name = ( - self._make_display_name("bigframes-transform") - ) - X_test = transformer.transform(test_X) - - # Assert the right serializer is being used - remote_job = aiplatform.CustomJob.list( - filter=f'display_name="{transformer.transform.vertex.remote_config.display_name}"' - )[0] - base_path = remote_job.job_spec.base_output_directory.output_uri_prefix - - input_estimator_metadata = serializers._get_metadata( - os.path.join(base_path, "input/input_estimator") - ) - assert input_estimator_metadata["serializer"] == "SklearnEstimatorSerializer" - - test_x_metadata = serializers._get_metadata(os.path.join(base_path, "input/X")) - assert test_x_metadata["serializer"] == "BigframeSerializer" - assert train_x_metadata["framework"] == "sklearn" - - shared_state["resources"].append(remote_job) - - assert type(X_test) is np.ndarray - assert X_test.shape == (30, 4) - - # Remote training on sklearn - vertexai.preview.init(remote=True) - - model = LogisticRegression(warm_start=True) - model.fit.vertex.remote_config.display_name = self._make_display_name( - "bigframes-sklearn-training" - ) - model.fit(train_X, train_y) - - # Assert the right serializer is being used - remote_job = aiplatform.CustomJob.list( - filter=f'display_name="{model.fit.vertex.remote_config.display_name}"' - )[0] - base_path = remote_job.job_spec.base_output_directory.output_uri_prefix - - input_estimator_metadata = serializers._get_metadata( - os.path.join(base_path, "input/input_estimator") - ) - assert input_estimator_metadata["serializer"] == "SklearnEstimatorSerializer" - - output_estimator_metadata = serializers._get_metadata( - os.path.join(base_path, "output/output_estimator") - ) - assert output_estimator_metadata["serializer"] == "SklearnEstimatorSerializer" - - train_x_metadata = serializers._get_metadata(os.path.join(base_path, "input/X")) - assert train_x_metadata["serializer"] == "BigframeSerializer" - assert train_x_metadata["framework"] == "sklearn" - - train_y_metadata = serializers._get_metadata(os.path.join(base_path, "input/y")) - assert train_y_metadata["serializer"] == "BigframeSerializer" - assert train_y_metadata["framework"] == "sklearn" - - shared_state["resources"].append(remote_job) diff --git a/tests/system/vertexai/test_bigframes_tensorflow.py b/tests/system/vertexai/test_bigframes_tensorflow.py deleted file mode 100644 index 5759008cbb..0000000000 --- a/tests/system/vertexai/test_bigframes_tensorflow.py +++ /dev/null @@ -1,130 +0,0 @@ -# -*- coding: utf-8 -*- - -# Copyright 2023 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-# See the License for the specific language governing permissions and -# limitations under the License. -# - -import os -from unittest import mock - -from google.cloud import aiplatform -import vertexai -from tests.system.aiplatform import e2e_base -from vertexai.preview._workflow.executor import training -from vertexai.preview._workflow.serialization_engine import ( - serializers, -) -import pytest -from tensorflow import keras - -import bigframes.pandas as bf -from bigframes.ml.model_selection import train_test_split as bf_train_test_split - - -bf.options.bigquery.location = "us" # Dataset is in 'us' not 'us-central1' -bf.options.bigquery.project = e2e_base._PROJECT - - -# Wrap classes -keras.Sequential = vertexai.preview.remote(keras.Sequential) - - -@mock.patch.object( - training, - "VERTEX_AI_DEPENDENCY_PATH", - "google-cloud-aiplatform[preview] @ git+https://github.com/googleapis/" - f"python-aiplatform.git@{os.environ['KOKORO_GIT_COMMIT']}" - if os.environ.get("KOKORO_GIT_COMMIT") - else "google-cloud-aiplatform[preview] @ git+https://github.com/googleapis/python-aiplatform.git@main", -) -@mock.patch.object( - training, - "VERTEX_AI_DEPENDENCY_PATH_AUTOLOGGING", - "google-cloud-aiplatform[preview,autologging] @ git+https://github.com/googleapis/" - f"python-aiplatform.git@{os.environ['KOKORO_GIT_COMMIT']}" - if os.environ.get("KOKORO_GIT_COMMIT") - else "google-cloud-aiplatform[preview,autologging] @ git+https://github.com/googleapis/python-aiplatform.git@main", -) -# To avoid flaky test due to autolog enabled in parallel tests -@mock.patch.object(vertexai.preview.initializer._Config, "autolog", False) -@pytest.mark.usefixtures( - "prepare_staging_bucket", "delete_staging_bucket", "tear_down_resources" -) -class TestRemoteExecutionBigframesTensorflow(e2e_base.TestEndToEnd): - _temp_prefix = "temp-vertexai-remote-execution" - - # TODO(b/313893962): Re-enable after fixing the broken test. - @pytest.mark.skip( - reason="Known issue for removing tensorflow from the top level imports." 
- ) - def test_remote_execution_keras(self, shared_state): - # Initialize vertexai - vertexai.init( - project=e2e_base._PROJECT, - location=e2e_base._LOCATION, - staging_bucket=f"gs://{shared_state['staging_bucket_name']}", - ) - - # Prepare dataset - df = bf.read_gbq("bigquery-public-data.ml_datasets.iris") - - species_categories = { - "versicolor": 0, - "virginica": 1, - "setosa": 2, - } - df["species"] = df["species"].map(species_categories) - - train, _ = bf_train_test_split(df, test_size=0.2) - - # Remote GPU training on Keras - vertexai.preview.init(remote=True) - - model = keras.Sequential( - [keras.layers.Dense(5, input_shape=(4,)), keras.layers.Softmax()] - ) - model.compile(optimizer="adam", loss="mean_squared_error") - model.fit.vertex.set_config( - enable_cuda=True, - display_name=self._make_display_name("bigframes-keras-training"), - ) - model.fit.vertex.remote_config.serializer_args[train] = { - "batch_size": 10, - "target_col": "species", - } - - # Train model on Vertex - model.fit(train, epochs=10) - - # Assert the right serializer is being used - remote_job = aiplatform.CustomJob.list( - filter=f'display_name="{model.fit.vertex.remote_config.display_name}"' - )[0] - base_path = remote_job.job_spec.base_output_directory.output_uri_prefix - - input_estimator_metadata = serializers._get_metadata( - os.path.join(base_path, "input/input_estimator") - ) - assert input_estimator_metadata["serializer"] == "KerasModelSerializer" - - output_estimator_metadata = serializers._get_metadata( - os.path.join(base_path, "output/output_estimator") - ) - assert output_estimator_metadata["serializer"] == "KerasModelSerializer" - - train_x_metadata = serializers._get_metadata(os.path.join(base_path, "input/x")) - assert train_x_metadata["serializer"] == "BigframeSerializer" - assert train_x_metadata["framework"] == "tensorflow" - - shared_state["resources"] = [remote_job] diff --git a/tests/system/vertexai/test_pytorch.py b/tests/system/vertexai/test_pytorch.py deleted file mode 100644 index 9c6ab8b606..0000000000 --- a/tests/system/vertexai/test_pytorch.py +++ /dev/null @@ -1,216 +0,0 @@ -# -*- coding: utf-8 -*- - -# Copyright 2023 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-# - -import os -from unittest import mock - -from google.cloud import aiplatform -import vertexai -from tests.system.aiplatform import e2e_base -from vertexai.preview._workflow.executor import training -from vertexai.preview._workflow.serialization_engine import ( - serializers, -) -import pytest -from sklearn.datasets import load_iris -import torch -from sklearn.model_selection import train_test_split -from sklearn.preprocessing import StandardScaler - - -@mock.patch.object( - training, - "VERTEX_AI_DEPENDENCY_PATH", - "google-cloud-aiplatform[preview] @ git+https://github.com/googleapis/" - f"python-aiplatform.git@{os.environ['KOKORO_GIT_COMMIT']}" - if os.environ.get("KOKORO_GIT_COMMIT") - else "google-cloud-aiplatform[preview] @ git+https://github.com/googleapis/python-aiplatform.git@main", -) -@mock.patch.object( - training, - "VERTEX_AI_DEPENDENCY_PATH_AUTOLOGGING", - "google-cloud-aiplatform[preview,autologging] @ git+https://github.com/googleapis/" - f"python-aiplatform.git@{os.environ['KOKORO_GIT_COMMIT']}" - if os.environ.get("KOKORO_GIT_COMMIT") - else "google-cloud-aiplatform[preview,autologging] @ git+https://github.com/googleapis/python-aiplatform.git@main", -) -# To avoid flaky test due to autolog enabled in parallel tests -@mock.patch.object(vertexai.preview.initializer._Config, "autolog", False) -@pytest.mark.usefixtures( - "prepare_staging_bucket", "delete_staging_bucket", "tear_down_resources" -) -class TestRemoteExecutionPytorch(e2e_base.TestEndToEnd): - - _temp_prefix = "temp-vertexai-remote-execution" - - def test_remote_execution_pytorch(self, shared_state): - # Define the pytorch custom model - class TorchLogisticRegression(vertexai.preview.VertexModel, torch.nn.Module): - def __init__(self, input_size: int, output_size: int): - torch.nn.Module.__init__(self) - vertexai.preview.VertexModel.__init__(self) - self.linear = torch.nn.Linear(input_size, output_size) - self.softmax = torch.nn.Softmax(dim=1) - - def forward(self, x): - return self.softmax(self.linear(x)) - - @vertexai.preview.developer.mark.train() - def train(self, dataloader, num_epochs, lr): - criterion = torch.nn.CrossEntropyLoss() - optimizer = torch.optim.SGD(self.parameters(), lr=lr) - - for t in range(num_epochs): - for idx, batch in enumerate(dataloader): - # move data to the same device as model - device = next(self.parameters()).device - x, y = batch[0].to(device), batch[1].to(device) - - optimizer.zero_grad() - pred = self(x) - loss = criterion(pred, y) - loss.backward() - optimizer.step() - - @vertexai.preview.developer.mark.predict() - def predict(self, X): - X = torch.tensor(X).to(torch.float32) - with torch.no_grad(): - pred = torch.argmax(self(X), dim=1) - return pred - - # Initialize vertexai - vertexai.init( - project=e2e_base._PROJECT, - location=e2e_base._LOCATION, - staging_bucket=f"gs://{shared_state['staging_bucket_name']}", - ) - - # Prepare dataset - dataset = load_iris() - - X, X_retrain, y, y_retrain = train_test_split( - dataset.data, dataset.target, test_size=0.60, random_state=42 - ) - X_train, X_test, y_train, y_test = train_test_split( - X, y, test_size=0.20, random_state=42 - ) - - transformer = StandardScaler() - X_train = transformer.fit_transform(X_train) - X_test = transformer.transform(X_test) - X_retrain = transformer.transform(X_retrain) - - train_loader = torch.utils.data.DataLoader( - torch.utils.data.TensorDataset( - torch.tensor(X_train).to(torch.float32), 
- torch.tensor(y_train), - ), - batch_size=10, - shuffle=True, - ) - - retrain_loader = torch.utils.data.DataLoader( - torch.utils.data.TensorDataset( - torch.tensor(X_retrain).to(torch.float32), - torch.tensor(y_retrain), - ), - batch_size=10, - shuffle=True, - ) - - # Remote CPU training on Torch custom model - vertexai.preview.init(remote=True) - - model = TorchLogisticRegression(4, 3) - model.train.vertex.remote_config.display_name = self._make_display_name( - "pytorch-cpu-training" - ) - model.train(train_loader, num_epochs=100, lr=0.05) - - # Assert the right serializer is being used - remote_job = aiplatform.CustomJob.list( - filter=f'display_name="{model.train.vertex.remote_config.display_name}"' - )[0] - base_path = remote_job.job_spec.base_output_directory.output_uri_prefix - - input_estimator_metadata = serializers._get_metadata( - os.path.join(base_path, "input/input_estimator") - ) - assert input_estimator_metadata["serializer"] == "TorchModelSerializer" - - output_estimator_metadata = serializers._get_metadata( - os.path.join(base_path, "output/output_estimator") - ) - assert output_estimator_metadata["serializer"] == "TorchModelSerializer" - - train_loader_metadata = serializers._get_metadata( - os.path.join(base_path, "input/dataloader") - ) - assert train_loader_metadata["serializer"] == "TorchDataLoaderSerializer" - - shared_state["resources"] = [remote_job] - - # Remote prediction on Torch custom model - model.predict.vertex.remote_config.display_name = self._make_display_name( - "pytorch-prediction" - ) - model.predict(X_test) - - # Add prediction job to teardown resource - remote_job = aiplatform.CustomJob.list( - filter=f'display_name="{model.predict.vertex.remote_config.display_name}"' - )[0] - shared_state["resources"].append(remote_job) - - # Register trained model - registered_model = vertexai.preview.register(model) - shared_state["resources"].append(registered_model) - - # Load the registered model - pulled_model = vertexai.preview.from_pretrained( - model_name=registered_model.resource_name - ) - - # Uptrain the pretrained model on CPU - pulled_model.train.vertex.remote_config.display_name = self._make_display_name( - "pytorch-cpu-uptraining" - ) - pulled_model.train(retrain_loader, num_epochs=100, lr=0.05) - - # Assert the right serializer is being used - remote_job = aiplatform.CustomJob.list( - filter=f'display_name="{pulled_model.train.vertex.remote_config.display_name}"' - )[0] - base_path = remote_job.job_spec.base_output_directory.output_uri_prefix - - input_estimator_metadata = serializers._get_metadata( - os.path.join(base_path, "input/input_estimator") - ) - assert input_estimator_metadata["serializer"] == "TorchModelSerializer" - - output_estimator_metadata = serializers._get_metadata( - os.path.join(base_path, "output/output_estimator") - ) - assert output_estimator_metadata["serializer"] == "TorchModelSerializer" - - train_loader_metadata = serializers._get_metadata( - os.path.join(base_path, "input/dataloader") - ) - assert train_loader_metadata["serializer"] == "TorchDataLoaderSerializer" - - shared_state["resources"].append(remote_job) diff --git a/tests/system/vertexai/test_sklearn.py b/tests/system/vertexai/test_sklearn.py deleted file mode 100644 index c458737126..0000000000 --- a/tests/system/vertexai/test_sklearn.py +++ /dev/null @@ -1,203 +0,0 @@ -# -*- coding: utf-8 -*- - -# Copyright 2023 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
-# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# - -import os -from unittest import mock - -from google.cloud import aiplatform -import vertexai -from tests.system.aiplatform import e2e_base -from vertexai.preview._workflow.executor import training -from vertexai.preview._workflow.serialization_engine import ( - serializers, -) -import pandas as pd -import pytest -from sklearn.datasets import load_iris -from sklearn.linear_model import LogisticRegression -from sklearn.model_selection import train_test_split -from sklearn.preprocessing import StandardScaler - - -# Wrap classes -StandardScaler = vertexai.preview.remote(StandardScaler) -LogisticRegression = vertexai.preview.remote(LogisticRegression) - - -@mock.patch.object( - training, - "VERTEX_AI_DEPENDENCY_PATH", - "google-cloud-aiplatform[preview] @ git+https://github.com/googleapis/" - f"python-aiplatform.git@{os.environ['KOKORO_GIT_COMMIT']}" - if os.environ.get("KOKORO_GIT_COMMIT") - else "google-cloud-aiplatform[preview] @ git+https://github.com/googleapis/python-aiplatform.git@main", -) -@mock.patch.object( - training, - "VERTEX_AI_DEPENDENCY_PATH_AUTOLOGGING", - "google-cloud-aiplatform[preview,autologging] @ git+https://github.com/googleapis/" - f"python-aiplatform.git@{os.environ['KOKORO_GIT_COMMIT']}" - if os.environ.get("KOKORO_GIT_COMMIT") - else "google-cloud-aiplatform[preview,autologging] @ git+https://github.com/googleapis/python-aiplatform.git@main", -) -# To avoid flaky test due to autolog enabled in parallel tests -@mock.patch.object(vertexai.preview.initializer._Config, "autolog", False) -@pytest.mark.usefixtures( - "prepare_staging_bucket", "delete_staging_bucket", "tear_down_resources" -) -class TestRemoteExecutionSklearn(e2e_base.TestEndToEnd): - - _temp_prefix = "temp-vertexai-remote-execution" - - def test_remote_execution_sklearn(self, shared_state): - # Initialize vertexai - vertexai.init( - project=e2e_base._PROJECT, - location=e2e_base._LOCATION, - staging_bucket=f"gs://{shared_state['staging_bucket_name']}", - ) - - # Prepare dataset - dataset = load_iris() - X, X_retrain, y, y_retrain = train_test_split( - dataset.data, dataset.target, test_size=0.60, random_state=42 - ) - X_train, X_test, y_train, y_test = train_test_split( - X, y, test_size=0.20, random_state=42 - ) - - # Remote fit_transform on train dataset - vertexai.preview.init(remote=True) - - transformer = StandardScaler() - transformer.fit_transform.vertex.set_config( - display_name=self._make_display_name("fit-transform"), - ) - X_train = transformer.fit_transform(X_train) - - # Assert the right serializer is being used - remote_job = aiplatform.CustomJob.list( - filter=f'display_name="{transformer.fit_transform.vertex.remote_config.display_name}"' - )[0] - base_path = remote_job.job_spec.base_output_directory.output_uri_prefix - - input_estimator_metadata = serializers._get_metadata( - os.path.join(base_path, "input/input_estimator") - ) - assert input_estimator_metadata["serializer"] == "SklearnEstimatorSerializer" - - output_estimator_metadata = 
serializers._get_metadata( - os.path.join(base_path, "output/output_estimator") - ) - assert output_estimator_metadata["serializer"] == "SklearnEstimatorSerializer" - - shared_state["resources"] = [remote_job] - - # Remote transform on test dataset - transformer.transform.vertex.set_config( - display_name=self._make_display_name("transform"), - ) - X_test = transformer.transform(X_test) - - # Assert the right serializer is being used - remote_job = aiplatform.CustomJob.list( - filter=f'display_name="{transformer.transform.vertex.remote_config.display_name}"' - )[0] - base_path = remote_job.job_spec.base_output_directory.output_uri_prefix - - input_estimator_metadata = serializers._get_metadata( - os.path.join(base_path, "input/input_estimator") - ) - assert input_estimator_metadata["serializer"] == "SklearnEstimatorSerializer" - - shared_state["resources"].append(remote_job) - - # Local transform on retrain data - vertexai.preview.init(remote=False) - X_retrain = transformer.transform(X_retrain) - # Transform retrain dataset to pandas dataframe - X_retrain_df = pd.DataFrame(X_retrain, columns=dataset.feature_names) - y_retrain_df = pd.DataFrame(y_retrain, columns=["class"]) - - # Remote training on sklearn - vertexai.preview.init(remote=True) - - model = LogisticRegression(warm_start=True) - model.fit.vertex.remote_config.display_name = self._make_display_name( - "sklearn-training" - ) - model.fit(X_train, y_train) - - # Assert the right serializer is being used - remote_job = aiplatform.CustomJob.list( - filter=f'display_name="{model.fit.vertex.remote_config.display_name}"' - )[0] - base_path = remote_job.job_spec.base_output_directory.output_uri_prefix - - input_estimator_metadata = serializers._get_metadata( - os.path.join(base_path, "input/input_estimator") - ) - assert input_estimator_metadata["serializer"] == "SklearnEstimatorSerializer" - - output_estimator_metadata = serializers._get_metadata( - os.path.join(base_path, "output/output_estimator") - ) - assert output_estimator_metadata["serializer"] == "SklearnEstimatorSerializer" - - shared_state["resources"].append(remote_job) - - # Remote prediction on sklearn - model.predict.vertex.remote_config.display_name = self._make_display_name( - "sklearn-prediction" - ) - model.predict(X_test) - - # Add prediction job to teardown resource - remote_job = aiplatform.CustomJob.list( - filter=f'display_name="{model.predict.vertex.remote_config.display_name}"' - )[0] - shared_state["resources"].append(remote_job) - - # Register trained model - registered_model = vertexai.preview.register(model) - shared_state["resources"].append(registered_model) - - # Load the registered model - pulled_model = vertexai.preview.from_pretrained( - model_name=registered_model.resource_name - ) - - # Retrain model with pandas df on Vertex - pulled_model.fit(X_retrain_df, y_retrain_df) - - # Assert the right serializer is being used - remote_job = aiplatform.CustomJob.list( - filter=f'display_name="{pulled_model.fit.vertex.remote_config.display_name}"' - )[0] - base_path = remote_job.job_spec.base_output_directory.output_uri_prefix - - input_estimator_metadata = serializers._get_metadata( - os.path.join(base_path, "input/input_estimator") - ) - assert input_estimator_metadata["serializer"] == "SklearnEstimatorSerializer" - - output_estimator_metadata = serializers._get_metadata( - os.path.join(base_path, "output/output_estimator") - ) - assert output_estimator_metadata["serializer"] == "SklearnEstimatorSerializer" - - shared_state["resources"].append(remote_job) 
diff --git a/tests/system/vertexai/test_tensorflow.py b/tests/system/vertexai/test_tensorflow.py deleted file mode 100644 index 9f797703da..0000000000 --- a/tests/system/vertexai/test_tensorflow.py +++ /dev/null @@ -1,182 +0,0 @@ -# -*- coding: utf-8 -*- - -# Copyright 2023 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# - -import os -from unittest import mock - -from google.cloud import aiplatform -import vertexai -from tests.system.aiplatform import e2e_base -from vertexai.preview._workflow.executor import training -from vertexai.preview._workflow.serialization_engine import ( - serializers, -) -import pytest -from sklearn.datasets import load_iris -import tensorflow as tf -from tensorflow import keras -from sklearn.model_selection import train_test_split -from sklearn.preprocessing import StandardScaler - - -# Wrap classes -keras.Sequential = vertexai.preview.remote(keras.Sequential) - - -@mock.patch.object( - training, - "VERTEX_AI_DEPENDENCY_PATH", - "google-cloud-aiplatform[preview] @ git+https://github.com/googleapis/" - f"python-aiplatform.git@{os.environ['KOKORO_GIT_COMMIT']}" - if os.environ.get("KOKORO_GIT_COMMIT") - else "google-cloud-aiplatform[preview] @ git+https://github.com/googleapis/python-aiplatform.git@main", -) -@mock.patch.object( - training, - "VERTEX_AI_DEPENDENCY_PATH_AUTOLOGGING", - "google-cloud-aiplatform[preview,autologging] @ git+https://github.com/googleapis/" - f"python-aiplatform.git@{os.environ['KOKORO_GIT_COMMIT']}" - if os.environ.get("KOKORO_GIT_COMMIT") - else "google-cloud-aiplatform[preview,autologging] @ git+https://github.com/googleapis/python-aiplatform.git@main", -) -# To avoid flaky test due to autolog enabled in parallel tests -@mock.patch.object(vertexai.preview.initializer._Config, "autolog", False) -@pytest.mark.usefixtures( - "prepare_staging_bucket", "delete_staging_bucket", "tear_down_resources" -) -class TestRemoteExecutionTensorflow(e2e_base.TestEndToEnd): - - _temp_prefix = "temp-vertexai-remote-execution" - - # TODO(b/313893962): Re-enable after fixing the broken test. - @pytest.mark.skip( - reason="Known issue for removing tensorflow from the top level imports." 
- ) - def test_remote_execution_keras(self, shared_state): - # Initialize vertexai - vertexai.init( - project=e2e_base._PROJECT, - location=e2e_base._LOCATION, - staging_bucket=f"gs://{shared_state['staging_bucket_name']}", - ) - - # Prepare dataset - dataset = load_iris() - - X, X_retrain, y, y_retrain = train_test_split( - dataset.data, dataset.target, test_size=0.60, random_state=42 - ) - X_train, X_test, y_train, y_test = train_test_split( - X, y, test_size=0.20, random_state=42 - ) - - transformer = StandardScaler() - X_train = transformer.fit_transform(X_train) - X_test = transformer.transform(X_test) - X_retrain = transformer.transform(X_retrain) - - tf_train_dataset = tf.data.Dataset.from_tensor_slices((X_train, y_train)) - tf_train_dataset = tf_train_dataset.shuffle(buffer_size=64).batch(32) - - tf_retrain_dataset = tf.data.Dataset.from_tensor_slices((X_retrain, y_retrain)) - tf_retrain_dataset = tf_retrain_dataset.shuffle(buffer_size=64).batch(32) - - tf_test_dataset = tf.data.Dataset.from_tensor_slices((X_test, y_test)) - tf_prediction_test_data = tf_test_dataset - tf_remote_prediction_test_data = tf_prediction_test_data.batch(32) - - # Remote GPU training on Keras - vertexai.preview.init(remote=True) - - model = keras.Sequential( - [keras.layers.Dense(5, input_shape=(4,)), keras.layers.Softmax()] - ) - model.compile(optimizer="adam", loss="mean_squared_error") - model.fit.vertex.set_config( - enable_cuda=True, display_name=self._make_display_name("keras-gpu-training") - ) - model.fit(tf_train_dataset, epochs=10) - - # Assert the right serializer is being used - remote_job = aiplatform.CustomJob.list( - filter=f'display_name="{model.fit.vertex.remote_config.display_name}"' - )[0] - base_path = remote_job.job_spec.base_output_directory.output_uri_prefix - - input_estimator_metadata = serializers._get_metadata( - os.path.join(base_path, "input/input_estimator") - ) - assert input_estimator_metadata["serializer"] == "KerasModelSerializer" - - output_estimator_metadata = serializers._get_metadata( - os.path.join(base_path, "output/output_estimator") - ) - assert output_estimator_metadata["serializer"] == "KerasModelSerializer" - - train_x_metadata = serializers._get_metadata(os.path.join(base_path, "input/x")) - assert train_x_metadata["serializer"] == "TFDatasetSerializer" - - shared_state["resources"] = [remote_job] - - # Remote prediction on keras - model.predict.vertex.remote_config.display_name = self._make_display_name( - "keras-prediction" - ) - model.predict(tf_remote_prediction_test_data) - - # Add prediction job to teardown resource - remote_job = aiplatform.CustomJob.list( - filter=f'display_name="{model.predict.vertex.remote_config.display_name}"' - )[0] - shared_state["resources"].append(remote_job) - - # Register trained model - registered_model = vertexai.preview.register(model) - shared_state["resources"].append(registered_model) - - # Load the registered model - pulled_model = vertexai.preview.from_pretrained( - model_name=registered_model.resource_name - ) - - # Uptrain the pretrained model on CPU - pulled_model.fit.vertex.remote_config.enable_cuda = False - pulled_model.fit.vertex.remote_config.display_name = self._make_display_name( - "keras-cpu-uptraining" - ) - pulled_model.fit(tf_retrain_dataset, epochs=10) - - # Assert the right serializer is being used - remote_job = aiplatform.CustomJob.list( - filter=f'display_name="{pulled_model.fit.vertex.remote_config.display_name}"' - )[0] - base_path = remote_job.job_spec.base_output_directory.output_uri_prefix - - 
input_estimator_metadata = serializers._get_metadata( - os.path.join(base_path, "input/input_estimator") - ) - assert input_estimator_metadata["serializer"] == "KerasModelSerializer" - - output_estimator_metadata = serializers._get_metadata( - os.path.join(base_path, "output/output_estimator") - ) - assert output_estimator_metadata["serializer"] == "KerasModelSerializer" - - train_x_metadata = serializers._get_metadata(os.path.join(base_path, "input/x")) - assert train_x_metadata["serializer"] == "TFDatasetSerializer" - - shared_state["resources"].append(remote_job) From 6592042de03bb33c3ac20ca3f195062433a6ebb4 Mon Sep 17 00:00:00 2001 From: Sasha Sobran Date: Thu, 6 Jun 2024 13:11:58 -0700 Subject: [PATCH 32/36] chore: unbreak preview test for TextGenerationModel PiperOrigin-RevId: 640997095 --- tests/system/aiplatform/test_language_models.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/tests/system/aiplatform/test_language_models.py b/tests/system/aiplatform/test_language_models.py index 6756ef651c..2b6b0f5070 100644 --- a/tests/system/aiplatform/test_language_models.py +++ b/tests/system/aiplatform/test_language_models.py @@ -127,14 +127,14 @@ def test_text_generation_streaming(self, api_transport): assert response.text or response.is_blocked @pytest.mark.parametrize("api_transport", ["grpc", "rest"]) - def test_preview_text_embedding_from_pretrained(self, api_transport): + def test_preview_text_generation_from_pretrained(self, api_transport): aiplatform.init( project=e2e_base._PROJECT, location=e2e_base._LOCATION, api_transport=api_transport, ) - model = preview_language_models.TextEmbeddingModel.from_pretrained( + model = preview_language_models.TextGenerationModel.from_pretrained( "google/text-bison@001" ) From 37875b507f25c31ac4a84e4fefe3cbba565682e3 Mon Sep 17 00:00:00 2001 From: Amy Wu Date: Thu, 6 Jun 2024 15:18:39 -0700 Subject: [PATCH 33/36] feat: Enable Ray Job submission without VPC peering PiperOrigin-RevId: 641037130 --- .../aiplatform/vertex_ray/dashboard_sdk.py | 23 ++++++++++-- tests/unit/vertex_ray/test_dashboard_sdk.py | 36 +++++++++++++++++++ 2 files changed, 56 insertions(+), 3 deletions(-) diff --git a/google/cloud/aiplatform/vertex_ray/dashboard_sdk.py b/google/cloud/aiplatform/vertex_ray/dashboard_sdk.py index 90a670f72f..fd90fe8e6e 100644 --- a/google/cloud/aiplatform/vertex_ray/dashboard_sdk.py +++ b/google/cloud/aiplatform/vertex_ray/dashboard_sdk.py @@ -69,10 +69,27 @@ def get_job_submission_client_cluster_info( "RAY_HEAD_NODE_INTERNAL_IP", None ) if head_address is None: - raise RuntimeError( - "[Ray on Vertex AI]: Unable to obtain a response from the backend." + # No peering. Try to get the dashboard address. + dashboard_address = response.resource_runtime.access_uris.get( + "RAY_DASHBOARD_URI", None ) - + if dashboard_address is None: + raise RuntimeError( + "[Ray on Vertex AI]: Unable to obtain a response from the backend." + ) + if _validation_utils.valid_dashboard_address(dashboard_address): + bearer_token = _validation_utils.get_bearer_token() + if kwargs.get("headers", None) is None: + kwargs["headers"] = { + "Content-Type": "application/json", + "Authorization": "Bearer {}".format(bearer_token), + } + return oss_dashboard_sdk.get_job_submission_client_cluster_info( + address=dashboard_address, + _use_tls=True, + *args, + **kwargs, + ) # Assume that head node internal IP in a form of xxx.xxx.xxx.xxx:10001. # Ray-on-Vertex cluster serves the Dashboard at port 8888 instead of # the default 8251. 
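With this change, `get_job_submission_client_cluster_info` falls back to the cluster's `RAY_DASHBOARD_URI` (adding a bearer-token `Authorization` header) when no peered head-node IP is available, so jobs can be submitted to a Ray on Vertex AI cluster without VPC peering. A rough usage sketch follows; the project number and cluster name are placeholders, and the `vertex_ray://` address form assumes Ray resolves the custom scheme through this helper module.

    # Minimal sketch: submit a Ray job to a Vertex AI cluster without VPC peering.
    # "123" and "my-cluster" are placeholder identifiers.
    from google.cloud import aiplatform
    from ray.job_submission import JobSubmissionClient

    aiplatform.init(project="my-project", location="us-central1")

    # The vertex_ray:// address is resolved via get_job_submission_client_cluster_info,
    # which now returns the dashboard address and auth headers when peering is absent.
    client = JobSubmissionClient(
        "vertex_ray://projects/123/locations/us-central1/persistentResources/my-cluster"
    )
    job_id = client.submit_job(
        entrypoint="python train.py",
        runtime_env={"working_dir": ".", "pip": ["scikit-learn"]},
    )
    print(client.get_job_status(job_id))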
diff --git a/tests/unit/vertex_ray/test_dashboard_sdk.py b/tests/unit/vertex_ray/test_dashboard_sdk.py index e752ddab33..21ddb574d9 100644 --- a/tests/unit/vertex_ray/test_dashboard_sdk.py +++ b/tests/unit/vertex_ray/test_dashboard_sdk.py @@ -44,6 +44,18 @@ def get_persistent_resource_status_running_mock(): yield get_persistent_resource +@pytest.fixture +def get_persistent_resource_status_running_byosa_public_mock(): + # Cluster with BYOSA and no peering + with mock.patch.object( + vertex_ray.util._gapic_utils, "get_persistent_resource" + ) as get_persistent_resource: + get_persistent_resource.return_value = ( + tc.ClusterConstants.TEST_RESPONSE_RUNNING_1_POOL_BYOSA + ) + yield get_persistent_resource + + @pytest.fixture def get_bearer_token_mock(): with mock.patch.object( @@ -112,3 +124,27 @@ def test_job_submission_client_cluster_info_with_dashboard_address( _use_tls=True, headers=tc.ClusterConstants.TEST_HEADERS, ) + + @pytest.mark.usefixtures( + "get_persistent_resource_status_running_byosa_public_mock", "google_auth_mock" + ) + def test_job_submission_client_cluster_info_with_cluster_name_byosa_public( + self, + ray_get_job_submission_client_cluster_info_mock, + get_bearer_token_mock, + get_project_number_mock, + ): + aiplatform.init(project=tc.ProjectConstants.TEST_GCP_PROJECT_ID) + + vertex_ray.get_job_submission_client_cluster_info( + tc.ClusterConstants.TEST_VERTEX_RAY_PR_ID + ) + get_project_number_mock.assert_called_once_with( + name="projects/{}".format(tc.ProjectConstants.TEST_GCP_PROJECT_ID) + ) + get_bearer_token_mock.assert_called_once_with() + ray_get_job_submission_client_cluster_info_mock.assert_called_once_with( + address=tc.ClusterConstants.TEST_VERTEX_RAY_DASHBOARD_ADDRESS, + _use_tls=True, + headers=tc.ClusterConstants.TEST_HEADERS, + ) From 3b3b8e4838081034d752955ce55c5042eb183aed Mon Sep 17 00:00:00 2001 From: Alexey Volkov Date: Thu, 6 Jun 2024 15:44:27 -0700 Subject: [PATCH 34/36] chore: GenAI - Explicit Caching - Made caching private Also fixed the system test issue. 
PiperOrigin-RevId: 641045530 --- .../system/vertexai/test_generative_models.py | 78 +++++++++---------- tests/unit/vertexai/test_caching.py | 2 +- tests/unit/vertexai/test_generative_models.py | 6 +- .../caching.py => _caching/_caching.py} | 0 .../generative_models/_generative_models.py | 4 +- 5 files changed, 45 insertions(+), 45 deletions(-) rename vertexai/{preview/caching.py => _caching/_caching.py} (100%) diff --git a/tests/system/vertexai/test_generative_models.py b/tests/system/vertexai/test_generative_models.py index 4fb9c877e0..7816831ebe 100644 --- a/tests/system/vertexai/test_generative_models.py +++ b/tests/system/vertexai/test_generative_models.py @@ -29,7 +29,7 @@ from vertexai.preview import ( generative_models as preview_generative_models, ) -from vertexai.preview import caching +from vertexai._caching import _caching as caching GEMINI_MODEL_NAME = "gemini-1.0-pro-002" GEMINI_VISION_MODEL_NAME = "gemini-1.0-pro-vision" @@ -100,44 +100,6 @@ def setup_method(self): credentials=credentials, ) - def test_generate_content_with_cached_content_from_text(self): - cached_content = caching.CachedContent.create( - model_name=GEMINI_15_0514_MODEL_NAME, - system_instruction="Please answer all the questions like a pirate.", - contents=[ - Content.from_dict( - { - "role": "user", - "parts": [ - { - "file_data": { - "mime_type": "application/pdf", - "file_uri": "gs://ucaip-samples-us-central1/sdk_system_test_resources/megatro-llm.pdf", - } - } - for _ in range(10) - ] - + [ - {"text": "Please try to summarize the previous contents."}, - ], - } - ) - ], - ) - - model = generative_models.GenerativeModel.from_cached_content( - cached_content=cached_content - ) - - response = model.generate_content( - "Why is sky blue?", - generation_config=generative_models.GenerationConfig(temperature=0), - ) - try: - assert response.text - finally: - cached_content.delete() - def test_generate_content_from_text(self): model = generative_models.GenerativeModel(GEMINI_MODEL_NAME) response = model.generate_content( @@ -479,3 +441,41 @@ def test_additional_request_metadata(self): generation_config=generative_models.GenerationConfig(temperature=0), ) assert response + + def test_generate_content_with_cached_content_from_text(self): + cached_content = caching.CachedContent.create( + model_name=GEMINI_15_0514_MODEL_NAME, + system_instruction="Please answer all the questions like a pirate.", + contents=[ + Content.from_dict( + { + "role": "user", + "parts": [ + { + "file_data": { + "mime_type": "application/pdf", + "file_uri": "gs://ucaip-samples-us-central1/sdk_system_test_resources/megatro-llm.pdf", + } + } + for _ in range(10) + ] + + [ + {"text": "Please try to summarize the previous contents."}, + ], + } + ) + ], + ) + + model = preview_generative_models.GenerativeModel._from_cached_content( + cached_content=cached_content + ) + + response = model.generate_content( + "Why is sky blue?", + generation_config=generative_models.GenerationConfig(temperature=0), + ) + try: + assert response.text + finally: + cached_content.delete() diff --git a/tests/unit/vertexai/test_caching.py b/tests/unit/vertexai/test_caching.py index d3906865be..30b0025110 100644 --- a/tests/unit/vertexai/test_caching.py +++ b/tests/unit/vertexai/test_caching.py @@ -19,7 +19,7 @@ import datetime import pytest import mock -from vertexai.preview import caching +from vertexai._caching import _caching as caching from google.cloud.aiplatform import initializer import vertexai from google.cloud.aiplatform_v1beta1.types.cached_content import ( diff 
--git a/tests/unit/vertexai/test_generative_models.py b/tests/unit/vertexai/test_generative_models.py index fd988dd67a..407aa64dc5 100644 --- a/tests/unit/vertexai/test_generative_models.py +++ b/tests/unit/vertexai/test_generative_models.py @@ -40,7 +40,7 @@ gen_ai_cache_service, ) from vertexai.generative_models import _function_calling_utils -from vertexai.preview import caching +from vertexai._caching import _caching as caching _TEST_PROJECT = "test-project" @@ -458,7 +458,7 @@ def test_generative_model_from_cached_content( "cached-content-id-in-from-cached-content-test" ) - model = preview_generative_models.GenerativeModel.from_cached_content( + model = preview_generative_models.GenerativeModel._from_cached_content( cached_content=cached_content ) @@ -586,7 +586,7 @@ def test_generate_content_with_cached_content( "cached-content-id-in-from-cached-content-test" ) - model = preview_generative_models.GenerativeModel.from_cached_content( + model = preview_generative_models.GenerativeModel._from_cached_content( cached_content=cached_content ) diff --git a/vertexai/preview/caching.py b/vertexai/_caching/_caching.py similarity index 100% rename from vertexai/preview/caching.py rename to vertexai/_caching/_caching.py diff --git a/vertexai/generative_models/_generative_models.py b/vertexai/generative_models/_generative_models.py index 168c327bcd..d427433c55 100644 --- a/vertexai/generative_models/_generative_models.py +++ b/vertexai/generative_models/_generative_models.py @@ -50,7 +50,7 @@ if TYPE_CHECKING: from vertexai.preview import rag - from vertexai.preview import caching + from vertexai._caching import _caching as caching try: from PIL import Image as PIL_Image # pylint: disable=g-import-not-at-top @@ -2602,7 +2602,7 @@ def start_chat( ) @classmethod - def from_cached_content( + def _from_cached_content( cls, cached_content: "caching.CachedContent", *, From 6cc45bbbea154d087c1dfe4756d4e15f21b1d844 Mon Sep 17 00:00:00 2001 From: Amy Wu Date: Thu, 6 Jun 2024 16:37:30 -0700 Subject: [PATCH 35/36] fix: Set upper bound of setuptools to unbreak public Colab for using vertex_ray namespace PiperOrigin-RevId: 641060544 --- setup.py | 2 ++ 1 file changed, 2 insertions(+) diff --git a/setup.py b/setup.py index c48ac95247..9930b4c380 100644 --- a/setup.py +++ b/setup.py @@ -107,6 +107,8 @@ "ray[default] >= 2.4, <= 2.9.3,!= 2.5.*,!= 2.6.*,!= 2.7.*,!=" " 2.8.*,!=2.9.0,!=2.9.1,!=2.9.2; python_version<'3.11'" ), + # To avoid ImportError: cannot import name 'packaging' from 'pkg)resources' + "setuptools < 70.0.0", # Ray Data v2.4 in Python 3.11 is broken, but got fixed in Ray v2.5. 
"ray[default] >= 2.5, <= 2.9.3; python_version=='3.11'", "google-cloud-bigquery-storage", From 15d963d8d6ac9eb254583eaf2c9b82ea2c75b3ee Mon Sep 17 00:00:00 2001 From: "release-please[bot]" <55107282+release-please[bot]@users.noreply.github.com> Date: Thu, 6 Jun 2024 23:19:09 -0400 Subject: [PATCH 36/36] chore(main): release 1.54.0 (#3849) * chore(main): release 1.54.0 * Update CHANGELOG.md --------- Co-authored-by: release-please[bot] <55107282+release-please[bot]@users.noreply.github.com> Co-authored-by: sasha-gitg <44654632+sasha-gitg@users.noreply.github.com> --- .release-please-manifest.json | 2 +- CHANGELOG.md | 25 +++++++++++++++++++ google/cloud/aiplatform/gapic_version.py | 2 +- .../schema/predict/instance/gapic_version.py | 2 +- .../predict/instance_v1/gapic_version.py | 2 +- .../v1/schema/predict/params/gapic_version.py | 2 +- .../schema/predict/params_v1/gapic_version.py | 2 +- .../predict/prediction/gapic_version.py | 2 +- .../predict/prediction_v1/gapic_version.py | 2 +- .../trainingjob/definition/gapic_version.py | 2 +- .../definition_v1/gapic_version.py | 2 +- .../schema/predict/instance/gapic_version.py | 2 +- .../predict/instance_v1beta1/gapic_version.py | 2 +- .../schema/predict/params/gapic_version.py | 2 +- .../predict/params_v1beta1/gapic_version.py | 2 +- .../predict/prediction/gapic_version.py | 2 +- .../prediction_v1beta1/gapic_version.py | 2 +- .../trainingjob/definition/gapic_version.py | 2 +- .../definition_v1beta1/gapic_version.py | 2 +- google/cloud/aiplatform/version.py | 2 +- google/cloud/aiplatform_v1/gapic_version.py | 2 +- .../cloud/aiplatform_v1beta1/gapic_version.py | 2 +- pypi/_vertex_ai_placeholder/version.py | 2 +- ...t_metadata_google.cloud.aiplatform.v1.json | 2 +- ...adata_google.cloud.aiplatform.v1beta1.json | 2 +- 25 files changed, 49 insertions(+), 24 deletions(-) diff --git a/.release-please-manifest.json b/.release-please-manifest.json index 620ccf0c92..25cd787011 100644 --- a/.release-please-manifest.json +++ b/.release-please-manifest.json @@ -1,3 +1,3 @@ { - ".": "1.53.0" + ".": "1.54.0" } diff --git a/CHANGELOG.md b/CHANGELOG.md index 8259a99f16..801be073c3 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,5 +1,30 @@ # Changelog +## [1.54.0](https://github.com/googleapis/python-aiplatform/compare/v1.53.0...v1.54.0) (2024-06-06) + +### BREAKING CHANGES +* Remove Vertex SDK data science package ([ec4ec8f](https://github.com/googleapis/python-aiplatform/commit/ec4ec8f1214b3da12728c30a002b7f4632f4a90e)) + +### Features + +* Add display experiment run button for Ipython environments ([ba65828](https://github.com/googleapis/python-aiplatform/commit/ba6582856b1d7f9a6ac8f90a3fa5ea6723ac64ab)) +* Add hybrid search for public find_neighbors() call. 
([9d35617](https://github.com/googleapis/python-aiplatform/commit/9d3561738d577129cb222417bf208166825d8043)) +* Enable Ray Job submission without VPC peering ([37875b5](https://github.com/googleapis/python-aiplatform/commit/37875b507f25c31ac4a84e4fefe3cbba565682e3)) +* GenAI - Allowed callable functions to return values directly in Automatic Function Calling ([768af67](https://github.com/googleapis/python-aiplatform/commit/768af6772ade2b67b90a05ae3db95039a3f2786d)) +* GenAI - Release ToolConfig to GA ([bc8b14a](https://github.com/googleapis/python-aiplatform/commit/bc8b14a7c9c632721db9166dc9b63eec17d31afd)) +* Sample code for Vertex AI Feature Store ([6c14e8b](https://github.com/googleapis/python-aiplatform/commit/6c14e8b31bd950ac4f4a862b4e62ead42fe30463)) +* Support VertexTool in langchain template. ([28a3c56](https://github.com/googleapis/python-aiplatform/commit/28a3c56fdcfa4fab819e8f79d235f6576febdfce)) + + +### Bug Fixes + +* Allow non-lro delete method ([c23c0ad](https://github.com/googleapis/python-aiplatform/commit/c23c0ada07146f0e5ce6a787c8255313f7c4a06c)) +* Deep copy dataset before passing it to evaluation ([019b610](https://github.com/googleapis/python-aiplatform/commit/019b6102c2dc98550592cde0adfbb4958faddbef)) +* Ensure model starts with publishers/ when users provide resource path from models/ ([d689331](https://github.com/googleapis/python-aiplatform/commit/d689331af5172cdfe7428333536954e8339f8ab4)) +* Fix failed unit tests due to google-cloud-storage upgrade. ([945b9e4](https://github.com/googleapis/python-aiplatform/commit/945b9e4835149111cd33beaee4301f3d8f05f59d)) +* Generalize RAG files import from Google Drive ([88c6a6a](https://github.com/googleapis/python-aiplatform/commit/88c6a6a4f11285d429c3777f59101e53e4672185)) +* Set upper bound of setuptools to unbreak public Colab for using vertex_ray namespace ([6cc45bb](https://github.com/googleapis/python-aiplatform/commit/6cc45bbbea154d087c1dfe4756d4e15f21b1d844)) + ## [1.53.0](https://github.com/googleapis/python-aiplatform/compare/v1.52.0...v1.53.0) (2024-05-30) diff --git a/google/cloud/aiplatform/gapic_version.py b/google/cloud/aiplatform/gapic_version.py index b2b84effcb..70cd5c996f 100644 --- a/google/cloud/aiplatform/gapic_version.py +++ b/google/cloud/aiplatform/gapic_version.py @@ -13,4 +13,4 @@ # See the License for the specific language governing permissions and # limitations under the License. # -__version__ = "1.53.0" # {x-release-please-version} +__version__ = "1.54.0" # {x-release-please-version} diff --git a/google/cloud/aiplatform/v1/schema/predict/instance/gapic_version.py b/google/cloud/aiplatform/v1/schema/predict/instance/gapic_version.py index b2b84effcb..70cd5c996f 100644 --- a/google/cloud/aiplatform/v1/schema/predict/instance/gapic_version.py +++ b/google/cloud/aiplatform/v1/schema/predict/instance/gapic_version.py @@ -13,4 +13,4 @@ # See the License for the specific language governing permissions and # limitations under the License. 
# -__version__ = "1.53.0" # {x-release-please-version} +__version__ = "1.54.0" # {x-release-please-version} diff --git a/google/cloud/aiplatform/v1/schema/predict/instance_v1/gapic_version.py b/google/cloud/aiplatform/v1/schema/predict/instance_v1/gapic_version.py index b2b84effcb..70cd5c996f 100644 --- a/google/cloud/aiplatform/v1/schema/predict/instance_v1/gapic_version.py +++ b/google/cloud/aiplatform/v1/schema/predict/instance_v1/gapic_version.py @@ -13,4 +13,4 @@ # See the License for the specific language governing permissions and # limitations under the License. # -__version__ = "1.53.0" # {x-release-please-version} +__version__ = "1.54.0" # {x-release-please-version} diff --git a/google/cloud/aiplatform/v1/schema/predict/params/gapic_version.py b/google/cloud/aiplatform/v1/schema/predict/params/gapic_version.py index b2b84effcb..70cd5c996f 100644 --- a/google/cloud/aiplatform/v1/schema/predict/params/gapic_version.py +++ b/google/cloud/aiplatform/v1/schema/predict/params/gapic_version.py @@ -13,4 +13,4 @@ # See the License for the specific language governing permissions and # limitations under the License. # -__version__ = "1.53.0" # {x-release-please-version} +__version__ = "1.54.0" # {x-release-please-version} diff --git a/google/cloud/aiplatform/v1/schema/predict/params_v1/gapic_version.py b/google/cloud/aiplatform/v1/schema/predict/params_v1/gapic_version.py index b2b84effcb..70cd5c996f 100644 --- a/google/cloud/aiplatform/v1/schema/predict/params_v1/gapic_version.py +++ b/google/cloud/aiplatform/v1/schema/predict/params_v1/gapic_version.py @@ -13,4 +13,4 @@ # See the License for the specific language governing permissions and # limitations under the License. # -__version__ = "1.53.0" # {x-release-please-version} +__version__ = "1.54.0" # {x-release-please-version} diff --git a/google/cloud/aiplatform/v1/schema/predict/prediction/gapic_version.py b/google/cloud/aiplatform/v1/schema/predict/prediction/gapic_version.py index b2b84effcb..70cd5c996f 100644 --- a/google/cloud/aiplatform/v1/schema/predict/prediction/gapic_version.py +++ b/google/cloud/aiplatform/v1/schema/predict/prediction/gapic_version.py @@ -13,4 +13,4 @@ # See the License for the specific language governing permissions and # limitations under the License. # -__version__ = "1.53.0" # {x-release-please-version} +__version__ = "1.54.0" # {x-release-please-version} diff --git a/google/cloud/aiplatform/v1/schema/predict/prediction_v1/gapic_version.py b/google/cloud/aiplatform/v1/schema/predict/prediction_v1/gapic_version.py index b2b84effcb..70cd5c996f 100644 --- a/google/cloud/aiplatform/v1/schema/predict/prediction_v1/gapic_version.py +++ b/google/cloud/aiplatform/v1/schema/predict/prediction_v1/gapic_version.py @@ -13,4 +13,4 @@ # See the License for the specific language governing permissions and # limitations under the License. # -__version__ = "1.53.0" # {x-release-please-version} +__version__ = "1.54.0" # {x-release-please-version} diff --git a/google/cloud/aiplatform/v1/schema/trainingjob/definition/gapic_version.py b/google/cloud/aiplatform/v1/schema/trainingjob/definition/gapic_version.py index b2b84effcb..70cd5c996f 100644 --- a/google/cloud/aiplatform/v1/schema/trainingjob/definition/gapic_version.py +++ b/google/cloud/aiplatform/v1/schema/trainingjob/definition/gapic_version.py @@ -13,4 +13,4 @@ # See the License for the specific language governing permissions and # limitations under the License. 
# -__version__ = "1.53.0" # {x-release-please-version} +__version__ = "1.54.0" # {x-release-please-version} diff --git a/google/cloud/aiplatform/v1/schema/trainingjob/definition_v1/gapic_version.py b/google/cloud/aiplatform/v1/schema/trainingjob/definition_v1/gapic_version.py index b2b84effcb..70cd5c996f 100644 --- a/google/cloud/aiplatform/v1/schema/trainingjob/definition_v1/gapic_version.py +++ b/google/cloud/aiplatform/v1/schema/trainingjob/definition_v1/gapic_version.py @@ -13,4 +13,4 @@ # See the License for the specific language governing permissions and # limitations under the License. # -__version__ = "1.53.0" # {x-release-please-version} +__version__ = "1.54.0" # {x-release-please-version} diff --git a/google/cloud/aiplatform/v1beta1/schema/predict/instance/gapic_version.py b/google/cloud/aiplatform/v1beta1/schema/predict/instance/gapic_version.py index b2b84effcb..70cd5c996f 100644 --- a/google/cloud/aiplatform/v1beta1/schema/predict/instance/gapic_version.py +++ b/google/cloud/aiplatform/v1beta1/schema/predict/instance/gapic_version.py @@ -13,4 +13,4 @@ # See the License for the specific language governing permissions and # limitations under the License. # -__version__ = "1.53.0" # {x-release-please-version} +__version__ = "1.54.0" # {x-release-please-version} diff --git a/google/cloud/aiplatform/v1beta1/schema/predict/instance_v1beta1/gapic_version.py b/google/cloud/aiplatform/v1beta1/schema/predict/instance_v1beta1/gapic_version.py index b2b84effcb..70cd5c996f 100644 --- a/google/cloud/aiplatform/v1beta1/schema/predict/instance_v1beta1/gapic_version.py +++ b/google/cloud/aiplatform/v1beta1/schema/predict/instance_v1beta1/gapic_version.py @@ -13,4 +13,4 @@ # See the License for the specific language governing permissions and # limitations under the License. # -__version__ = "1.53.0" # {x-release-please-version} +__version__ = "1.54.0" # {x-release-please-version} diff --git a/google/cloud/aiplatform/v1beta1/schema/predict/params/gapic_version.py b/google/cloud/aiplatform/v1beta1/schema/predict/params/gapic_version.py index b2b84effcb..70cd5c996f 100644 --- a/google/cloud/aiplatform/v1beta1/schema/predict/params/gapic_version.py +++ b/google/cloud/aiplatform/v1beta1/schema/predict/params/gapic_version.py @@ -13,4 +13,4 @@ # See the License for the specific language governing permissions and # limitations under the License. # -__version__ = "1.53.0" # {x-release-please-version} +__version__ = "1.54.0" # {x-release-please-version} diff --git a/google/cloud/aiplatform/v1beta1/schema/predict/params_v1beta1/gapic_version.py b/google/cloud/aiplatform/v1beta1/schema/predict/params_v1beta1/gapic_version.py index b2b84effcb..70cd5c996f 100644 --- a/google/cloud/aiplatform/v1beta1/schema/predict/params_v1beta1/gapic_version.py +++ b/google/cloud/aiplatform/v1beta1/schema/predict/params_v1beta1/gapic_version.py @@ -13,4 +13,4 @@ # See the License for the specific language governing permissions and # limitations under the License. 
# -__version__ = "1.53.0" # {x-release-please-version} +__version__ = "1.54.0" # {x-release-please-version} diff --git a/google/cloud/aiplatform/v1beta1/schema/predict/prediction/gapic_version.py b/google/cloud/aiplatform/v1beta1/schema/predict/prediction/gapic_version.py index b2b84effcb..70cd5c996f 100644 --- a/google/cloud/aiplatform/v1beta1/schema/predict/prediction/gapic_version.py +++ b/google/cloud/aiplatform/v1beta1/schema/predict/prediction/gapic_version.py @@ -13,4 +13,4 @@ # See the License for the specific language governing permissions and # limitations under the License. # -__version__ = "1.53.0" # {x-release-please-version} +__version__ = "1.54.0" # {x-release-please-version} diff --git a/google/cloud/aiplatform/v1beta1/schema/predict/prediction_v1beta1/gapic_version.py b/google/cloud/aiplatform/v1beta1/schema/predict/prediction_v1beta1/gapic_version.py index b2b84effcb..70cd5c996f 100644 --- a/google/cloud/aiplatform/v1beta1/schema/predict/prediction_v1beta1/gapic_version.py +++ b/google/cloud/aiplatform/v1beta1/schema/predict/prediction_v1beta1/gapic_version.py @@ -13,4 +13,4 @@ # See the License for the specific language governing permissions and # limitations under the License. # -__version__ = "1.53.0" # {x-release-please-version} +__version__ = "1.54.0" # {x-release-please-version} diff --git a/google/cloud/aiplatform/v1beta1/schema/trainingjob/definition/gapic_version.py b/google/cloud/aiplatform/v1beta1/schema/trainingjob/definition/gapic_version.py index b2b84effcb..70cd5c996f 100644 --- a/google/cloud/aiplatform/v1beta1/schema/trainingjob/definition/gapic_version.py +++ b/google/cloud/aiplatform/v1beta1/schema/trainingjob/definition/gapic_version.py @@ -13,4 +13,4 @@ # See the License for the specific language governing permissions and # limitations under the License. # -__version__ = "1.53.0" # {x-release-please-version} +__version__ = "1.54.0" # {x-release-please-version} diff --git a/google/cloud/aiplatform/v1beta1/schema/trainingjob/definition_v1beta1/gapic_version.py b/google/cloud/aiplatform/v1beta1/schema/trainingjob/definition_v1beta1/gapic_version.py index b2b84effcb..70cd5c996f 100644 --- a/google/cloud/aiplatform/v1beta1/schema/trainingjob/definition_v1beta1/gapic_version.py +++ b/google/cloud/aiplatform/v1beta1/schema/trainingjob/definition_v1beta1/gapic_version.py @@ -13,4 +13,4 @@ # See the License for the specific language governing permissions and # limitations under the License. # -__version__ = "1.53.0" # {x-release-please-version} +__version__ = "1.54.0" # {x-release-please-version} diff --git a/google/cloud/aiplatform/version.py b/google/cloud/aiplatform/version.py index 1fbaf641c4..842c65b9f1 100644 --- a/google/cloud/aiplatform/version.py +++ b/google/cloud/aiplatform/version.py @@ -15,4 +15,4 @@ # limitations under the License. # -__version__ = "1.53.0" +__version__ = "1.54.0" diff --git a/google/cloud/aiplatform_v1/gapic_version.py b/google/cloud/aiplatform_v1/gapic_version.py index b2b84effcb..70cd5c996f 100644 --- a/google/cloud/aiplatform_v1/gapic_version.py +++ b/google/cloud/aiplatform_v1/gapic_version.py @@ -13,4 +13,4 @@ # See the License for the specific language governing permissions and # limitations under the License. 
# -__version__ = "1.53.0" # {x-release-please-version} +__version__ = "1.54.0" # {x-release-please-version} diff --git a/google/cloud/aiplatform_v1beta1/gapic_version.py b/google/cloud/aiplatform_v1beta1/gapic_version.py index b2b84effcb..70cd5c996f 100644 --- a/google/cloud/aiplatform_v1beta1/gapic_version.py +++ b/google/cloud/aiplatform_v1beta1/gapic_version.py @@ -13,4 +13,4 @@ # See the License for the specific language governing permissions and # limitations under the License. # -__version__ = "1.53.0" # {x-release-please-version} +__version__ = "1.54.0" # {x-release-please-version} diff --git a/pypi/_vertex_ai_placeholder/version.py b/pypi/_vertex_ai_placeholder/version.py index 57b90e71a2..7dd6e612c5 100644 --- a/pypi/_vertex_ai_placeholder/version.py +++ b/pypi/_vertex_ai_placeholder/version.py @@ -15,4 +15,4 @@ # limitations under the License. # -__version__ = "1.53.0" +__version__ = "1.54.0" diff --git a/samples/generated_samples/snippet_metadata_google.cloud.aiplatform.v1.json b/samples/generated_samples/snippet_metadata_google.cloud.aiplatform.v1.json index a939560bbb..5dff7f82a2 100644 --- a/samples/generated_samples/snippet_metadata_google.cloud.aiplatform.v1.json +++ b/samples/generated_samples/snippet_metadata_google.cloud.aiplatform.v1.json @@ -8,7 +8,7 @@ ], "language": "PYTHON", "name": "google-cloud-aiplatform", - "version": "1.53.0" + "version": "1.54.0" }, "snippets": [ { diff --git a/samples/generated_samples/snippet_metadata_google.cloud.aiplatform.v1beta1.json b/samples/generated_samples/snippet_metadata_google.cloud.aiplatform.v1beta1.json index 13067b9265..8023c42b1f 100644 --- a/samples/generated_samples/snippet_metadata_google.cloud.aiplatform.v1beta1.json +++ b/samples/generated_samples/snippet_metadata_google.cloud.aiplatform.v1beta1.json @@ -8,7 +8,7 @@ ], "language": "PYTHON", "name": "google-cloud-aiplatform", - "version": "1.53.0" + "version": "1.54.0" }, "snippets": [ {